Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/agp/hp-agp.c | 5
-rw-r--r--  drivers/char/hw_random/Kconfig | 13
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/tx4939-rng.c | 184
-rw-r--r--  drivers/char/isicom.c | 2
-rw-r--r--  drivers/char/mem.c | 115
-rw-r--r--  drivers/char/ppdev.c | 29
-rw-r--r--  drivers/char/sysrq.c | 15
-rw-r--r--  drivers/clocksource/sh_cmt.c | 4
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 1
-rw-r--r--  drivers/clocksource/sh_tmu.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 61
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 68
-rw-r--r--  drivers/dma/Kconfig | 8
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/txx9dmac.c | 1354
-rw-r--r--  drivers/dma/txx9dmac.h | 307
-rw-r--r--  drivers/edac/Kconfig | 11
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/amd8111_edac.c | 3
-rw-r--r--  drivers/edac/cell_edac.c | 2
-rw-r--r--  drivers/edac/cpc925_edac.c | 1017
-rw-r--r--  drivers/edac/edac_core.h | 1
-rw-r--r--  drivers/edac/edac_device.c | 14
-rw-r--r--  drivers/firmware/pcdp.c | 4
-rw-r--r--  drivers/gpio/max7301.c | 2
-rw-r--r--  drivers/gpio/pca953x.c | 80
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/isdn/i4l/isdn_net.c | 2
-rw-r--r--  drivers/md/faulty.c | 21
-rw-r--r--  drivers/md/linear.c | 218
-rw-r--r--  drivers/md/linear.h | 12
-rw-r--r--  drivers/md/md.c | 196
-rw-r--r--  drivers/md/md.h | 14
-rw-r--r--  drivers/md/multipath.c | 23
-rw-r--r--  drivers/md/multipath.h | 6
-rw-r--r--  drivers/md/raid0.c | 403
-rw-r--r--  drivers/md/raid0.h | 10
-rw-r--r--  drivers/md/raid1.c | 46
-rw-r--r--  drivers/md/raid1.h | 6
-rw-r--r--  drivers/md/raid10.c | 62
-rw-r--r--  drivers/md/raid10.h | 6
-rw-r--r--  drivers/md/raid5.c | 218
-rw-r--r--  drivers/md/raid5.h | 8
-rw-r--r--  drivers/media/video/ov772x.c | 6
-rw-r--r--  drivers/media/video/tw9910.c | 6
-rw-r--r--  drivers/mfd/Kconfig | 24
-rw-r--r--  drivers/mfd/Makefile | 5
-rw-r--r--  drivers/mfd/ab3100-core.c | 991
-rw-r--r--  drivers/mfd/asic3.c | 312
-rw-r--r--  drivers/mfd/da903x.c | 2
-rw-r--r--  drivers/mfd/ezx-pcap.c | 505
-rw-r--r--  drivers/mfd/pcf50633-core.c | 2
-rw-r--r--  drivers/mfd/pcf50633-gpio.c | 3
-rw-r--r--  drivers/mfd/t7l66xb.c | 2
-rw-r--r--  drivers/mfd/tc6387xb.c | 2
-rw-r--r--  drivers/mfd/tc6393xb.c | 2
-rw-r--r--  drivers/mfd/twl4030-core.c | 2
-rw-r--r--  drivers/mfd/twl4030-irq.c | 2
-rw-r--r--  drivers/mfd/wm8350-regmap.c | 4
-rw-r--r--  drivers/mfd/wm8400-core.c | 2
-rw-r--r--  drivers/misc/sgi-gru/Makefile | 2
-rw-r--r--  drivers/misc/sgi-gru/gru_instructions.h | 68
-rw-r--r--  drivers/misc/sgi-gru/grufault.c | 118
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 69
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.c | 17
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.h | 30
-rw-r--r--  drivers/misc/sgi-gru/grukdump.c | 232
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 562
-rw-r--r--  drivers/misc/sgi-gru/grukservices.h | 51
-rw-r--r--  drivers/misc/sgi-gru/grulib.h | 69
-rw-r--r--  drivers/misc/sgi-gru/grumain.c | 187
-rw-r--r--  drivers/misc/sgi-gru/gruprocfs.c | 17
-rw-r--r--  drivers/misc/sgi-gru/grutables.h | 60
-rw-r--r--  drivers/mtd/ubi/Kconfig | 13
-rw-r--r--  drivers/mtd/ubi/Makefile | 2
-rw-r--r--  drivers/mtd/ubi/build.c | 161
-rw-r--r--  drivers/mtd/ubi/cdev.c | 32
-rw-r--r--  drivers/mtd/ubi/eba.c | 99
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 378
-rw-r--r--  drivers/mtd/ubi/io.c | 82
-rw-r--r--  drivers/mtd/ubi/kapi.c | 117
-rw-r--r--  drivers/mtd/ubi/ubi.h | 84
-rw-r--r--  drivers/mtd/ubi/upd.c | 8
-rw-r--r--  drivers/mtd/ubi/vmt.c | 65
-rw-r--r--  drivers/mtd/ubi/wl.c | 179
-rw-r--r--  drivers/net/Kconfig | 5
-rw-r--r--  drivers/net/bnx2.c | 4
-rw-r--r--  drivers/net/davinci_emac.c | 1
-rw-r--r--  drivers/net/e100.c | 11
-rw-r--r--  drivers/net/e1000/e1000_main.c | 4
-rw-r--r--  drivers/net/forcedeth.c | 46
-rw-r--r--  drivers/net/hamradio/bpqether.c | 2
-rw-r--r--  drivers/net/hp100.c | 35
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 11
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h | 1
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 6
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 11
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 32
-rw-r--r--  drivers/net/niu.c | 4
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/r8169.c | 19
-rw-r--r--  drivers/net/sis190.c | 2
-rw-r--r--  drivers/net/sky2.c | 161
-rw-r--r--  drivers/net/sky2.h | 1
-rw-r--r--  drivers/net/sonic.c | 2
-rw-r--r--  drivers/net/ucc_geth.c | 113
-rw-r--r--  drivers/net/ucc_geth.h | 2
-rw-r--r--  drivers/net/via-velocity.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 10
-rw-r--r--  drivers/net/vxge/vxge-config.c | 12
-rw-r--r--  drivers/net/vxge/vxge-main.c | 13
-rw-r--r--  drivers/net/vxge/vxge-version.h | 4
-rw-r--r--  drivers/net/wan/lapbether.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath5k/pcu.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 130
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 141
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 4
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 11
-rw-r--r--  drivers/pci/setup-res.c | 4
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 2
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 5
-rw-r--r--  drivers/pps/Kconfig | 33
-rw-r--r--  drivers/pps/Makefile | 8
-rw-r--r--  drivers/pps/kapi.c | 329
-rw-r--r--  drivers/pps/pps.c | 312
-rw-r--r--  drivers/pps/sysfs.c | 98
-rw-r--r--  drivers/rtc/Kconfig | 9
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 41
-rw-r--r--  drivers/rtc/rtc-ds1553.c | 3
-rw-r--r--  drivers/rtc/rtc-ds1742.c | 31
-rw-r--r--  drivers/rtc/rtc-rx8025.c | 688
-rw-r--r--  drivers/rtc/rtc-tx4939.c | 4
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 10
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 18
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 185
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 3
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 15
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 10
-rw-r--r--  drivers/scsi/bnx2i/Kconfig | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 27
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 35
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 661
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 264
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 2
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 7
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 614
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 18
-rw-r--r--  drivers/serial/sh-sci.c | 18
-rw-r--r--  drivers/sh/intc.c | 13
-rw-r--r--  drivers/spi/atmel_spi.c | 14
-rw-r--r--  drivers/spi/au1550_spi.c | 14
-rw-r--r--  drivers/spi/mpc52xx_psc_spi.c | 34
-rw-r--r--  drivers/spi/omap2_mcspi.c | 16
-rw-r--r--  drivers/spi/omap_uwire.c | 14
-rw-r--r--  drivers/spi/orion_spi.c | 12
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 23
-rw-r--r--  drivers/spi/spi.c | 70
-rw-r--r--  drivers/spi/spi_bfin5xx.c | 15
-rw-r--r--  drivers/spi/spi_bitbang.c | 16
-rw-r--r--  drivers/spi/spi_imx.c | 17
-rw-r--r--  drivers/spi/spi_mpc83xx.c | 20
-rw-r--r--  drivers/spi/spi_s3c24xx.c | 19
-rw-r--r--  drivers/spi/spi_txx9.c | 11
-rw-r--r--  drivers/spi/xilinx_spi.c | 18
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/octeon/Kconfig | 12
-rw-r--r--  drivers/staging/octeon/Makefile | 30
-rw-r--r--  drivers/staging/octeon/cvmx-address.h | 274
-rw-r--r--  drivers/staging/octeon/cvmx-asxx-defs.h | 475
-rw-r--r--  drivers/staging/octeon/cvmx-cmd-queue.c | 306
-rw-r--r--  drivers/staging/octeon/cvmx-cmd-queue.h | 617
-rw-r--r--  drivers/staging/octeon/cvmx-config.h | 169
-rw-r--r--  drivers/staging/octeon/cvmx-dbg-defs.h | 72
-rw-r--r--  drivers/staging/octeon/cvmx-fau.h | 597
-rw-r--r--  drivers/staging/octeon/cvmx-fpa-defs.h | 403
-rw-r--r--  drivers/staging/octeon/cvmx-fpa.c | 183
-rw-r--r--  drivers/staging/octeon/cvmx-fpa.h | 299
-rw-r--r--  drivers/staging/octeon/cvmx-gmxx-defs.h | 2529
-rw-r--r--  drivers/staging/octeon/cvmx-helper-board.c | 706
-rw-r--r--  drivers/staging/octeon/cvmx-helper-board.h | 180
-rw-r--r--  drivers/staging/octeon/cvmx-helper-fpa.c | 243
-rw-r--r--  drivers/staging/octeon/cvmx-helper-fpa.h | 64
-rw-r--r--  drivers/staging/octeon/cvmx-helper-loop.c | 85
-rw-r--r--  drivers/staging/octeon/cvmx-helper-loop.h | 59
-rw-r--r--  drivers/staging/octeon/cvmx-helper-npi.c | 113
-rw-r--r--  drivers/staging/octeon/cvmx-helper-npi.h | 60
-rw-r--r--  drivers/staging/octeon/cvmx-helper-rgmii.c | 525
-rw-r--r--  drivers/staging/octeon/cvmx-helper-rgmii.h | 110
-rw-r--r--  drivers/staging/octeon/cvmx-helper-sgmii.c | 550
-rw-r--r--  drivers/staging/octeon/cvmx-helper-sgmii.h | 104
-rw-r--r--  drivers/staging/octeon/cvmx-helper-spi.c | 195
-rw-r--r--  drivers/staging/octeon/cvmx-helper-spi.h | 84
-rw-r--r--  drivers/staging/octeon/cvmx-helper-util.c | 433
-rw-r--r--  drivers/staging/octeon/cvmx-helper-util.h | 215
-rw-r--r--  drivers/staging/octeon/cvmx-helper-xaui.c | 348
-rw-r--r--  drivers/staging/octeon/cvmx-helper-xaui.h | 103
-rw-r--r--  drivers/staging/octeon/cvmx-helper.c | 1058
-rw-r--r--  drivers/staging/octeon/cvmx-helper.h | 227
-rw-r--r--  drivers/staging/octeon/cvmx-interrupt-decodes.c | 371
-rw-r--r--  drivers/staging/octeon/cvmx-interrupt-rsl.c | 140
-rw-r--r--  drivers/staging/octeon/cvmx-ipd.h | 338
-rw-r--r--  drivers/staging/octeon/cvmx-mdio.h | 506
-rw-r--r--  drivers/staging/octeon/cvmx-packet.h | 65
-rw-r--r--  drivers/staging/octeon/cvmx-pcsx-defs.h | 370
-rw-r--r--  drivers/staging/octeon/cvmx-pcsxx-defs.h | 316
-rw-r--r--  drivers/staging/octeon/cvmx-pip-defs.h | 1267
-rw-r--r--  drivers/staging/octeon/cvmx-pip.h | 524
-rw-r--r--  drivers/staging/octeon/cvmx-pko-defs.h | 1133
-rw-r--r--  drivers/staging/octeon/cvmx-pko.c | 506
-rw-r--r--  drivers/staging/octeon/cvmx-pko.h | 610
-rw-r--r--  drivers/staging/octeon/cvmx-pow.h | 1982
-rw-r--r--  drivers/staging/octeon/cvmx-scratch.h | 139
-rw-r--r--  drivers/staging/octeon/cvmx-smix-defs.h | 178
-rw-r--r--  drivers/staging/octeon/cvmx-spi.c | 667
-rw-r--r--  drivers/staging/octeon/cvmx-spi.h | 269
-rw-r--r--  drivers/staging/octeon/cvmx-spxx-defs.h | 347
-rw-r--r--  drivers/staging/octeon/cvmx-srxx-defs.h | 126
-rw-r--r--  drivers/staging/octeon/cvmx-stxx-defs.h | 292
-rw-r--r--  drivers/staging/octeon/cvmx-wqe.h | 397
-rw-r--r--  drivers/staging/octeon/ethernet-common.c | 328
-rw-r--r--  drivers/staging/octeon/ethernet-common.h | 29
-rw-r--r--  drivers/staging/octeon/ethernet-defines.h | 134
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.c | 231
-rw-r--r--  drivers/staging/octeon/ethernet-mdio.h | 46
-rw-r--r--  drivers/staging/octeon/ethernet-mem.c | 198
-rw-r--r--  drivers/staging/octeon/ethernet-mem.h | 29
-rw-r--r--  drivers/staging/octeon/ethernet-proc.c | 256
-rw-r--r--  drivers/staging/octeon/ethernet-proc.h | 29
-rw-r--r--  drivers/staging/octeon/ethernet-rgmii.c | 397
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c | 505
-rw-r--r--  drivers/staging/octeon/ethernet-rx.h | 33
-rw-r--r--  drivers/staging/octeon/ethernet-sgmii.c | 129
-rw-r--r--  drivers/staging/octeon/ethernet-spi.c | 323
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 634
-rw-r--r--  drivers/staging/octeon/ethernet-tx.h | 32
-rw-r--r--  drivers/staging/octeon/ethernet-util.h | 81
-rw-r--r--  drivers/staging/octeon/ethernet-xaui.c | 127
-rw-r--r--  drivers/staging/octeon/ethernet.c | 507
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h | 127
-rw-r--r--  drivers/w1/masters/w1-gpio.c | 35
-rw-r--r--  drivers/watchdog/alim7101_wdt.c | 15
-rw-r--r--  drivers/watchdog/ar7_wdt.c | 3
-rw-r--r--  drivers/watchdog/at91rm9200_wdt.c | 3
-rw-r--r--  drivers/watchdog/at91sam9_wdt.c | 3
-rw-r--r--  drivers/watchdog/bfin_wdt.c | 14
-rw-r--r--  drivers/watchdog/cpwd.c | 6
-rw-r--r--  drivers/watchdog/davinci_wdt.c | 6
-rw-r--r--  drivers/watchdog/hpwdt.c | 59
-rw-r--r--  drivers/watchdog/iTCO_vendor_support.c | 88
-rw-r--r--  drivers/watchdog/iTCO_wdt.c | 36
-rw-r--r--  drivers/watchdog/indydog.c | 4
-rw-r--r--  drivers/watchdog/it8712f_wdt.c | 3
-rw-r--r--  drivers/watchdog/ks8695_wdt.c | 4
-rw-r--r--  drivers/watchdog/machzwd.c | 9
-rw-r--r--  drivers/watchdog/mpcore_wdt.c | 7
-rw-r--r--  drivers/watchdog/mtx-1_wdt.c | 6
-rw-r--r--  drivers/watchdog/pnx4008_wdt.c | 6
-rw-r--r--  drivers/watchdog/rdc321x_wdt.c | 4
-rw-r--r--  drivers/watchdog/rm9k_wdt.c | 6
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 32
-rw-r--r--  drivers/watchdog/sb_wdog.c | 9
-rw-r--r--  drivers/watchdog/sbc60xxwdt.c | 5
-rw-r--r--  drivers/watchdog/sbc8360.c | 4
-rw-r--r--  drivers/watchdog/sbc_epx_c3.c | 12
-rw-r--r--  drivers/watchdog/scx200_wdt.c | 7
-rw-r--r--  drivers/watchdog/shwdt.c | 4
-rw-r--r--  drivers/watchdog/softdog.c | 7
-rw-r--r--  drivers/watchdog/w83697hf_wdt.c | 3
-rw-r--r--  drivers/watchdog/wdrtas.c | 7
301 files changed, 37648 insertions(+), 2810 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index a442c8f29fc1..48bbdbe43e69 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/pps/Kconfig"
+
source "drivers/gpio/Kconfig"
source "drivers/w1/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 00b44f4ccf03..bc4205d2fc3c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_I2O) += message/
obj-$(CONFIG_RTC_LIB) += rtc/
obj-y += i2c/ media/
+obj-$(CONFIG_PPS) += pps/
obj-$(CONFIG_W1) += w1/
obj-$(CONFIG_POWER_SUPPLY) += power/
obj-$(CONFIG_HWMON) += hwmon/
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 30bae6de6a0d..0bd01f49cfd8 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -807,7 +807,7 @@ if RTC_LIB=n
config RTC
tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
- && !ARM && !SUPERH && !S390 && !AVR32
+ && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 183ac3fe44fb..9c7e2343c399 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -518,8 +518,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
return AE_OK;
- printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n",
- (char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+ printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
+ "(ioc=%llx, lba=%llx)\n", (char *)context,
+ sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
hp_zx1_gart_found = 1;
return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f4b3f7293feb..ce66a70184f7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -149,6 +149,19 @@ config HW_RANDOM_VIRTIO
To compile this driver as a module, choose M here: the
module will be called virtio-rng. If unsure, say N.
+config HW_RANDOM_TX4939
+ tristate "TX4939 Random Number Generator support"
+ depends on HW_RANDOM && SOC_TX4939
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on TX4939 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tx4939-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
depends on HW_RANDOM && ARCH_HAS_RNGA
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index fd1ecd2f6731..676828ba8123 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,4 +15,5 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
+obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
new file mode 100644
index 000000000000..544d9085a8e8
--- /dev/null
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -0,0 +1,184 @@
+/*
+ * RNG driver for TX4939 Random Number Generators (RNG)
+ *
+ * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+
+#define TX4939_RNG_RCSR 0x00000000
+#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
+
+#define TX4939_RNG_RCSR_INTE 0x00000008
+#define TX4939_RNG_RCSR_RST 0x00000004
+#define TX4939_RNG_RCSR_FIN 0x00000002
+#define TX4939_RNG_RCSR_ST 0x00000001
+
+struct tx4939_rng {
+ struct hwrng rng;
+ void __iomem *base;
+ u64 databuf[3];
+ unsigned int data_avail;
+};
+
+static void rng_io_start(void)
+{
+#ifndef CONFIG_64BIT
+ /*
+ * readq is reading a 64-bit register using a 64-bit load. On
+ * a 32-bit kernel however interrupts or any other processor
+ * exception would clobber the upper 32-bit of the processor
+ * register so interrupts need to be disabled.
+ */
+ local_irq_disable();
+#endif
+}
+
+static void rng_io_end(void)
+{
+#ifndef CONFIG_64BIT
+ local_irq_enable();
+#endif
+}
+
+static u64 read_rng(void __iomem *base, unsigned int offset)
+{
+ return ____raw_readq(base + offset);
+}
+
+static void write_rng(u64 val, void __iomem *base, unsigned int offset)
+{
+ return ____raw_writeq(val, base + offset);
+}
+
+static int tx4939_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
+ int i;
+
+ if (rngdev->data_avail)
+ return rngdev->data_avail;
+ for (i = 0; i < 20; i++) {
+ rng_io_start();
+ if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
+ & TX4939_RNG_RCSR_ST)) {
+ rngdev->databuf[0] =
+ read_rng(rngdev->base, TX4939_RNG_ROR(0));
+ rngdev->databuf[1] =
+ read_rng(rngdev->base, TX4939_RNG_ROR(1));
+ rngdev->databuf[2] =
+ read_rng(rngdev->base, TX4939_RNG_ROR(2));
+ rngdev->data_avail =
+ sizeof(rngdev->databuf) / sizeof(u32);
+ /* Start RNG */
+ write_rng(TX4939_RNG_RCSR_ST,
+ rngdev->base, TX4939_RNG_RCSR);
+ wait = 0;
+ }
+ rng_io_end();
+ if (!wait)
+ break;
+ /* 90 bus clock cycles by default for generation */
+ ndelay(90 * 5);
+ }
+ return rngdev->data_avail;
+}
+
+static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
+{
+ struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
+
+ rngdev->data_avail--;
+ *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
+ return sizeof(u32);
+}
+
+static int __init tx4939_rng_probe(struct platform_device *dev)
+{
+ struct tx4939_rng *rngdev;
+ struct resource *r;
+ int i;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EBUSY;
+ rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
+ if (!rngdev)
+ return -ENOMEM;
+ if (!devm_request_mem_region(&dev->dev, r->start, resource_size(r),
+ dev_name(&dev->dev)))
+ return -EBUSY;
+ rngdev->base = devm_ioremap(&dev->dev, r->start, resource_size(r));
+ if (!rngdev->base)
+ return -EBUSY;
+
+ rngdev->rng.name = dev_name(&dev->dev);
+ rngdev->rng.data_present = tx4939_rng_data_present;
+ rngdev->rng.data_read = tx4939_rng_data_read;
+
+ rng_io_start();
+ /* Reset RNG */
+ write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
+ write_rng(0, rngdev->base, TX4939_RNG_RCSR);
+ /* Start RNG */
+ write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
+ rng_io_end();
+ /*
+ * Drop first two results. From the datasheet:
+ * The quality of the random numbers generated immediately
+ * after reset can be insufficient. Therefore, do not use
+ * random numbers obtained from the first and second
+ * generations; use the ones from the third or subsequent
+ * generation.
+ */
+ for (i = 0; i < 2; i++) {
+ rngdev->data_avail = 0;
+ if (!tx4939_rng_data_present(&rngdev->rng, 1))
+ return -EIO;
+ }
+
+ platform_set_drvdata(dev, rngdev);
+ return hwrng_register(&rngdev->rng);
+}
+
+static int __exit tx4939_rng_remove(struct platform_device *dev)
+{
+ struct tx4939_rng *rngdev = platform_get_drvdata(dev);
+
+ hwrng_unregister(&rngdev->rng);
+ platform_set_drvdata(dev, NULL);
+ return 0;
+}
+
+static struct platform_driver tx4939_rng_driver = {
+ .driver = {
+ .name = "tx4939-rng",
+ .owner = THIS_MODULE,
+ },
+ .remove = tx4939_rng_remove,
+};
+
+static int __init tx4939rng_init(void)
+{
+ return platform_driver_probe(&tx4939_rng_driver, tx4939_rng_probe);
+}
+
+static void __exit tx4939rng_exit(void)
+{
+ platform_driver_unregister(&tx4939_rng_driver);
+}
+
+module_init(tx4939rng_init);
+module_exit(tx4939rng_exit);
+
+MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 4d745a89504f..4159292e35cf 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1593,7 +1593,7 @@ static unsigned int card_count;
static int __devinit isicom_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned int signature, index;
+ unsigned int uninitialized_var(signature), index;
int retval = -EPERM;
struct isi_board *board = NULL;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f96d0bef855e..afa8813e737a 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -863,59 +863,58 @@ static const struct file_operations kmsg_fops = {
.write = kmsg_write,
};
-static int memory_open(struct inode * inode, struct file * filp)
-{
- int ret = 0;
-
- lock_kernel();
- switch (iminor(inode)) {
- case 1:
- filp->f_op = &mem_fops;
- filp->f_mapping->backing_dev_info =
- &directly_mappable_cdev_bdi;
- break;
+static const struct {
+ unsigned int minor;
+ char *name;
+ umode_t mode;
+ const struct file_operations *fops;
+ struct backing_dev_info *dev_info;
+} devlist[] = { /* list of minor devices */
+ {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops,
+ &directly_mappable_cdev_bdi},
#ifdef CONFIG_DEVKMEM
- case 2:
- filp->f_op = &kmem_fops;
- filp->f_mapping->backing_dev_info =
- &directly_mappable_cdev_bdi;
- break;
+ {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops,
+ &directly_mappable_cdev_bdi},
#endif
- case 3:
- filp->f_op = &null_fops;
- break;
+ {3, "null", S_IRUGO | S_IWUGO, &null_fops, NULL},
#ifdef CONFIG_DEVPORT
- case 4:
- filp->f_op = &port_fops;
- break;
+ {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops, NULL},
#endif
- case 5:
- filp->f_mapping->backing_dev_info = &zero_bdi;
- filp->f_op = &zero_fops;
- break;
- case 7:
- filp->f_op = &full_fops;
- break;
- case 8:
- filp->f_op = &random_fops;
- break;
- case 9:
- filp->f_op = &urandom_fops;
- break;
- case 11:
- filp->f_op = &kmsg_fops;
- break;
+ {5, "zero", S_IRUGO | S_IWUGO, &zero_fops, &zero_bdi},
+ {7, "full", S_IRUGO | S_IWUGO, &full_fops, NULL},
+ {8, "random", S_IRUGO | S_IWUSR, &random_fops, NULL},
+ {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops, NULL},
+ {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops, NULL},
#ifdef CONFIG_CRASH_DUMP
- case 12:
- filp->f_op = &oldmem_fops;
- break;
+ {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops, NULL},
#endif
- default:
- unlock_kernel();
- return -ENXIO;
+};
+
+static int memory_open(struct inode *inode, struct file *filp)
+{
+ int ret = 0;
+ int i;
+
+ lock_kernel();
+
+ for (i = 0; i < ARRAY_SIZE(devlist); i++) {
+ if (devlist[i].minor == iminor(inode)) {
+ filp->f_op = devlist[i].fops;
+ if (devlist[i].dev_info) {
+ filp->f_mapping->backing_dev_info =
+ devlist[i].dev_info;
+ }
+
+ break;
+ }
}
- if (filp->f_op && filp->f_op->open)
- ret = filp->f_op->open(inode,filp);
+
+ if (i == ARRAY_SIZE(devlist))
+ ret = -ENXIO;
+ else
+ if (filp->f_op && filp->f_op->open)
+ ret = filp->f_op->open(inode, filp);
+
unlock_kernel();
return ret;
}
@@ -924,30 +923,6 @@ static const struct file_operations memory_fops = {
.open = memory_open, /* just a selector for the real open */
};
-static const struct {
- unsigned int minor;
- char *name;
- umode_t mode;
- const struct file_operations *fops;
-} devlist[] = { /* list of minor devices */
- {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
-#ifdef CONFIG_DEVKMEM
- {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
-#endif
- {3, "null", S_IRUGO | S_IWUGO, &null_fops},
-#ifdef CONFIG_DEVPORT
- {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
-#endif
- {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
- {7, "full", S_IRUGO | S_IWUGO, &full_fops},
- {8, "random", S_IRUGO | S_IWUSR, &random_fops},
- {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
- {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
-#ifdef CONFIG_CRASH_DUMP
- {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
-#endif
-};
-
static struct class *mem_class;
static int __init chr_dev_init(void)
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index c84c34fb1231..432655bcb04c 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -114,8 +114,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
if (!(pp->flags & PP_CLAIMED)) {
/* Don't have the port claimed */
- printk (KERN_DEBUG CHRDEV "%x: claim the port first\n",
- minor);
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
return -EINVAL;
}
@@ -198,8 +197,7 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
if (!(pp->flags & PP_CLAIMED)) {
/* Don't have the port claimed */
- printk (KERN_DEBUG CHRDEV "%x: claim the port first\n",
- minor);
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
return -EINVAL;
}
@@ -313,7 +311,7 @@ static int register_device (int minor, struct pp_struct *pp)
}
pp->pdev = pdev;
- printk (KERN_DEBUG "%s: registered pardevice\n", name);
+ pr_debug("%s: registered pardevice\n", name);
return 0;
}
@@ -343,8 +341,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int ret;
if (pp->flags & PP_CLAIMED) {
- printk (KERN_DEBUG CHRDEV
- "%x: you've already got it!\n", minor);
+ pr_debug(CHRDEV "%x: you've already got it!\n", minor);
return -EINVAL;
}
@@ -379,7 +376,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
case PPEXCL:
if (pp->pdev) {
- printk (KERN_DEBUG CHRDEV "%x: too late for PPEXCL; "
+ pr_debug(CHRDEV "%x: too late for PPEXCL; "
"already registered\n", minor);
if (pp->flags & PP_EXCL)
/* But it's not really an error. */
@@ -491,8 +488,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Everything else requires the port to be claimed, so check
* that now. */
if ((pp->flags & PP_CLAIMED) == 0) {
- printk (KERN_DEBUG CHRDEV "%x: claim the port first\n",
- minor);
+ pr_debug(CHRDEV "%x: claim the port first\n", minor);
return -EINVAL;
}
@@ -624,8 +620,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
default:
- printk (KERN_DEBUG CHRDEV "%x: What? (cmd=0x%x)\n", minor,
- cmd);
+ pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
return -EINVAL;
}
@@ -698,9 +693,8 @@ static int pp_release (struct inode * inode, struct file * file)
}
if (compat_negot) {
parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
- printk (KERN_DEBUG CHRDEV
- "%x: negotiated back to compatibility mode because "
- "user-space forgot\n", minor);
+ pr_debug(CHRDEV "%x: negotiated back to compatibility "
+ "mode because user-space forgot\n", minor);
}
if (pp->flags & PP_CLAIMED) {
@@ -713,7 +707,7 @@ static int pp_release (struct inode * inode, struct file * file)
info->phase = pp->saved_state.phase;
parport_release (pp->pdev);
if (compat_negot != 1) {
- printk (KERN_DEBUG CHRDEV "%x: released pardevice "
+ pr_debug(CHRDEV "%x: released pardevice "
"because user-space forgot\n", minor);
}
}
@@ -723,8 +717,7 @@ static int pp_release (struct inode * inode, struct file * file)
parport_unregister_device (pp->pdev);
kfree (name);
pp->pdev = NULL;
- printk (KERN_DEBUG CHRDEV "%x: unregistered pardevice\n",
- minor);
+ pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
kfree (pp);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 39a05b5fa9cb..0db35857e4d8 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -121,20 +121,17 @@ static struct sysrq_key_op sysrq_unraw_op = {
#define sysrq_unraw_op (*(struct sysrq_key_op *)0)
#endif /* CONFIG_VT */
-#ifdef CONFIG_KEXEC
-static void sysrq_handle_crashdump(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key, struct tty_struct *tty)
{
- crash_kexec(get_irq_regs());
+ char *killer = NULL;
+ *killer = 1;
}
static struct sysrq_key_op sysrq_crashdump_op = {
- .handler = sysrq_handle_crashdump,
- .help_msg = "Crashdump",
- .action_msg = "Trigger a crashdump",
+ .handler = sysrq_handle_crash,
+ .help_msg = "Crash",
+ .action_msg = "Trigger a crash",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-#else
-#define sysrq_crashdump_op (*(struct sysrq_key_op *)0)
-#endif
static void sysrq_handle_reboot(int key, struct tty_struct *tty)
{
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index cf56a2af5fe1..2964f5f4a7ef 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -184,6 +184,9 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
/* disable channel */
sh_cmt_start_stop_ch(p, 0);
+ /* disable interrupts in CMT block */
+ sh_cmt_write(p, CMCSR, 0);
+
/* stop clock */
clk_disable(p->clk);
}
@@ -599,7 +602,6 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
- p->irqaction.mask = CPU_MASK_NONE;
ret = setup_irq(irq, &p->irqaction);
if (ret) {
pr_err("sh_cmt: failed to request irq %d\n", irq);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index d1ae75454d10..973e714d6051 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -283,7 +283,6 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
- p->irqaction.mask = CPU_MASK_NONE;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, cfg->clk);
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index d6ea4398bf62..9ffb05f4095d 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -138,6 +138,9 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
/* disable channel */
sh_tmu_start_stop_ch(p, 0);
+ /* disable interrupts in TMU block */
+ sh_tmu_write(p, TCR, 0x0000);
+
/* stop clock */
clk_disable(p->clk);
}
@@ -385,7 +388,6 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
- p->irqaction.mask = CPU_MASK_NONE;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, cfg->clk);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7a74d175287b..7fc58af748b4 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -42,27 +42,12 @@
* this governor will not work.
* All times here are in uS.
*/
-static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
-/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE \
- (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE \
- (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
-/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
- * Define the minimal settable sampling rate to the greater of:
- * - "HW transition latency" * 100 (same as default sampling / 10)
- * - MIN_STAT_SAMPLING_RATE
- * To avoid that userspace shoots itself.
-*/
-static unsigned int minimum_sampling_rate(void)
-{
- return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
-}
-/* This will also vanish soon with removing sampling_rate_max */
-#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
+static unsigned int min_sampling_rate;
+
#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (100)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
@@ -182,27 +167,14 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
- static int print_once;
-
- if (!print_once) {
- printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n",
- current->comm);
- print_once = 1;
- }
- return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
+ printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
+ "sysfs file is deprecated - used by: %s\n", current->comm);
+ return sprintf(buf, "%u\n", -1U);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
- static int print_once;
-
- if (!print_once) {
- printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- print_once = 1;
- }
- return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
+ return sprintf(buf, "%u\n", min_sampling_rate);
}
#define define_one_ro(_name) \
@@ -254,7 +226,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
return -EINVAL;
mutex_lock(&dbs_mutex);
- dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
+ dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
mutex_unlock(&dbs_mutex);
return count;
@@ -601,11 +573,18 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
- def_sampling_rate =
- max(latency * LATENCY_MULTIPLIER,
- MIN_STAT_SAMPLING_RATE);
-
- dbs_tuners_ins.sampling_rate = def_sampling_rate;
+ /*
+ * conservative does not implement micro like ondemand
+ * governor, thus we are bound to jiffes/HZ
+ */
+ min_sampling_rate =
+ MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
+ /* Bring kernel and HW constraints together */
+ min_sampling_rate = max(min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ dbs_tuners_ins.sampling_rate =
+ max(min_sampling_rate,
+ latency * LATENCY_MULTIPLIER);
cpufreq_register_notifier(
&dbs_cpufreq_notifier_block,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e741c339df76..1911d1729353 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -32,6 +32,7 @@
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
+#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
@@ -45,27 +46,12 @@
* this governor will not work.
* All times here are in uS.
*/
-static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
-/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE \
- (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE \
- (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
-/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
- * Define the minimal settable sampling rate to the greater of:
- * - "HW transition latency" * 100 (same as default sampling / 10)
- * - MIN_STAT_SAMPLING_RATE
- * To avoid that userspace shoots itself.
-*/
-static unsigned int minimum_sampling_rate(void)
-{
- return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
-}
-/* This will also vanish soon with removing sampling_rate_max */
-#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
+static unsigned int min_sampling_rate;
+
#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (100)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
@@ -219,28 +205,14 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
- static int print_once;
-
- if (!print_once) {
- printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n",
- current->comm);
- print_once = 1;
- }
- return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
+ printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
+ "sysfs file is deprecated - used by: %s\n", current->comm);
+ return sprintf(buf, "%u\n", -1U);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
- static int print_once;
-
- if (!print_once) {
- printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
- "sysfs file is deprecated - used by: %s\n",
- current->comm);
- print_once = 1;
- }
- return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
+ return sprintf(buf, "%u\n", min_sampling_rate);
}
#define define_one_ro(_name) \
@@ -274,7 +246,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
- dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
+ dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
mutex_unlock(&dbs_mutex);
return count;
@@ -619,12 +591,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
latency = policy->cpuinfo.transition_latency / 1000;
if (latency == 0)
latency = 1;
-
- def_sampling_rate =
- max(latency * LATENCY_MULTIPLIER,
- MIN_STAT_SAMPLING_RATE);
-
- dbs_tuners_ins.sampling_rate = def_sampling_rate;
+ /* Bring kernel and HW constraints together */
+ min_sampling_rate = max(min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ dbs_tuners_ins.sampling_rate =
+ max(min_sampling_rate,
+ latency * LATENCY_MULTIPLIER);
}
dbs_timer_init(this_dbs_info);
@@ -678,6 +650,16 @@ static int __init cpufreq_gov_dbs_init(void)
dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
dbs_tuners_ins.down_differential =
MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ /*
+ * In no_hz/micro accounting case we set the minimum frequency
+ * not depending on HZ, but fixed (very low). The deferred
+ * timer might skip some samples if idle/sleeping as needed.
+ */
+ min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+ } else {
+ /* For correct statistics, we need 10 ticks for each measure */
+ min_sampling_rate =
+ MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
}
kondemand_wq = create_workqueue("kondemand");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3b3c01b6f1ee..070357aaedbc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -81,6 +81,14 @@ config MX3_IPU_IRQS
To avoid bloating the irq_desc[] array we allocate a sufficient
number of IRQ slots and map them dynamically to specific sources.
+config TXX9_DMAC
+ tristate "Toshiba TXx9 SoC DMA support"
+ depends on MACH_TX49XX || MACH_TX39XX
+ select DMA_ENGINE
+ help
+ Support the TXx9 SoC internal DMA controller. This can be
+ integrated in chips such as the Toshiba TX4927/38/39.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2e5dc96700d2..a0b6564800c4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
new file mode 100644
index 000000000000..9aa9ea9822c8
--- /dev/null
+++ b/drivers/dma/txx9dmac.c
@@ -0,0 +1,1354 @@
+/*
+ * Driver for the TXx9 SoC DMA Controller
+ *
+ * Copyright (C) 2009 Atsushi Nemoto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include "txx9dmac.h"
+
+static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct txx9dmac_chan, chan);
+}
+
+static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
+{
+ return dc->ch_regs;
+}
+
+static struct txx9dmac_cregs32 __iomem *__dma_regs32(
+ const struct txx9dmac_chan *dc)
+{
+ return dc->ch_regs;
+}
+
+#define channel64_readq(dc, name) \
+ __raw_readq(&(__dma_regs(dc)->name))
+#define channel64_writeq(dc, name, val) \
+ __raw_writeq((val), &(__dma_regs(dc)->name))
+#define channel64_readl(dc, name) \
+ __raw_readl(&(__dma_regs(dc)->name))
+#define channel64_writel(dc, name, val) \
+ __raw_writel((val), &(__dma_regs(dc)->name))
+
+#define channel32_readl(dc, name) \
+ __raw_readl(&(__dma_regs32(dc)->name))
+#define channel32_writel(dc, name, val) \
+ __raw_writel((val), &(__dma_regs32(dc)->name))
+
+#define channel_readq(dc, name) channel64_readq(dc, name)
+#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
+#define channel_readl(dc, name) \
+ (is_dmac64(dc) ? \
+ channel64_readl(dc, name) : channel32_readl(dc, name))
+#define channel_writel(dc, name, val) \
+ (is_dmac64(dc) ? \
+ channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
+
+static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
+{
+ if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
+ return channel64_readq(dc, CHAR);
+ else
+ return channel64_readl(dc, CHAR);
+}
+
+static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
+{
+ if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
+ channel64_writeq(dc, CHAR, val);
+ else
+ channel64_writel(dc, CHAR, val);
+}
+
+static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
+{
+#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
+ channel64_writel(dc, CHAR, 0);
+ channel64_writel(dc, __pad_CHAR, 0);
+#else
+ channel64_writeq(dc, CHAR, 0);
+#endif
+}
+
+static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
+{
+ if (is_dmac64(dc))
+ return channel64_read_CHAR(dc);
+ else
+ return channel32_readl(dc, CHAR);
+}
+
+static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
+{
+ if (is_dmac64(dc))
+ channel64_write_CHAR(dc, val);
+ else
+ channel32_writel(dc, CHAR, val);
+}
+
+static struct txx9dmac_regs __iomem *__txx9dmac_regs(
+ const struct txx9dmac_dev *ddev)
+{
+ return ddev->regs;
+}
+
+static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
+ const struct txx9dmac_dev *ddev)
+{
+ return ddev->regs;
+}
+
+#define dma64_readl(ddev, name) \
+ __raw_readl(&(__txx9dmac_regs(ddev)->name))
+#define dma64_writel(ddev, name, val) \
+ __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
+
+#define dma32_readl(ddev, name) \
+ __raw_readl(&(__txx9dmac_regs32(ddev)->name))
+#define dma32_writel(ddev, name, val) \
+ __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
+
+#define dma_readl(ddev, name) \
+ (__is_dmac64(ddev) ? \
+ dma64_readl(ddev, name) : dma32_readl(ddev, name))
+#define dma_writel(ddev, name, val) \
+ (__is_dmac64(ddev) ? \
+ dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+ return chan->dev->device.parent;
+}
+
+static struct txx9dmac_desc *
+txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct txx9dmac_desc, txd);
+}
+
+static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
+ const struct txx9dmac_desc *desc)
+{
+ return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
+}
+
+static void desc_write_CHAR(const struct txx9dmac_chan *dc,
+ struct txx9dmac_desc *desc, dma_addr_t val)
+{
+ if (is_dmac64(dc))
+ desc->hwdesc.CHAR = val;
+ else
+ desc->hwdesc32.CHAR = val;
+}
+
+#define TXX9_DMA_MAX_COUNT 0x04000000
+
+#define TXX9_DMA_INITIAL_DESC_COUNT 64
+
+static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
+{
+ return list_entry(dc->active_list.next,
+ struct txx9dmac_desc, desc_node);
+}
+
+static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
+{
+ return list_entry(dc->active_list.prev,
+ struct txx9dmac_desc, desc_node);
+}
+
+static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
+{
+ return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
+}
+
+static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
+{
+ if (!list_empty(&desc->txd.tx_list))
+ desc = list_entry(desc->txd.tx_list.prev,
+ struct txx9dmac_desc, desc_node);
+ return desc;
+}
+
+static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
+
+static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
+ gfp_t flags)
+{
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), flags);
+ if (!desc)
+ return NULL;
+ dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
+ desc->txd.tx_submit = txx9dmac_tx_submit;
+ /* txd.flags will be overwritten in prep funcs */
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
+ ddev->descsize, DMA_TO_DEVICE);
+ return desc;
+}
+
+static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
+{
+ struct txx9dmac_desc *desc, *_desc;
+ struct txx9dmac_desc *ret = NULL;
+ unsigned int i = 0;
+
+ spin_lock_bh(&dc->lock);
+ list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del(&desc->desc_node);
+ ret = desc;
+ break;
+ }
+ dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
+ i++;
+ }
+ spin_unlock_bh(&dc->lock);
+
+ dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
+ i);
+ if (!ret) {
+ ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
+ if (ret) {
+ spin_lock_bh(&dc->lock);
+ dc->descs_allocated++;
+ spin_unlock_bh(&dc->lock);
+ } else
+ dev_err(chan2dev(&dc->chan),
+ "not enough descriptors available\n");
+ }
+ return ret;
+}
+
+static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
+ struct txx9dmac_desc *desc)
+{
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_desc *child;
+
+ list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ dma_sync_single_for_cpu(chan2parent(&dc->chan),
+ child->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(chan2parent(&dc->chan),
+ desc->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+}
+
+/*
+ * Move a descriptor, including any children, to the free list.
+ * `desc' must not be on any lists.
+ */
+static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
+ struct txx9dmac_desc *desc)
+{
+ if (desc) {
+ struct txx9dmac_desc *child;
+
+ txx9dmac_sync_desc_for_cpu(dc, desc);
+
+ spin_lock_bh(&dc->lock);
+ list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ dev_vdbg(chan2dev(&dc->chan),
+ "moving child desc %p to freelist\n",
+ child);
+ list_splice_init(&desc->txd.tx_list, &dc->free_list);
+ dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
+ desc);
+ list_add(&desc->desc_node, &dc->free_list);
+ spin_unlock_bh(&dc->lock);
+ }
+}
+
+/* Called with dc->lock held and bh disabled */
+static dma_cookie_t
+txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
+{
+ dma_cookie_t cookie = dc->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ dc->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ return cookie;
+}
+
+/*----------------------------------------------------------------------*/
+
+static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
+{
+ if (is_dmac64(dc))
+ dev_err(chan2dev(&dc->chan),
+ " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
+ " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
+ (u64)channel64_read_CHAR(dc),
+ channel64_readq(dc, SAR),
+ channel64_readq(dc, DAR),
+ channel64_readl(dc, CNTR),
+ channel64_readl(dc, SAIR),
+ channel64_readl(dc, DAIR),
+ channel64_readl(dc, CCR),
+ channel64_readl(dc, CSR));
+ else
+ dev_err(chan2dev(&dc->chan),
+ " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
+ " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
+ channel32_readl(dc, CHAR),
+ channel32_readl(dc, SAR),
+ channel32_readl(dc, DAR),
+ channel32_readl(dc, CNTR),
+ channel32_readl(dc, SAIR),
+ channel32_readl(dc, DAIR),
+ channel32_readl(dc, CCR),
+ channel32_readl(dc, CSR));
+}
+
+static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
+{
+ channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
+ if (is_dmac64(dc)) {
+ channel64_clear_CHAR(dc);
+ channel_writeq(dc, SAR, 0);
+ channel_writeq(dc, DAR, 0);
+ } else {
+ channel_writel(dc, CHAR, 0);
+ channel_writel(dc, SAR, 0);
+ channel_writel(dc, DAR, 0);
+ }
+ channel_writel(dc, CNTR, 0);
+ channel_writel(dc, SAIR, 0);
+ channel_writel(dc, DAIR, 0);
+ channel_writel(dc, CCR, 0);
+ mmiowb();
+}
+
+/* Called with dc->lock held and bh disabled */
+static void txx9dmac_dostart(struct txx9dmac_chan *dc,
+ struct txx9dmac_desc *first)
+{
+ struct txx9dmac_slave *ds = dc->chan.private;
+ u32 sai, dai;
+
+ dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
+ first->txd.cookie, first);
+ /* ASSERT: channel is idle */
+ if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
+ dev_err(chan2dev(&dc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ txx9dmac_dump_regs(dc);
+ /* The tasklet will hopefully advance the queue... */
+ return;
+ }
+
+ if (is_dmac64(dc)) {
+ channel64_writel(dc, CNTR, 0);
+ channel64_writel(dc, CSR, 0xffffffff);
+ if (ds) {
+ if (ds->tx_reg) {
+ sai = ds->reg_width;
+ dai = 0;
+ } else {
+ sai = 0;
+ dai = ds->reg_width;
+ }
+ } else {
+ sai = 8;
+ dai = 8;
+ }
+ channel64_writel(dc, SAIR, sai);
+ channel64_writel(dc, DAIR, dai);
+ /* All 64-bit DMAC supports SMPCHN */
+ channel64_writel(dc, CCR, dc->ccr);
+ /* Writing a non zero value to CHAR will assert XFACT */
+ channel64_write_CHAR(dc, first->txd.phys);
+ } else {
+ channel32_writel(dc, CNTR, 0);
+ channel32_writel(dc, CSR, 0xffffffff);
+ if (ds) {
+ if (ds->tx_reg) {
+ sai = ds->reg_width;
+ dai = 0;
+ } else {
+ sai = 0;
+ dai = ds->reg_width;
+ }
+ } else {
+ sai = 4;
+ dai = 4;
+ }
+ channel32_writel(dc, SAIR, sai);
+ channel32_writel(dc, DAIR, dai);
+ if (txx9_dma_have_SMPCHN()) {
+ channel32_writel(dc, CCR, dc->ccr);
+ /* Writing a non zero value to CHAR will assert XFACT */
+ channel32_writel(dc, CHAR, first->txd.phys);
+ } else {
+ channel32_writel(dc, CHAR, first->txd.phys);
+ channel32_writel(dc, CCR, dc->ccr);
+ }
+ }
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
+ struct txx9dmac_desc *desc)
+{
+ dma_async_tx_callback callback;
+ void *param;
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ struct txx9dmac_slave *ds = dc->chan.private;
+
+ dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
+ txd->cookie, desc);
+
+ dc->completed = txd->cookie;
+ callback = txd->callback;
+ param = txd->callback_param;
+
+ txx9dmac_sync_desc_for_cpu(dc, desc);
+ list_splice_init(&txd->tx_list, &dc->free_list);
+ list_move(&desc->desc_node, &dc->free_list);
+
+ /*
+ * We use dma_unmap_page() regardless of how the buffers were
+ * mapped before they were submitted...
+ */
+ if (!ds) {
+ dma_addr_t dmaaddr;
+ if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ dmaaddr = is_dmac64(dc) ?
+ desc->hwdesc.DAR : desc->hwdesc32.DAR;
+ dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
+ desc->len, DMA_FROM_DEVICE);
+ }
+ if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ dmaaddr = is_dmac64(dc) ?
+ desc->hwdesc.SAR : desc->hwdesc32.SAR;
+ dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
+ desc->len, DMA_TO_DEVICE);
+ }
+ }
+
+ /*
+ * The API requires that no submissions are done from a
+ * callback, so we don't need to drop the lock here
+ */
+ if (callback)
+ callback(param);
+ dma_run_dependencies(txd);
+}
+
+static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
+{
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_desc *desc;
+ struct txx9dmac_desc *prev = NULL;
+
+ BUG_ON(!list_empty(list));
+ do {
+ desc = txx9dmac_first_queued(dc);
+ if (prev) {
+ desc_write_CHAR(dc, prev, desc->txd.phys);
+ dma_sync_single_for_device(chan2parent(&dc->chan),
+ prev->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+ }
+ prev = txx9dmac_last_child(desc);
+ list_move_tail(&desc->desc_node, list);
+ /* Make chain-completion interrupt happen */
+ if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
+ !txx9dmac_chan_INTENT(dc))
+ break;
+ } while (!list_empty(&dc->queue));
+}
+
+static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
+{
+ struct txx9dmac_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ /*
+ * Submit queued descriptors ASAP, i.e. before we go through
+ * the completed ones.
+ */
+ list_splice_init(&dc->active_list, &list);
+ if (!list_empty(&dc->queue)) {
+ txx9dmac_dequeue(dc, &dc->active_list);
+ txx9dmac_dostart(dc, txx9dmac_first_active(dc));
+ }
+
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ txx9dmac_descriptor_complete(dc, desc);
+}
+
+static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
+ struct txx9dmac_hwdesc *desc)
+{
+ if (is_dmac64(dc)) {
+#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
+ dev_crit(chan2dev(&dc->chan),
+ " desc: ch%#llx s%#llx d%#llx c%#x\n",
+ (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
+#else
+ dev_crit(chan2dev(&dc->chan),
+ " desc: ch%#llx s%#llx d%#llx c%#x"
+ " si%#x di%#x cc%#x cs%#x\n",
+ (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
+ desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
+#endif
+ } else {
+ struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
+#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
+ dev_crit(chan2dev(&dc->chan),
+ " desc: ch%#x s%#x d%#x c%#x\n",
+ d->CHAR, d->SAR, d->DAR, d->CNTR);
+#else
+ dev_crit(chan2dev(&dc->chan),
+ " desc: ch%#x s%#x d%#x c%#x"
+ " si%#x di%#x cc%#x cs%#x\n",
+ d->CHAR, d->SAR, d->DAR, d->CNTR,
+ d->SAIR, d->DAIR, d->CCR, d->CSR);
+#endif
+ }
+}
+
+static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
+{
+ struct txx9dmac_desc *bad_desc;
+ struct txx9dmac_desc *child;
+ u32 errors;
+
+ /*
+ * The descriptor currently at the head of the active list is
+ * borked. Since we don't have any way to report errors, we'll
+ * just have to scream loudly and try to carry on.
+ */
+ dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
+ txx9dmac_dump_regs(dc);
+
+ bad_desc = txx9dmac_first_active(dc);
+ list_del_init(&bad_desc->desc_node);
+
+ /* Clear all error flags and try to restart the controller */
+ errors = csr & (TXX9_DMA_CSR_ABCHC |
+ TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
+ TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
+ channel_writel(dc, CSR, errors);
+
+ if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
+ txx9dmac_dequeue(dc, &dc->active_list);
+ if (!list_empty(&dc->active_list))
+ txx9dmac_dostart(dc, txx9dmac_first_active(dc));
+
+ dev_crit(chan2dev(&dc->chan),
+ "Bad descriptor submitted for DMA! (cookie: %d)\n",
+ bad_desc->txd.cookie);
+ txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
+ list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
+ txx9dmac_dump_desc(dc, &child->hwdesc);
+ /* Pretend the descriptor completed successfully */
+ txx9dmac_descriptor_complete(dc, bad_desc);
+}
+
+static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
+{
+ dma_addr_t chain;
+ struct txx9dmac_desc *desc, *_desc;
+ struct txx9dmac_desc *child;
+ u32 csr;
+
+ if (is_dmac64(dc)) {
+ chain = channel64_read_CHAR(dc);
+ csr = channel64_readl(dc, CSR);
+ channel64_writel(dc, CSR, csr);
+ } else {
+ chain = channel32_readl(dc, CHAR);
+ csr = channel32_readl(dc, CSR);
+ channel32_writel(dc, CSR, csr);
+ }
+ /* For dynamic chain, we should look at XFACT instead of NCHNC */
+ if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
+ /* Everything we've submitted is done */
+ txx9dmac_complete_all(dc);
+ return;
+ }
+ if (!(csr & TXX9_DMA_CSR_CHNEN))
+ chain = 0; /* last descriptor of this chain */
+
+ dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
+ (u64)chain);
+
+ list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
+ if (desc_read_CHAR(dc, desc) == chain) {
+ /* This one is currently in progress */
+ if (csr & TXX9_DMA_CSR_ABCHC)
+ goto scan_done;
+ return;
+ }
+
+ list_for_each_entry(child, &desc->txd.tx_list, desc_node)
+ if (desc_read_CHAR(dc, child) == chain) {
+ /* Currently in progress */
+ if (csr & TXX9_DMA_CSR_ABCHC)
+ goto scan_done;
+ return;
+ }
+
+ /*
+ * No descriptors so far seem to be in progress, i.e.
+ * this one must be done.
+ */
+ txx9dmac_descriptor_complete(dc, desc);
+ }
+scan_done:
+ if (csr & TXX9_DMA_CSR_ABCHC) {
+ txx9dmac_handle_error(dc, csr);
+ return;
+ }
+
+ dev_err(chan2dev(&dc->chan),
+ "BUG: All descriptors done, but channel not idle!\n");
+
+ /* Try to continue after resetting the channel... */
+ txx9dmac_reset_chan(dc);
+
+ if (!list_empty(&dc->queue)) {
+ txx9dmac_dequeue(dc, &dc->active_list);
+ txx9dmac_dostart(dc, txx9dmac_first_active(dc));
+ }
+}
+
+static void txx9dmac_chan_tasklet(unsigned long data)
+{
+ int irq;
+ u32 csr;
+ struct txx9dmac_chan *dc;
+
+ dc = (struct txx9dmac_chan *)data;
+ csr = channel_readl(dc, CSR);
+ dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
+
+ spin_lock(&dc->lock);
+ if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
+ TXX9_DMA_CSR_NTRNFC))
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock(&dc->lock);
+ irq = dc->irq;
+
+ enable_irq(irq);
+}
+
+static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
+{
+ struct txx9dmac_chan *dc = dev_id;
+
+ dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
+ channel_readl(dc, CSR));
+
+ tasklet_schedule(&dc->tasklet);
+ /*
+ * Just disable the interrupts. We'll turn them back on in the
+ * softirq handler.
+ */
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+static void txx9dmac_tasklet(unsigned long data)
+{
+ int irq;
+ u32 csr;
+ struct txx9dmac_chan *dc;
+
+ struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
+ u32 mcr;
+ int i;
+
+ mcr = dma_readl(ddev, MCR);
+ dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
+ for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
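+ /* 0x11 covers TXX9_DMA_MCR_DIS(i) and TXX9_DMA_MCR_EIS(i) for this channel */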
+ if ((mcr >> (24 + i)) & 0x11) {
+ dc = ddev->chan[i];
+ csr = channel_readl(dc, CSR);
+ dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
+ csr);
+ spin_lock(&dc->lock);
+ if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
+ TXX9_DMA_CSR_NTRNFC))
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock(&dc->lock);
+ }
+ }
+ irq = ddev->irq;
+
+ enable_irq(irq);
+}
+
+static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
+{
+ struct txx9dmac_dev *ddev = dev_id;
+
+ dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
+ dma_readl(ddev, MCR));
+
+ tasklet_schedule(&ddev->tasklet);
+ /*
+ * Just disable the interrupts. We'll turn them back on in the
+ * softirq handler.
+ */
+ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&dc->lock);
+ cookie = txx9dmac_assign_cookie(dc, desc);
+
+ dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
+ desc->txd.cookie, desc);
+
+ list_add_tail(&desc->desc_node, &dc->queue);
+ spin_unlock_bh(&dc->lock);
+
+ return cookie;
+}
+
+static struct dma_async_tx_descriptor *
+txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_desc *desc;
+ struct txx9dmac_desc *first;
+ struct txx9dmac_desc *prev;
+ size_t xfer_count;
+ size_t offset;
+
+ dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
+ (u64)dest, (u64)src, len, flags);
+
+ if (unlikely(!len)) {
+ dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+ return NULL;
+ }
+
+ prev = first = NULL;
+
+ for (offset = 0; offset < len; offset += xfer_count) {
+ xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
+ /*
+ * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
+ * ERT-TX49H4-016 (slightly conservative)
+ */
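+ /*
+ * For example, on the 64-bit DMAC a 0x1fa-byte chunk is trimmed
+ * to 0x1da here; the remaining 0x20 bytes are picked up by the
+ * next iteration of the enclosing loop.
+ */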
+ if (__is_dmac64(ddev)) {
+ if (xfer_count > 0x100 &&
+ (xfer_count & 0xff) >= 0xfa &&
+ (xfer_count & 0xff) <= 0xff)
+ xfer_count -= 0x20;
+ } else {
+ if (xfer_count > 0x80 &&
+ (xfer_count & 0x7f) >= 0x7e &&
+ (xfer_count & 0x7f) <= 0x7f)
+ xfer_count -= 0x20;
+ }
+
+ desc = txx9dmac_desc_get(dc);
+ if (!desc) {
+ txx9dmac_desc_put(dc, first);
+ return NULL;
+ }
+
+ if (__is_dmac64(ddev)) {
+ desc->hwdesc.SAR = src + offset;
+ desc->hwdesc.DAR = dest + offset;
+ desc->hwdesc.CNTR = xfer_count;
+ txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
+ dc->ccr | TXX9_DMA_CCR_XFACT);
+ } else {
+ desc->hwdesc32.SAR = src + offset;
+ desc->hwdesc32.DAR = dest + offset;
+ desc->hwdesc32.CNTR = xfer_count;
+ txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
+ dc->ccr | TXX9_DMA_CCR_XFACT);
+ }
+
+ /*
+ * The descriptors on tx_list are not reachable from
+ * the dc->queue list or dc->active_list after a
+ * submit. If we put all descriptors on active_list,
+ * invoking the completion callback would become more
+ * complex.
+ */
+ if (!first) {
+ first = desc;
+ } else {
+ desc_write_CHAR(dc, prev, desc->txd.phys);
+ dma_sync_single_for_device(chan2parent(&dc->chan),
+ prev->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+ list_add_tail(&desc->desc_node,
+ &first->txd.tx_list);
+ }
+ prev = desc;
+ }
+
+ /* Trigger interrupt after last block */
+ if (flags & DMA_PREP_INTERRUPT)
+ txx9dmac_desc_set_INTENT(ddev, prev);
+
+ desc_write_CHAR(dc, prev, 0);
+ dma_sync_single_for_device(chan2parent(&dc->chan),
+ prev->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+
+ first->txd.flags = flags;
+ first->len = len;
+
+ return &first->txd;
+}
+
+static struct dma_async_tx_descriptor *
+txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_slave *ds = chan->private;
+ struct txx9dmac_desc *prev;
+ struct txx9dmac_desc *first;
+ unsigned int i;
+ struct scatterlist *sg;
+
+ dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
+
+ BUG_ON(!ds || !ds->reg_width);
+ if (ds->tx_reg)
+ BUG_ON(direction != DMA_TO_DEVICE);
+ else
+ BUG_ON(direction != DMA_FROM_DEVICE);
+ if (unlikely(!sg_len))
+ return NULL;
+
+ prev = first = NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ struct txx9dmac_desc *desc;
+ dma_addr_t mem;
+ u32 sai, dai;
+
+ desc = txx9dmac_desc_get(dc);
+ if (!desc) {
+ txx9dmac_desc_put(dc, first);
+ return NULL;
+ }
+
+ mem = sg_dma_address(sg);
+
+ if (__is_dmac64(ddev)) {
+ if (direction == DMA_TO_DEVICE) {
+ desc->hwdesc.SAR = mem;
+ desc->hwdesc.DAR = ds->tx_reg;
+ } else {
+ desc->hwdesc.SAR = ds->rx_reg;
+ desc->hwdesc.DAR = mem;
+ }
+ desc->hwdesc.CNTR = sg_dma_len(sg);
+ } else {
+ if (direction == DMA_TO_DEVICE) {
+ desc->hwdesc32.SAR = mem;
+ desc->hwdesc32.DAR = ds->tx_reg;
+ } else {
+ desc->hwdesc32.SAR = ds->rx_reg;
+ desc->hwdesc32.DAR = mem;
+ }
+ desc->hwdesc32.CNTR = sg_dma_len(sg);
+ }
+ if (direction == DMA_TO_DEVICE) {
+ sai = ds->reg_width;
+ dai = 0;
+ } else {
+ sai = 0;
+ dai = ds->reg_width;
+ }
+ txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
+ dc->ccr | TXX9_DMA_CCR_XFACT);
+
+ if (!first) {
+ first = desc;
+ } else {
+ desc_write_CHAR(dc, prev, desc->txd.phys);
+ dma_sync_single_for_device(chan2parent(&dc->chan),
+ prev->txd.phys,
+ ddev->descsize,
+ DMA_TO_DEVICE);
+ list_add_tail(&desc->desc_node,
+ &first->txd.tx_list);
+ }
+ prev = desc;
+ }
+
+ /* Trigger interrupt after last block */
+ if (flags & DMA_PREP_INTERRUPT)
+ txx9dmac_desc_set_INTENT(ddev, prev);
+
+ desc_write_CHAR(dc, prev, 0);
+ dma_sync_single_for_device(chan2parent(&dc->chan),
+ prev->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+
+ first->txd.flags = flags;
+ first->len = 0;
+
+ return &first->txd;
+}
+
+static void txx9dmac_terminate_all(struct dma_chan *chan)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+ struct txx9dmac_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ dev_vdbg(chan2dev(chan), "terminate_all\n");
+ spin_lock_bh(&dc->lock);
+
+ txx9dmac_reset_chan(dc);
+
+ /* active_list entries will end up before queued entries */
+ list_splice_init(&dc->queue, &list);
+ list_splice_init(&dc->active_list, &list);
+
+ spin_unlock_bh(&dc->lock);
+
+ /* Flush all pending and queued descriptors */
+ list_for_each_entry_safe(desc, _desc, &list, desc_node)
+ txx9dmac_descriptor_complete(dc, desc);
+}
+
+static enum dma_status
+txx9dmac_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = dc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&dc->lock);
+ txx9dmac_scan_descriptors(dc);
+ spin_unlock_bh(&dc->lock);
+
+ last_complete = dc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ }
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return ret;
+}
+
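+/*
+ * Append the descriptors waiting on dc->queue to the end of the chain
+ * that is already running and, if the channel has meanwhile stopped at
+ * the old tail, restart it by rewriting CHAR (simple-chain mode only).
+ */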
+static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
+ struct txx9dmac_desc *prev)
+{
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_desc *desc;
+ LIST_HEAD(list);
+
+ prev = txx9dmac_last_child(prev);
+ txx9dmac_dequeue(dc, &list);
+ desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
+ desc_write_CHAR(dc, prev, desc->txd.phys);
+ dma_sync_single_for_device(chan2parent(&dc->chan),
+ prev->txd.phys, ddev->descsize,
+ DMA_TO_DEVICE);
+ mmiowb();
+ if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
+ channel_read_CHAR(dc) == prev->txd.phys)
+ /* Restart chain DMA */
+ channel_write_CHAR(dc, desc->txd.phys);
+ list_splice_tail(&list, &dc->active_list);
+}
+
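+/*
+ * Reap any finished descriptors, start dc->queue if the channel is idle,
+ * or, in simple-chain mode, try to append the queue to the chain that is
+ * already running.
+ */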
+static void txx9dmac_issue_pending(struct dma_chan *chan)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+
+ spin_lock_bh(&dc->lock);
+
+ if (!list_empty(&dc->active_list))
+ txx9dmac_scan_descriptors(dc);
+ if (!list_empty(&dc->queue)) {
+ if (list_empty(&dc->active_list)) {
+ txx9dmac_dequeue(dc, &dc->active_list);
+ txx9dmac_dostart(dc, txx9dmac_first_active(dc));
+ } else if (txx9_dma_have_SMPCHN()) {
+ struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
+
+ if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
+ txx9dmac_chan_INTENT(dc))
+ txx9dmac_chain_dynamic(dc, prev);
+ }
+ }
+
+ spin_unlock_bh(&dc->lock);
+}
+
+static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+ struct txx9dmac_slave *ds = chan->private;
+ struct txx9dmac_desc *desc;
+ int i;
+
+ dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
+
+ /* ASSERT: channel is idle */
+ if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
+ dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
+ return -EIO;
+ }
+
+ dc->completed = chan->cookie = 1;
+
+ dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
+ txx9dmac_chan_set_SMPCHN(dc);
+ if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
+ dc->ccr |= TXX9_DMA_CCR_INTENC;
+ if (chan->device->device_prep_dma_memcpy) {
+ if (ds)
+ return -EINVAL;
+ dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
+ } else {
+ if (!ds ||
+ (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
+ return -EINVAL;
+ dc->ccr |= TXX9_DMA_CCR_EXTRQ |
+ TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
+ txx9dmac_chan_set_INTENT(dc);
+ }
+
+ spin_lock_bh(&dc->lock);
+ i = dc->descs_allocated;
+ while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
+ spin_unlock_bh(&dc->lock);
+
+ desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
+ if (!desc) {
+ dev_info(chan2dev(chan),
+ "only allocated %d descriptors\n", i);
+ spin_lock_bh(&dc->lock);
+ break;
+ }
+ txx9dmac_desc_put(dc, desc);
+
+ spin_lock_bh(&dc->lock);
+ i = ++dc->descs_allocated;
+ }
+ spin_unlock_bh(&dc->lock);
+
+ dev_dbg(chan2dev(chan),
+ "alloc_chan_resources allocated %d descriptors\n", i);
+
+ return i;
+}
+
+static void txx9dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
+ struct txx9dmac_dev *ddev = dc->ddev;
+ struct txx9dmac_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
+ dc->descs_allocated);
+
+ /* ASSERT: channel is idle */
+ BUG_ON(!list_empty(&dc->active_list));
+ BUG_ON(!list_empty(&dc->queue));
+ BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
+
+ spin_lock_bh(&dc->lock);
+ list_splice_init(&dc->free_list, &list);
+ dc->descs_allocated = 0;
+ spin_unlock_bh(&dc->lock);
+
+ list_for_each_entry_safe(desc, _desc, &list, desc_node) {
+ dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
+ dma_unmap_single(chan2parent(chan), desc->txd.phys,
+ ddev->descsize, DMA_TO_DEVICE);
+ kfree(desc);
+ }
+
+ dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
+}
+
+/*----------------------------------------------------------------------*/
+
+static void txx9dmac_off(struct txx9dmac_dev *ddev)
+{
+ dma_writel(ddev, MCR, 0);
+ mmiowb();
+}
+
+static int __init txx9dmac_chan_probe(struct platform_device *pdev)
+{
+ struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
+ struct platform_device *dmac_dev = cpdata->dmac_dev;
+ struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
+ struct txx9dmac_chan *dc;
+ int err;
+ int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
+ int irq;
+
+ dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ return -ENOMEM;
+
+ dc->dma.dev = &pdev->dev;
+ dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
+ dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
+ dc->dma.device_terminate_all = txx9dmac_terminate_all;
+ dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
+ dc->dma.device_issue_pending = txx9dmac_issue_pending;
+ if (pdata && pdata->memcpy_chan == ch) {
+ dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
+ dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
+ } else {
+ dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
+ dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
+ }
+
+ INIT_LIST_HEAD(&dc->dma.channels);
+ dc->ddev = platform_get_drvdata(dmac_dev);
+ if (dc->ddev->irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
+ (unsigned long)dc);
+ dc->irq = irq;
+ err = devm_request_irq(&pdev->dev, dc->irq,
+ txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
+ if (err)
+ return err;
+ } else
+ dc->irq = -1;
+ dc->ddev->chan[ch] = dc;
+ dc->chan.device = &dc->dma;
+ list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
+ dc->chan.cookie = dc->completed = 1;
+
+ if (is_dmac64(dc))
+ dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
+ else
+ dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
+ spin_lock_init(&dc->lock);
+
+ INIT_LIST_HEAD(&dc->active_list);
+ INIT_LIST_HEAD(&dc->queue);
+ INIT_LIST_HEAD(&dc->free_list);
+
+ txx9dmac_reset_chan(dc);
+
+ platform_set_drvdata(pdev, dc);
+
+ err = dma_async_device_register(&dc->dma);
+ if (err)
+ return err;
+ dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
+ dc->dma.dev_id,
+ dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
+ dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
+
+ return 0;
+}
+
+static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
+{
+ struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&dc->dma);
+ if (dc->irq >= 0)
+ tasklet_kill(&dc->tasklet);
+ dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
+ return 0;
+}
+
+static int __init txx9dmac_probe(struct platform_device *pdev)
+{
+ struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *io;
+ struct txx9dmac_dev *ddev;
+ u32 mcr;
+ int err;
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io)
+ return -EINVAL;
+
+ ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return -ENOMEM;
+
+ if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
+ dev_name(&pdev->dev)))
+ return -EBUSY;
+
+ ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
+ if (!ddev->regs)
+ return -ENOMEM;
+ ddev->have_64bit_regs = pdata->have_64bit_regs;
+ if (__is_dmac64(ddev))
+ ddev->descsize = sizeof(struct txx9dmac_hwdesc);
+ else
+ ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
+
+ /* force dma off, just in case */
+ txx9dmac_off(ddev);
+
+ ddev->irq = platform_get_irq(pdev, 0);
+ if (ddev->irq >= 0) {
+ tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
+ (unsigned long)ddev);
+ err = devm_request_irq(&pdev->dev, ddev->irq,
+ txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
+ if (err)
+ return err;
+ }
+
+ mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
+ if (pdata && pdata->memcpy_chan >= 0)
+ mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
+ dma_writel(ddev, MCR, mcr);
+
+ platform_set_drvdata(pdev, ddev);
+ return 0;
+}
+
+static int __exit txx9dmac_remove(struct platform_device *pdev)
+{
+ struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
+
+ txx9dmac_off(ddev);
+ if (ddev->irq >= 0)
+ tasklet_kill(&ddev->tasklet);
+ return 0;
+}
+
+static void txx9dmac_shutdown(struct platform_device *pdev)
+{
+ struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
+
+ txx9dmac_off(ddev);
+}
+
+static int txx9dmac_suspend_late(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
+
+ txx9dmac_off(ddev);
+ return 0;
+}
+
+static int txx9dmac_resume_early(struct platform_device *pdev)
+{
+ struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
+ struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
+ u32 mcr;
+
+ mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
+ if (pdata && pdata->memcpy_chan >= 0)
+ mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
+ dma_writel(ddev, MCR, mcr);
+ return 0;
+}
+
+static struct platform_driver txx9dmac_chan_driver = {
+ .remove = __exit_p(txx9dmac_chan_remove),
+ .driver = {
+ .name = "txx9dmac-chan",
+ },
+};
+
+static struct platform_driver txx9dmac_driver = {
+ .remove = __exit_p(txx9dmac_remove),
+ .shutdown = txx9dmac_shutdown,
+ .suspend_late = txx9dmac_suspend_late,
+ .resume_early = txx9dmac_resume_early,
+ .driver = {
+ .name = "txx9dmac",
+ },
+};
+
+static int __init txx9dmac_init(void)
+{
+ int rc;
+
+ rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
+ if (!rc) {
+ rc = platform_driver_probe(&txx9dmac_chan_driver,
+ txx9dmac_chan_probe);
+ if (rc)
+ platform_driver_unregister(&txx9dmac_driver);
+ }
+ return rc;
+}
+module_init(txx9dmac_init);
+
+static void __exit txx9dmac_exit(void)
+{
+ platform_driver_unregister(&txx9dmac_chan_driver);
+ platform_driver_unregister(&txx9dmac_driver);
+}
+module_exit(txx9dmac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TXx9 DMA Controller driver");
+MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
new file mode 100644
index 000000000000..c907ff01d276
--- /dev/null
+++ b/drivers/dma/txx9dmac.h
@@ -0,0 +1,307 @@
+/*
+ * Driver for the TXx9 SoC DMA Controller
+ *
+ * Copyright (C) 2009 Atsushi Nemoto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef TXX9DMAC_H
+#define TXX9DMAC_H
+
+#include <linux/dmaengine.h>
+#include <asm/txx9/dmac.h>
+
+/*
+ * Design Notes:
+ *
+ * This DMAC has four channels and one FIFO buffer. Each channel can
+ * be configured for memory-memory or device-memory transfer, but only
+ * one channel can do alignment-free memory-memory transfer at a time,
+ * and that channel should own the FIFO buffer for the transfers to be
+ * effective.
+ *
+ * Instead of assigning the FIFO buffer to channels dynamically, I chose
+ * to dedicate one channel to memory-memory transfer. The dedicated
+ * channel is public. The other channels are private and used for slave
+ * transfer. Some devices in the SoC are wired to a specific DMA
+ * channel.
+ */
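+
+/*
+ * Rough usage sketch for a slave-channel client (illustrative only; the
+ * FIFO address and the "example_" names below are made up, not taken
+ * from an in-tree driver): the client describes its request line with a
+ * struct txx9dmac_slave, hands it to the channel through chan->private
+ * from a dma_request_channel() filter, and then builds transfers with
+ * device_prep_slave_sg():
+ *
+ *	static struct txx9dmac_slave example_slave = {
+ *		.tx_reg    = 0xff1f0000,
+ *		.reg_width = 4,
+ *	};
+ *
+ *	static bool example_filter(struct dma_chan *chan, void *arg)
+ *	{
+ *		chan->private = arg;
+ *		return true;
+ *	}
+ *
+ *	dma_cap_mask_t mask;
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, example_filter, &example_slave);
+ */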
+
+#ifdef CONFIG_MACH_TX49XX
+static inline bool txx9_dma_have_SMPCHN(void)
+{
+ return true;
+}
+#define TXX9_DMA_USE_SIMPLE_CHAIN
+#else
+static inline bool txx9_dma_have_SMPCHN(void)
+{
+ return false;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#ifdef CONFIG_MACH_TX49XX
+#define CCR_LE TXX9_DMA_CCR_LE
+#define MCR_LE 0
+#else
+#define CCR_LE 0
+#define MCR_LE TXX9_DMA_MCR_LE
+#endif
+#else
+#define CCR_LE 0
+#define MCR_LE 0
+#endif
+
+/*
+ * Layout helper for a 32-bit register stored in a 64-bit register slot;
+ * which half of the slot the named field occupies depends on endianness.
+ */
+#ifdef __BIG_ENDIAN
+#define TXX9_DMA_REG32(name) u32 __pad_##name; u32 name
+#else
+#define TXX9_DMA_REG32(name) u32 name; u32 __pad_##name
+#endif
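+
+/*
+ * For example, TXX9_DMA_REG32(CNTR) expands to "u32 __pad_CNTR; u32 CNTR;"
+ * on big-endian and to "u32 CNTR; u32 __pad_CNTR;" on little-endian, so the
+ * named field always overlays the low-order 32 bits of the 64-bit slot.
+ */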
+
+/* Hardware register definitions. */
+struct txx9dmac_cregs {
+#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
+ TXX9_DMA_REG32(CHAR); /* Chain Address Register */
+#else
+ u64 CHAR; /* Chain Address Register */
+#endif
+ u64 SAR; /* Source Address Register */
+ u64 DAR; /* Destination Address Register */
+ TXX9_DMA_REG32(CNTR); /* Count Register */
+ TXX9_DMA_REG32(SAIR); /* Source Address Increment Register */
+ TXX9_DMA_REG32(DAIR); /* Destination Address Increment Register */
+ TXX9_DMA_REG32(CCR); /* Channel Control Register */
+ TXX9_DMA_REG32(CSR); /* Channel Status Register */
+};
+struct txx9dmac_cregs32 {
+ u32 CHAR;
+ u32 SAR;
+ u32 DAR;
+ u32 CNTR;
+ u32 SAIR;
+ u32 DAIR;
+ u32 CCR;
+ u32 CSR;
+};
+
+struct txx9dmac_regs {
+ /* per-channel registers */
+ struct txx9dmac_cregs CHAN[TXX9_DMA_MAX_NR_CHANNELS];
+ u64 __pad[9];
+ u64 MFDR; /* Memory Fill Data Register */
+ TXX9_DMA_REG32(MCR); /* Master Control Register */
+};
+struct txx9dmac_regs32 {
+ struct txx9dmac_cregs32 CHAN[TXX9_DMA_MAX_NR_CHANNELS];
+ u32 __pad[9];
+ u32 MFDR;
+ u32 MCR;
+};
+
+/* bits for MCR */
+#define TXX9_DMA_MCR_EIS(ch) (0x10000000<<(ch))
+#define TXX9_DMA_MCR_DIS(ch) (0x01000000<<(ch))
+#define TXX9_DMA_MCR_RSFIF 0x00000080
+#define TXX9_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
+#define TXX9_DMA_MCR_LE 0x00000004
+#define TXX9_DMA_MCR_RPRT 0x00000002
+#define TXX9_DMA_MCR_MSTEN 0x00000001
+
+/* bits for CCRn */
+#define TXX9_DMA_CCR_IMMCHN 0x20000000
+#define TXX9_DMA_CCR_USEXFSZ 0x10000000
+#define TXX9_DMA_CCR_LE 0x08000000
+#define TXX9_DMA_CCR_DBINH 0x04000000
+#define TXX9_DMA_CCR_SBINH 0x02000000
+#define TXX9_DMA_CCR_CHRST 0x01000000
+#define TXX9_DMA_CCR_RVBYTE 0x00800000
+#define TXX9_DMA_CCR_ACKPOL 0x00400000
+#define TXX9_DMA_CCR_REQPL 0x00200000
+#define TXX9_DMA_CCR_EGREQ 0x00100000
+#define TXX9_DMA_CCR_CHDN 0x00080000
+#define TXX9_DMA_CCR_DNCTL 0x00060000
+#define TXX9_DMA_CCR_EXTRQ 0x00010000
+#define TXX9_DMA_CCR_INTRQD 0x0000e000
+#define TXX9_DMA_CCR_INTENE 0x00001000
+#define TXX9_DMA_CCR_INTENC 0x00000800
+#define TXX9_DMA_CCR_INTENT 0x00000400
+#define TXX9_DMA_CCR_CHNEN 0x00000200
+#define TXX9_DMA_CCR_XFACT 0x00000100
+#define TXX9_DMA_CCR_SMPCHN 0x00000020
+#define TXX9_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c)
+#define TXX9_DMA_CCR_XFSZ_1 TXX9_DMA_CCR_XFSZ(0)
+#define TXX9_DMA_CCR_XFSZ_2 TXX9_DMA_CCR_XFSZ(1)
+#define TXX9_DMA_CCR_XFSZ_4 TXX9_DMA_CCR_XFSZ(2)
+#define TXX9_DMA_CCR_XFSZ_8 TXX9_DMA_CCR_XFSZ(3)
+#define TXX9_DMA_CCR_XFSZ_X4 TXX9_DMA_CCR_XFSZ(4)
+#define TXX9_DMA_CCR_XFSZ_X8 TXX9_DMA_CCR_XFSZ(5)
+#define TXX9_DMA_CCR_XFSZ_X16 TXX9_DMA_CCR_XFSZ(6)
+#define TXX9_DMA_CCR_XFSZ_X32 TXX9_DMA_CCR_XFSZ(7)
+#define TXX9_DMA_CCR_MEMIO 0x00000002
+#define TXX9_DMA_CCR_SNGAD 0x00000001
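+
+/*
+ * Slave channels program their transfer size as
+ * TXX9_DMA_CCR_XFSZ(__ffs(reg_width)); e.g. a 4-byte wide device register
+ * gives __ffs(4) == 2, i.e. TXX9_DMA_CCR_XFSZ_4.
+ */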
+
+/* bits for CSRn */
+#define TXX9_DMA_CSR_CHNEN 0x00000400
+#define TXX9_DMA_CSR_STLXFER 0x00000200
+#define TXX9_DMA_CSR_XFACT 0x00000100
+#define TXX9_DMA_CSR_ABCHC 0x00000080
+#define TXX9_DMA_CSR_NCHNC 0x00000040
+#define TXX9_DMA_CSR_NTRNFC 0x00000020
+#define TXX9_DMA_CSR_EXTDN 0x00000010
+#define TXX9_DMA_CSR_CFERR 0x00000008
+#define TXX9_DMA_CSR_CHERR 0x00000004
+#define TXX9_DMA_CSR_DESERR 0x00000002
+#define TXX9_DMA_CSR_SORERR 0x00000001
+
+struct txx9dmac_chan {
+ struct dma_chan chan;
+ struct dma_device dma;
+ struct txx9dmac_dev *ddev;
+ void __iomem *ch_regs;
+ struct tasklet_struct tasklet;
+ int irq;
+ u32 ccr;
+
+ spinlock_t lock;
+
+ /* these other elements are all protected by lock */
+ dma_cookie_t completed;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+
+ unsigned int descs_allocated;
+};
+
+struct txx9dmac_dev {
+ void __iomem *regs;
+ struct tasklet_struct tasklet;
+ int irq;
+ struct txx9dmac_chan *chan[TXX9_DMA_MAX_NR_CHANNELS];
+ bool have_64bit_regs;
+ unsigned int descsize;
+};
+
+static inline bool __is_dmac64(const struct txx9dmac_dev *ddev)
+{
+ return ddev->have_64bit_regs;
+}
+
+static inline bool is_dmac64(const struct txx9dmac_chan *dc)
+{
+ return __is_dmac64(dc->ddev);
+}
+
+#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
+/* Hardware descriptor definition. (for simple-chain) */
+struct txx9dmac_hwdesc {
+#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
+ TXX9_DMA_REG32(CHAR);
+#else
+ u64 CHAR;
+#endif
+ u64 SAR;
+ u64 DAR;
+ TXX9_DMA_REG32(CNTR);
+};
+struct txx9dmac_hwdesc32 {
+ u32 CHAR;
+ u32 SAR;
+ u32 DAR;
+ u32 CNTR;
+};
+#else
+#define txx9dmac_hwdesc txx9dmac_cregs
+#define txx9dmac_hwdesc32 txx9dmac_cregs32
+#endif
+
+struct txx9dmac_desc {
+ /* FIRST values the hardware uses */
+ union {
+ struct txx9dmac_hwdesc hwdesc;
+ struct txx9dmac_hwdesc32 hwdesc32;
+ };
+
+ /* THEN values for driver housekeeping */
+ struct list_head desc_node ____cacheline_aligned;
+ struct dma_async_tx_descriptor txd;
+ size_t len;
+};
+
+#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
+
+static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
+{
+ return (dc->ccr & TXX9_DMA_CCR_INTENT) != 0;
+}
+
+static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
+{
+ dc->ccr |= TXX9_DMA_CCR_INTENT;
+}
+
+static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
+ struct txx9dmac_desc *desc)
+{
+}
+
+static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
+{
+ dc->ccr |= TXX9_DMA_CCR_SMPCHN;
+}
+
+static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
+ struct txx9dmac_desc *desc,
+ u32 sair, u32 dair, u32 ccr)
+{
+}
+
+#else /* TXX9_DMA_USE_SIMPLE_CHAIN */
+
+static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
+{
+ return true;
+}
+
+static void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
+{
+}
+
+static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
+ struct txx9dmac_desc *desc)
+{
+ if (__is_dmac64(ddev))
+ desc->hwdesc.CCR |= TXX9_DMA_CCR_INTENT;
+ else
+ desc->hwdesc32.CCR |= TXX9_DMA_CCR_INTENT;
+}
+
+static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
+{
+}
+
+static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
+ struct txx9dmac_desc *desc,
+ u32 sai, u32 dai, u32 ccr)
+{
+ if (__is_dmac64(ddev)) {
+ desc->hwdesc.SAIR = sai;
+ desc->hwdesc.DAIR = dai;
+ desc->hwdesc.CCR = ccr;
+ } else {
+ desc->hwdesc32.SAIR = sai;
+ desc->hwdesc32.DAIR = dai;
+ desc->hwdesc32.CCR = ccr;
+ }
+}
+
+#endif /* TXX9_DMA_USE_SIMPLE_CHAIN */
+
+#endif /* TXX9DMAC_H */
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index ab4f3592a11c..4339b1a879cd 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -5,7 +5,7 @@
#
menuconfig EDAC
- bool "EDAC - error detection and reporting"
+ bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
depends on X86 || PPC
help
@@ -232,4 +232,13 @@ config EDAC_AMD8111
Note, add more Kconfig dependency if it's adopted
on some machine other than Maple.
+config EDAC_CPC925
+ tristate "IBM CPC925 Memory Controller (PPC970FX)"
+ depends on EDAC_MM_EDAC && PPC64
+ help
+ Support for error detection and correction on the
+ IBM CPC925 Bridge and Memory Controller, which is
+ a companion chip to the PowerPC 970 family of
+ processors.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 633dc5604ee3..98aa4a7db412 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -18,6 +18,7 @@ edac_core-objs += edac_pci.o edac_pci_sysfs.o
endif
obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
+obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 2cb58ef743e0..35b78d04bbfa 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -37,7 +37,6 @@
#define AMD8111_EDAC_MOD_STR "amd8111_edac"
#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
-static int edac_dev_idx;
enum amd8111_edac_devs {
LPC_BRIDGE = 0,
@@ -377,7 +376,7 @@ static int amd8111_dev_probe(struct pci_dev *dev,
* edac_device_ctl_info, but make use of existing
* one instead.
*/
- dev_info->edac_idx = edac_dev_idx++;
+ dev_info->edac_idx = edac_device_alloc_index();
dev_info->edac_dev =
edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
NULL, 0, 0,
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index cb0f639f049d..c973004c002c 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -227,7 +227,7 @@ static struct platform_driver cell_edac_driver = {
.owner = THIS_MODULE,
},
.probe = cell_edac_probe,
- .remove = cell_edac_remove,
+ .remove = __devexit_p(cell_edac_remove),
};
static int __init cell_edac_init(void)
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
new file mode 100644
index 000000000000..8c54196b5aba
--- /dev/null
+++ b/drivers/edac/cpc925_edac.c
@@ -0,0 +1,1017 @@
+/*
+ * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
+ *
+ * Copyright (c) 2008 Wind River Systems, Inc.
+ *
+ * Authors: Cao Qingtao <qingtao.cao@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__
+#define CPC925_EDAC_MOD_STR "cpc925_edac"
+
+#define cpc925_printk(level, fmt, arg...) \
+ edac_printk(level, "CPC925", fmt, ##arg)
+
+#define cpc925_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
+
+/*
+ * CPC925 registers are 32 bits wide, with bit 0 defined as the most
+ * significant bit and bit 31 as the least significant.
+ */
+#define CPC925_BITS_PER_REG 32
+#define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr))
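+/* E.g. CPC925_BIT(0) == 0x80000000 and CPC925_BIT(31) == 0x00000001. */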
+
+/*
+ * EDAC device names for error detection on the
+ * CPU Interface and the HyperTransport Link.
+ */
+#define CPC925_CPU_ERR_DEV "cpu"
+#define CPC925_HT_LINK_DEV "htlink"
+
+/* Suppose DDR Refresh cycle is 15.6 microsecond */
+#define CPC925_REF_FREQ 0xFA69
+#define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */
+#define CPC925_NR_CSROWS 8
+
+/*
+ * All registers and bits definitions are taken from
+ * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
+ */
+
+/*
+ * CPU and Memory Controller Registers
+ */
+/************************************************************
+ * Processor Interface Exception Mask Register (APIMASK)
+ ************************************************************/
+#define REG_APIMASK_OFFSET 0x30070
+enum apimask_bits {
+ APIMASK_DART = CPC925_BIT(0), /* DART Exception */
+ APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
+ APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
+ APIMASK_STAT = CPC925_BIT(3), /* Status Exception */
+ APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */
+ APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
+ APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
+ /* BIT(7) Reserved */
+ APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
+ APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
+ APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
+ APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
+
+ CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
+ APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
+ APIMASK_ADRS1),
+ ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
+ APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
+};
+
+/************************************************************
+ * Processor Interface Exception Register (APIEXCP)
+ ************************************************************/
+#define REG_APIEXCP_OFFSET 0x30060
+enum apiexcp_bits {
+ APIEXCP_DART = CPC925_BIT(0), /* DART Exception */
+ APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */
+ APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */
+ APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */
+ APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */
+ APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */
+ APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */
+ /* BIT(7) Reserved */
+ APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
+ APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
+ APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
+ APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
+
+ CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
+ APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
+ APIEXCP_ADRS1),
+ UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
+ CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
+ ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
+};
+
+/************************************************************
+ * Memory Bus Configuration Register (MBCR)
+ ************************************************************/
+#define REG_MBCR_OFFSET 0x2190
+#define MBCR_64BITCFG_SHIFT 23
+#define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT)
+#define MBCR_64BITBUS_SHIFT 22
+#define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT)
+
+/************************************************************
+ * Memory Bank Mode Register (MBMR)
+ ************************************************************/
+#define REG_MBMR_OFFSET 0x21C0
+#define MBMR_MODE_MAX_VALUE 0xF
+#define MBMR_MODE_SHIFT 25
+#define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
+#define MBMR_BBA_SHIFT 24
+#define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT)
+
+/************************************************************
+ * Memory Bank Boundary Address Register (MBBAR)
+ ************************************************************/
+#define REG_MBBAR_OFFSET 0x21D0
+#define MBBAR_BBA_MAX_VALUE 0xFF
+#define MBBAR_BBA_SHIFT 24
+#define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
+
+/************************************************************
+ * Memory Scrub Control Register (MSCR)
+ ************************************************************/
+#define REG_MSCR_OFFSET 0x2400
+#define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/
+#define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */
+#define MSCR_SI_SHIFT 16 /* si - bit8:15*/
+#define MSCR_SI_MAX_VALUE 0xFF
+#define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
+
+/************************************************************
+ * Memory Scrub Range Start Register (MSRSR)
+ ************************************************************/
+#define REG_MSRSR_OFFSET 0x2410
+
+/************************************************************
+ * Memory Scrub Range End Register (MSRER)
+ ************************************************************/
+#define REG_MSRER_OFFSET 0x2420
+
+/************************************************************
+ * Memory Scrub Pattern Register (MSPR)
+ ************************************************************/
+#define REG_MSPR_OFFSET 0x2430
+
+/************************************************************
+ * Memory Check Control Register (MCCR)
+ ************************************************************/
+#define REG_MCCR_OFFSET 0x2440
+enum mccr_bits {
+ MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */
+};
+
+/************************************************************
+ * Memory Check Range End Register (MCRER)
+ ************************************************************/
+#define REG_MCRER_OFFSET 0x2450
+
+/************************************************************
+ * Memory Error Address Register (MEAR)
+ ************************************************************/
+#define REG_MEAR_OFFSET 0x2460
+#define MEAR_BCNT_MAX_VALUE 0x3
+#define MEAR_BCNT_SHIFT 30
+#define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
+#define MEAR_RANK_MAX_VALUE 0x7
+#define MEAR_RANK_SHIFT 27
+#define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
+#define MEAR_COL_MAX_VALUE 0x7FF
+#define MEAR_COL_SHIFT 16
+#define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
+#define MEAR_BANK_MAX_VALUE 0x3
+#define MEAR_BANK_SHIFT 14
+#define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
+#define MEAR_ROW_MASK 0x00003FFF
+
+/************************************************************
+ * Memory Error Syndrome Register (MESR)
+ ************************************************************/
+#define REG_MESR_OFFSET 0x2470
+#define MESR_ECC_SYN_H_MASK 0xFF00
+#define MESR_ECC_SYN_L_MASK 0x00FF
+
+/************************************************************
+ * Memory Mode Control Register (MMCR)
+ ************************************************************/
+#define REG_MMCR_OFFSET 0x2500
+enum mmcr_bits {
+ MMCR_REG_DIMM_MODE = CPC925_BIT(3),
+};
+
+/*
+ * HyperTransport Link Registers
+ */
+/************************************************************
+ * Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
+ ************************************************************/
+#define REG_ERRCTRL_OFFSET 0x70140
+enum errctrl_bits { /* nonfatal interrupts for */
+ ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */
+ ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */
+ ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */
+ ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */
+ ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */
+ ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */
+
+ ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */
+ ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
+
+ HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
+ ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
+ ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
+ HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
+};
+
+/************************************************************
+ * Link Configuration and Link Control Register (LINKCTRL)
+ ************************************************************/
+#define REG_LINKCTRL_OFFSET 0x70110
+enum linkctrl_bits {
+ LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)),
+ LINKCTRL_LINK_FAIL = CPC925_BIT(27),
+
+ HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
+};
+
+/************************************************************
+ * Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
+ ************************************************************/
+#define REG_LINKERR_OFFSET 0x70120
+enum linkerr_bits {
+ LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */
+ LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */
+ LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */
+
+ HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
+ LINKERR_PROT_ERR),
+};
+
+/************************************************************
+ * Bridge Control Register (BRGCTRL)
+ ************************************************************/
+#define REG_BRGCTRL_OFFSET 0x70300
+enum brgctrl_bits {
+ BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
+ BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
+};
+
+/* Private structure for edac memory controller */
+struct cpc925_mc_pdata {
+ void __iomem *vbase;
+ unsigned long total_mem;
+ const char *name;
+ int edac_idx;
+};
+
+/* Private structure for common edac device */
+struct cpc925_dev_info {
+ void __iomem *vbase;
+ struct platform_device *pdev;
+ char *ctl_name;
+ int edac_idx;
+ struct edac_device_ctl_info *edac_dev;
+ void (*init)(struct cpc925_dev_info *dev_info);
+ void (*exit)(struct cpc925_dev_info *dev_info);
+ void (*check)(struct edac_device_ctl_info *edac_dev);
+};
+
+/* Get total memory size from Open Firmware DTB */
+static void get_total_mem(struct cpc925_mc_pdata *pdata)
+{
+ struct device_node *np = NULL;
+ const unsigned int *reg, *reg_end;
+ int len, sw, aw;
+ unsigned long start, size;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return;
+
+ aw = of_n_addr_cells(np);
+ sw = of_n_size_cells(np);
+ reg = (const unsigned int *)of_get_property(np, "reg", &len);
+ reg_end = reg + len/4;
+
+ pdata->total_mem = 0;
+ do {
+ start = of_read_number(reg, aw);
+ reg += aw;
+ size = of_read_number(reg, sw);
+ reg += sw;
+ debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
+ start, size);
+ pdata->total_mem += size;
+ } while (reg < reg_end);
+
+ of_node_put(np);
+ debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
+}
+
+static void cpc925_init_csrows(struct mem_ctl_info *mci)
+{
+ struct cpc925_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ int index;
+ u32 mbmr, mbbar, bba;
+ unsigned long row_size, last_nr_pages = 0;
+
+ get_total_mem(pdata);
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
+ 0x20 * index);
+ mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
+ 0x20 * index);
+ bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
+ ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
+
+ if (bba == 0)
+ continue; /* not populated */
+
+ csrow = &mci->csrows[index];
+
+ row_size = bba * (1UL << 28); /* 256M */
+ csrow->first_page = last_nr_pages;
+ csrow->nr_pages = row_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ last_nr_pages = csrow->last_page + 1;
+
+ csrow->mtype = MEM_RDDR;
+ csrow->edac_mode = EDAC_SECDED;
+
+ switch (csrow->nr_channels) {
+ case 1: /* Single channel */
+ csrow->grain = 32; /* four-beat burst of 32 bytes */
+ break;
+ case 2: /* Dual channel */
+ default:
+ csrow->grain = 64; /* four-beat burst of 64 bytes */
+ break;
+ }
+
+ switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+ case 6: /* 0110, no way to differentiate X8 VS X16 */
+ case 5: /* 0101 */
+ case 8: /* 1000 */
+ csrow->dtype = DEV_X16;
+ break;
+ case 7: /* 0111 */
+ case 9: /* 1001 */
+ csrow->dtype = DEV_X8;
+ break;
+ default:
+ csrow->dtype = DEV_UNKNOWN;
+ break;
+ }
+ }
+}
+
+/* Enable memory controller ECC detection */
+static void cpc925_mc_init(struct mem_ctl_info *mci)
+{
+ struct cpc925_mc_pdata *pdata = mci->pvt_info;
+ u32 apimask;
+ u32 mccr;
+
+ /* Enable various ECC error exceptions */
+ apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
+ if ((apimask & ECC_MASK_ENABLE) == 0) {
+ apimask |= ECC_MASK_ENABLE;
+ __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
+ }
+
+ /* Enable ECC detection */
+ mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
+ if ((mccr & MCCR_ECC_EN) == 0) {
+ mccr |= MCCR_ECC_EN;
+ __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
+ }
+}
+
+/* Disable memory controller ECC detection */
+static void cpc925_mc_exit(struct mem_ctl_info *mci)
+{
+ /*
+ * WARNING:
+ * We could clear the ECC error detection bits here, and doing
+ * so would be harmless by itself. However, once they are
+ * cleared, re-enabling them in cpc925_mc_init() when the CPC925
+ * EDAC module is loaded again triggers a machine check exception.
+ * It is also fine to leave the ECC error detection bits enabled,
+ * since they are set to 1 by default or by the boot loader.
+ */
+
+ return;
+}
+
+/*
+ * Revert DDR column/row/bank addresses into page frame number and
+ * offset in page.
+ *
+ * Assuming the memory mode is 0x0111 (128-bit mode, identical DIMM pairs),
+ * the physical address (PA) to column address (CA) bit mapping is:
+ * CA 0 1 2 3 4 5 6 7 8 9 10
+ * PA 59 58 57 56 55 54 53 52 51 50 49
+ *
+ * The physical address (PA) to bank address (BA) bit mapping is:
+ * BA 0 1
+ * PA 43 44
+ *
+ * The physical address (PA) to row address (RA) bit mapping is:
+ * RA 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * PA 36 35 34 48 47 46 45 40 41 42 39 38 37
+ */
+static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
+ unsigned long *pfn, unsigned long *offset, int *csrow)
+{
+ u32 bcnt, rank, col, bank, row;
+ u32 c;
+ unsigned long pa;
+ int i;
+
+ bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
+ rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
+ col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
+ bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
+ row = mear & MEAR_ROW_MASK;
+
+ *csrow = rank;
+
+#ifdef CONFIG_EDAC_DEBUG
+ if (mci->csrows[rank].first_page == 0) {
+ cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
+ "non-populated csrow, broken hardware?\n");
+ return;
+ }
+#endif
+
+ /* Revert csrow number */
+ pa = mci->csrows[rank].first_page << PAGE_SHIFT;
+
+ /* Revert column address */
+ col += bcnt;
+ for (i = 0; i < 11; i++) {
+ c = col & 0x1;
+ col >>= 1;
+ pa |= c << (14 - i);
+ }
+
+ /* Revert bank address */
+ pa |= bank << 19;
+
+ /* Revert row address, in 4 steps */
+ for (i = 0; i < 3; i++) {
+ c = row & 0x1;
+ row >>= 1;
+ pa |= c << (26 - i);
+ }
+
+ for (i = 0; i < 3; i++) {
+ c = row & 0x1;
+ row >>= 1;
+ pa |= c << (21 + i);
+ }
+
+ for (i = 0; i < 4; i++) {
+ c = row & 0x1;
+ row >>= 1;
+ pa |= c << (18 - i);
+ }
+
+ for (i = 0; i < 3; i++) {
+ c = row & 0x1;
+ row >>= 1;
+ pa |= c << (29 - i);
+ }
+
+ *offset = pa & (PAGE_SIZE - 1);
+ *pfn = pa >> PAGE_SHIFT;
+
+ debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
+}
+
+static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
+{
+ if ((syndrome & MESR_ECC_SYN_H_MASK) == 0)
+ return 0;
+
+ if ((syndrome & MESR_ECC_SYN_L_MASK) == 0)
+ return 1;
+
+ cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
+ syndrome);
+ return 1;
+}
+
+/* Check memory controller registers for ECC errors */
+static void cpc925_mc_check(struct mem_ctl_info *mci)
+{
+ struct cpc925_mc_pdata *pdata = mci->pvt_info;
+ u32 apiexcp;
+ u32 mear;
+ u32 mesr;
+ u16 syndrome;
+ unsigned long pfn = 0, offset = 0;
+ int csrow = 0, channel = 0;
+
+ /* APIEXCP is cleared when read */
+ apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
+ if ((apiexcp & ECC_EXCP_DETECTED) == 0)
+ return;
+
+ mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
+ /* extract the ECC syndrome bits from MESR */
+ syndrome = mesr & (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
+
+ mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
+
+ /* Revert column/row addresses into page frame number, etc */
+ cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
+
+ if (apiexcp & CECC_EXCP_DETECTED) {
+ cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
+ channel = cpc925_mc_find_channel(mci, syndrome);
+ edac_mc_handle_ce(mci, pfn, offset, syndrome,
+ csrow, channel, mci->ctl_name);
+ }
+
+ if (apiexcp & UECC_EXCP_DETECTED) {
+ cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
+ edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+ }
+
+ cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
+ cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n",
+ apiexcp);
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_MSCR_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_MSRER_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_MSPR_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_MCCR_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n",
+ __raw_readl(pdata->vbase + REG_MCRER_OFFSET));
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n",
+ mesr);
+ cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n",
+ syndrome);
+}
+
+/******************** CPU err device********************************/
+/* Enable CPU Errors detection */
+static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
+{
+ u32 apimask;
+
+ apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
+ if ((apimask & CPU_MASK_ENABLE) == 0) {
+ apimask |= CPU_MASK_ENABLE;
+ __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
+ }
+}
+
+/* Disable CPU Errors detection */
+static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
+{
+ /*
+ * WARNING:
+ * We could clear the CPU error detection bits here, and doing
+ * so would be harmless by itself. However, once they are
+ * cleared, re-enabling them in cpc925_cpu_init() when the CPC925
+ * EDAC module is loaded again triggers a machine check exception.
+ * It is also fine to leave the CPU error detection bits enabled,
+ * since they are set to 1 by default.
+ */
+
+ return;
+}
+
+/* Check for CPU Errors */
+static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
+ u32 apiexcp;
+ u32 apimask;
+
+ /* APIEXCP is cleared when read */
+ apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
+ if ((apiexcp & CPU_EXCP_DETECTED) == 0)
+ return;
+
+ apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
+ cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
+ "Processor Interface register dump:\n");
+ cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask);
+ cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+/******************** HT Link err device****************************/
+/* Enable HyperTransport Link Error detection */
+static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
+{
+ u32 ht_errctrl;
+
+ ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
+ if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
+ ht_errctrl |= HT_ERRCTRL_ENABLE;
+ __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
+ }
+}
+
+/* Disable HyperTransport Link Error detection */
+static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
+{
+ u32 ht_errctrl;
+
+ ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
+ ht_errctrl &= ~HT_ERRCTRL_ENABLE;
+ __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
+}
+
+/* Check for HyperTransport Link errors */
+static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
+ u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
+ u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
+ u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
+ u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);
+
+ if (!((brgctrl & BRGCTRL_DETSERR) ||
+ (linkctrl & HT_LINKCTRL_DETECTED) ||
+ (errctrl & HT_ERRCTRL_DETECTED) ||
+ (linkerr & HT_LINKERR_DETECTED)))
+ return;
+
+ cpc925_printk(KERN_INFO, "HT Link Fault\n"
+ "HT register dump:\n");
+ cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
+ brgctrl);
+ cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
+ linkctrl);
+ cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
+ errctrl);
+ cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
+ linkerr);
+
+ /* Clear by writing 1 */
+ if (brgctrl & BRGCTRL_DETSERR)
+ __raw_writel(BRGCTRL_DETSERR,
+ dev_info->vbase + REG_BRGCTRL_OFFSET);
+
+ if (linkctrl & HT_LINKCTRL_DETECTED)
+ __raw_writel(HT_LINKCTRL_DETECTED,
+ dev_info->vbase + REG_LINKCTRL_OFFSET);
+
+ /* Initiate Secondary Bus Reset to clear the chain failure */
+ if (errctrl & ERRCTRL_CHN_FAL)
+ __raw_writel(BRGCTRL_SECBUSRESET,
+ dev_info->vbase + REG_BRGCTRL_OFFSET);
+
+ if (errctrl & ERRCTRL_RSP_ERR)
+ __raw_writel(ERRCTRL_RSP_ERR,
+ dev_info->vbase + REG_ERRCTRL_OFFSET);
+
+ if (linkerr & HT_LINKERR_DETECTED)
+ __raw_writel(HT_LINKERR_DETECTED,
+ dev_info->vbase + REG_LINKERR_OFFSET);
+
+ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static struct cpc925_dev_info cpc925_devs[] = {
+ {
+ .ctl_name = CPC925_CPU_ERR_DEV,
+ .init = cpc925_cpu_init,
+ .exit = cpc925_cpu_exit,
+ .check = cpc925_cpu_check,
+ },
+ {
+ .ctl_name = CPC925_HT_LINK_DEV,
+ .init = cpc925_htlink_init,
+ .exit = cpc925_htlink_exit,
+ .check = cpc925_htlink_check,
+ },
+ {0}, /* Terminated by NULL */
+};
+
+/*
+ * Register CPU error detection and HyperTransport Link error detection
+ * as common "edac_device"s. They have no corresponding device nodes in
+ * the Open Firmware DTB, so platform devices are created for them here.
+ * They also share the MMIO region with the memory controller.
+ */
+static void cpc925_add_edac_devices(void __iomem *vbase)
+{
+ struct cpc925_dev_info *dev_info;
+
+ if (!vbase) {
+ cpc925_printk(KERN_ERR, "MMIO not established yet\n");
+ return;
+ }
+
+ for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
+ dev_info->vbase = vbase;
+ dev_info->pdev = platform_device_register_simple(
+ dev_info->ctl_name, 0, NULL, 0);
+ if (IS_ERR(dev_info->pdev)) {
+ cpc925_printk(KERN_ERR,
+ "Can't register platform device for %s\n",
+ dev_info->ctl_name);
+ continue;
+ }
+
+ /*
+ * There is no need to allocate a private structure;
+ * the cpc925_devs[] entry itself is used instead.
+ */
+ dev_info->edac_idx = edac_device_alloc_index();
+ dev_info->edac_dev =
+ edac_device_alloc_ctl_info(0, dev_info->ctl_name,
+ 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
+ if (!dev_info->edac_dev) {
+ cpc925_printk(KERN_ERR, "No memory for edac device\n");
+ goto err1;
+ }
+
+ dev_info->edac_dev->pvt_info = dev_info;
+ dev_info->edac_dev->dev = &dev_info->pdev->dev;
+ dev_info->edac_dev->ctl_name = dev_info->ctl_name;
+ dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
+ dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ dev_info->edac_dev->edac_check = dev_info->check;
+
+ if (dev_info->init)
+ dev_info->init(dev_info);
+
+ if (edac_device_add_device(dev_info->edac_dev) > 0) {
+ cpc925_printk(KERN_ERR,
+ "Unable to add edac device for %s\n",
+ dev_info->ctl_name);
+ goto err2;
+ }
+
+ debugf0("%s: Successfully added edac device for %s\n",
+ __func__, dev_info->ctl_name);
+
+ continue;
+
+err2:
+ if (dev_info->exit)
+ dev_info->exit(dev_info);
+ edac_device_free_ctl_info(dev_info->edac_dev);
+err1:
+ platform_device_unregister(dev_info->pdev);
+ }
+}
+
+/*
+ * Delete the common "edac_device" for CPU Err Detection
+ * and HyperTransport Link Err Detection
+ */
+static void cpc925_del_edac_devices(void)
+{
+ struct cpc925_dev_info *dev_info;
+
+ for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
+ if (dev_info->edac_dev) {
+ edac_device_del_device(dev_info->edac_dev->dev);
+ edac_device_free_ctl_info(dev_info->edac_dev);
+ platform_device_unregister(dev_info->pdev);
+ }
+
+ if (dev_info->exit)
+ dev_info->exit(dev_info);
+
+ debugf0("%s: Successfully deleted edac device for %s\n",
+ __func__, dev_info->ctl_name);
+ }
+}
+
+/* Convert the current background scrub rate into bytes/sec bandwidth */
+static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+{
+ struct cpc925_mc_pdata *pdata = mci->pvt_info;
+ u32 mscr;
+ u8 si;
+
+ mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
+ si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
+
+ debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
+
+ if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
+ (si == 0)) {
+ cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
+ *bw = 0;
+ } else
+ *bw = CPC925_SCRUB_BLOCK_SIZE * CPC925_REF_FREQ / si;
+
+ return 0;
+}
+
+/* Return 0 for single channel; 1 for dual channel */
+static int cpc925_mc_get_channels(void __iomem *vbase)
+{
+ int dual = 0;
+ u32 mbcr;
+
+ mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
+
+ /*
+ * Dual channel only when 128-bit wide physical bus
+ * and 128-bit configuration.
+ */
+ if (((mbcr & MBCR_64BITCFG_MASK) == 0) &&
+ ((mbcr & MBCR_64BITBUS_MASK) == 0))
+ dual = 1;
+
+ debugf0("%s: %s channel\n", __func__,
+ (dual > 0) ? "Dual" : "Single");
+
+ return dual;
+}
+
+static int __devinit cpc925_probe(struct platform_device *pdev)
+{
+ static int edac_mc_idx;
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ struct cpc925_mc_pdata *pdata;
+ struct resource *r;
+ int res = 0, nr_channels;
+
+ debugf0("%s: %s platform device found!\n", __func__, pdev->name);
+
+ if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ cpc925_printk(KERN_ERR, "Unable to get resource\n");
+ res = -ENOENT;
+ goto err1;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdev->name)) {
+ cpc925_printk(KERN_ERR, "Unable to request mem region\n");
+ res = -EBUSY;
+ goto err1;
+ }
+
+ vbase = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
+ if (!vbase) {
+ cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
+ res = -ENOMEM;
+ goto err2;
+ }
+
+ nr_channels = cpc925_mc_get_channels(vbase);
+ mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
+ CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+ if (!mci) {
+ cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
+ res = -ENOMEM;
+ goto err2;
+ }
+
+ pdata = mci->pvt_info;
+ pdata->vbase = vbase;
+ pdata->edac_idx = edac_mc_idx++;
+ pdata->name = pdev->name;
+
+ mci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mci);
+ mci->dev_name = dev_name(&pdev->dev);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = CPC925_EDAC_MOD_STR;
+ mci->mod_ver = CPC925_EDAC_REVISION;
+ mci->ctl_name = pdev->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = cpc925_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+ mci->scrub_mode = SCRUB_SW_SRC;
+ mci->set_sdram_scrub_rate = NULL;
+ mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
+
+ cpc925_init_csrows(mci);
+
+ /* Setup memory controller registers */
+ cpc925_mc_init(mci);
+
+ if (edac_mc_add_mc(mci) > 0) {
+ cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
+ res = -ENODEV;
+ goto err3;
+ }
+
+ cpc925_add_edac_devices(vbase);
+
+ /* If we get this far, everything has succeeded */
+ debugf0("%s: success\n", __func__);
+
+ res = 0;
+ goto out;
+
+err3:
+ cpc925_mc_exit(mci);
+ edac_mc_free(mci);
+err2:
+ devm_release_mem_region(&pdev->dev, r->start, r->end-r->start+1);
+err1:
+ devres_release_group(&pdev->dev, cpc925_probe);
+out:
+ return res;
+}
+
+static int cpc925_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ /*
+ * Delete common edac devices before edac mc, because
+ * the former share the MMIO of the latter.
+ */
+ cpc925_del_edac_devices();
+ cpc925_mc_exit(mci);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver cpc925_edac_driver = {
+ .probe = cpc925_probe,
+ .remove = cpc925_remove,
+ .driver = {
+ .name = "cpc925_edac",
+ }
+};
+
+static int __init cpc925_edac_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
+ printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
+
+ /* Only support POLL mode so far */
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ ret = platform_driver_register(&cpc925_edac_driver);
+ if (ret) {
+ printk(KERN_WARNING "Failed to register %s\n",
+ CPC925_EDAC_MOD_STR);
+ }
+
+ return ret;
+}
+
+static void __exit cpc925_edac_exit(void)
+{
+ platform_driver_unregister(&cpc925_edac_driver);
+}
+
+module_init(cpc925_edac_init);
+module_exit(cpc925_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
+MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 48d3b1409834..3493c6bdb820 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -841,6 +841,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
+extern int edac_device_alloc_index(void);
/*
* edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a7d2c717d033..b02a6a69a8f0 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -490,6 +490,20 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
mutex_unlock(&device_ctls_mutex);
}
+/*
+ * edac_device_alloc_index: Allocate a unique device index number
+ *
+ * Return:
+ * allocated index number
+ */
+int edac_device_alloc_index(void)
+{
+ static atomic_t device_indexes = ATOMIC_INIT(0);
+
+ return atomic_inc_return(&device_indexes) - 1;
+}
+EXPORT_SYMBOL_GPL(edac_device_alloc_index);
+
/**
* edac_device_add_device: Insert the 'edac_dev' structure into the
* edac_device global list and create sysfs entries associated with
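The new edac_device_alloc_index() helper hands out monotonically increasing
indices from a single atomic counter, so a driver that registers several
independent edac devices (as the cpc925 driver above does) can never collide
on an index, even if probes run concurrently. A minimal sketch of the call
pattern it enables -- the device name and error codes are illustrative, and
the argument layout simply mirrors the cpc925 caller above:

	#include <linux/platform_device.h>
	#include "edac_core.h"

	static int example_add_one_edac_dev(struct platform_device *pdev)
	{
		struct edac_device_ctl_info *edac_dev;
		int idx = edac_device_alloc_index();	/* unique for the life of the system */

		/* one instance, no sub-blocks, no extra sysfs attributes */
		edac_dev = edac_device_alloc_ctl_info(0, "example_err", 1,
						      NULL, 0, 0, NULL, 0, idx);
		if (!edac_dev)
			return -ENOMEM;

		edac_dev->dev = &pdev->dev;
		edac_dev->ctl_name = "example_err";
		edac_dev->mod_name = "example_edac";
		edac_dev->dev_name = dev_name(&pdev->dev);

		if (edac_device_add_device(edac_dev)) {
			edac_device_free_ctl_info(edac_dev);
			return -ENODEV;
		}
		return 0;
	}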
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 58e9f8e457f8..51e0e2d8fac6 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -28,10 +28,10 @@ setup_serial_console(struct pcdp_uart *uart)
char parity;
mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
- p += sprintf(p, "uart8250,%s,0x%lx",
+ p += sprintf(p, "uart8250,%s,0x%llx",
mmio ? "mmio" : "io", uart->addr.address);
if (uart->baud) {
- p += sprintf(p, ",%lu", uart->baud);
+ p += sprintf(p, ",%llu", uart->baud);
if (uart->bits) {
switch (uart->parity) {
case 0x2: parity = 'e'; break;
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c
index 3e7f4e06386e..7b82eaae2621 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/max7301.c
@@ -287,7 +287,7 @@ exit_destroy:
return ret;
}
-static int max7301_remove(struct spi_device *spi)
+static int __devexit max7301_remove(struct spi_device *spi)
{
struct max7301 *ts;
int ret;
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 8dc0164bd51e..cdb6574d25a6 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -15,6 +15,10 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#ifdef CONFIG_OF_GPIO
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#endif
#include <asm/gpio.h>
@@ -32,6 +36,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca9539", 16, },
{ "pca9554", 8, },
{ "pca9555", 16, },
+ { "pca9556", 8, },
{ "pca9557", 8, },
{ "max7310", 8, },
@@ -49,7 +54,9 @@ struct pca953x_chip {
uint16_t reg_direction;
struct i2c_client *client;
+ struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
+ char **names;
};
static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
@@ -192,8 +199,57 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->label = chip->client->name;
gc->dev = &chip->client->dev;
gc->owner = THIS_MODULE;
+ gc->names = chip->names;
}
+/*
+ * Handlers for alternative sources of platform_data
+ */
+#ifdef CONFIG_OF_GPIO
+/*
+ * Translate OpenFirmware node properties into platform_data
+ */
+static struct pca953x_platform_data *
+pca953x_get_alt_pdata(struct i2c_client *client)
+{
+ struct pca953x_platform_data *pdata;
+ struct device_node *node;
+ const uint16_t *val;
+
+ node = dev_archdata_get_node(&client->dev.archdata);
+ if (node == NULL)
+ return NULL;
+
+ pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL);
+ if (pdata == NULL) {
+ dev_err(&client->dev, "Unable to allocate platform_data\n");
+ return NULL;
+ }
+
+ pdata->gpio_base = -1;
+ val = of_get_property(node, "linux,gpio-base", NULL);
+ if (val) {
+ if (*val < 0)
+ dev_warn(&client->dev,
+ "invalid gpio-base in device tree\n");
+ else
+ pdata->gpio_base = *val;
+ }
+
+ val = of_get_property(node, "polarity", NULL);
+ if (val)
+ pdata->invert = *val;
+
+ return pdata;
+}
+#else
+static struct pca953x_platform_data *
+pca953x_get_alt_pdata(struct i2c_client *client)
+{
+ return NULL;
+}
+#endif
+
static int __devinit pca953x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -201,20 +257,32 @@ static int __devinit pca953x_probe(struct i2c_client *client,
struct pca953x_chip *chip;
int ret;
+ chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
pdata = client->dev.platform_data;
if (pdata == NULL) {
- dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
+ pdata = pca953x_get_alt_pdata(client);
+ /*
+ * Unlike normal platform_data, this is allocated
+ * dynamically and must be freed in the driver
+ */
+ chip->dyn_pdata = pdata;
}
- chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
- if (chip == NULL)
- return -ENOMEM;
+ if (pdata == NULL) {
+ dev_dbg(&client->dev, "no platform data\n");
+ ret = -EINVAL;
+ goto out_failed;
+ }
chip->client = client;
chip->gpio_start = pdata->gpio_base;
+ chip->names = pdata->names;
+
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
@@ -249,6 +317,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
return 0;
out_failed:
+ kfree(chip->dyn_pdata);
kfree(chip);
return ret;
}
@@ -276,6 +345,7 @@ static int pca953x_remove(struct i2c_client *client)
return ret;
}
+ kfree(chip->dyn_pdata);
kfree(chip);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b0ce44b9f5ab..64f42b19cbfa 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -176,7 +176,7 @@ int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
/* prebuild the SAREA */
sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
- _DRM_CONTAINS_LOCK|_DRM_DRIVER,
+ _DRM_CONTAINS_LOCK,
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 34d54e7281fd..de4aad076ebc 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1300,7 +1300,7 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netif_stop_queue(ndev);
}
}
- return 1;
+ return NETDEV_TX_BUSY;
}
/*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 8695809b24b0..87d88dbb667f 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -255,14 +255,14 @@ static void status(struct seq_file *seq, mddev_t *mddev)
}
-static int reconfig(mddev_t *mddev, int layout, int chunk_size)
+static int reshape(mddev_t *mddev)
{
- int mode = layout & ModeMask;
- int count = layout >> ModeShift;
+ int mode = mddev->new_layout & ModeMask;
+ int count = mddev->new_layout >> ModeShift;
conf_t *conf = mddev->private;
- if (chunk_size != -1)
- return -EINVAL;
+ if (mddev->new_layout < 0)
+ return 0;
/* new layout */
if (mode == ClearFaults)
@@ -279,6 +279,7 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size)
atomic_set(&conf->counters[mode], count);
} else
return -EINVAL;
+ mddev->new_layout = -1;
mddev->layout = -1; /* makes sure further changes come through */
return 0;
}
@@ -298,8 +299,12 @@ static int run(mddev_t *mddev)
{
mdk_rdev_t *rdev;
int i;
+ conf_t *conf;
+
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
- conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL);
+ conf = kmalloc(sizeof(*conf), GFP_KERNEL);
if (!conf)
return -ENOMEM;
@@ -315,7 +320,7 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
mddev->private = conf;
- reconfig(mddev, mddev->layout, -1);
+ reshape(mddev);
return 0;
}
@@ -338,7 +343,7 @@ static struct mdk_personality faulty_personality =
.run = run,
.stop = stop,
.status = status,
- .reconfig = reconfig,
+ .check_reshape = reshape,
.size = faulty_size,
};
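With the old reconfig(mddev, layout, chunk_size) hook removed, the faulty
personality now picks up a requested change from mddev->new_layout inside
check_reshape(), decoding it exactly as shown above: the fault mode sits in
the low bits and the trigger count above ModeShift. A hedged sketch of the
matching encode step (ModeShift and ModeMask are defined in faulty.c and are
not shown in this hunk):

	/* Pack a fault-injection request the way reshape() above unpacks it. */
	static inline int faulty_pack_layout(int mode, int count)
	{
		return (count << ModeShift) | (mode & ModeMask);
	}

Userspace would write the resulting value to the array's "layout" sysfs
attribute; layout_store() (see the md.c hunk below) stores it in
mddev->new_layout and calls ->check_reshape(), which for this personality is
reshape().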
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 64f1f3e046e0..15c8b7b25a9b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -27,19 +27,27 @@
*/
static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
{
- dev_info_t *hash;
- linear_conf_t *conf = mddev_to_conf(mddev);
- sector_t idx = sector >> conf->sector_shift;
+ int lo, mid, hi;
+ linear_conf_t *conf;
+
+ lo = 0;
+ hi = mddev->raid_disks - 1;
+ conf = rcu_dereference(mddev->private);
/*
- * sector_div(a,b) returns the remainer and sets a to a/b
+ * Binary Search
*/
- (void)sector_div(idx, conf->spacing);
- hash = conf->hash_table[idx];
- while (sector >= hash->num_sectors + hash->start_sector)
- hash++;
- return hash;
+ while (hi > lo) {
+
+ mid = (hi + lo) / 2;
+ if (sector < conf->disks[mid].end_sector)
+ hi = mid;
+ else
+ lo = mid + 1;
+ }
+
+ return conf->disks + lo;
}
/**
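The per-PAGE hash table that which_dev() used to consult is gone; lookups now
binary-search the cumulative end_sector values that linear_conf() records for
each member device. A standalone model of that lookup, with purely
illustrative device sizes:

	#include <assert.h>

	/* Cumulative end offsets in array order, as linear_conf() builds them:
	 * disk 0 is 1000 sectors, disk 1 is 4000, disk 2 is 2500. */
	static const unsigned long long end_sector[] = { 1000, 5000, 7500 };

	/* Index of the first device whose end_sector exceeds 'sector' --
	 * the same invariant which_dev() relies on. */
	static int which_disk(unsigned long long sector, int raid_disks)
	{
		int lo = 0, hi = raid_disks - 1, mid;

		while (hi > lo) {
			mid = (hi + lo) / 2;
			if (sector < end_sector[mid])
				hi = mid;
			else
				lo = mid + 1;
		}
		return lo;
	}

	int main(void)
	{
		assert(which_disk(0, 3) == 0);
		assert(which_disk(999, 3) == 0);	/* last sector of disk 0 */
		assert(which_disk(1000, 3) == 1);	/* first sector of disk 1 */
		assert(which_disk(7499, 3) == 2);	/* last sector of the array */
		return 0;
	}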
@@ -59,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q,
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
+ rcu_read_lock();
dev0 = which_dev(mddev, sector);
- maxsectors = dev0->num_sectors - (sector - dev0->start_sector);
+ maxsectors = dev0->end_sector - sector;
+ rcu_read_unlock();
if (maxsectors < bio_sectors)
maxsectors = 0;
@@ -79,46 +89,57 @@ static int linear_mergeable_bvec(struct request_queue *q,
static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
- linear_conf_t *conf = mddev_to_conf(mddev);
+ linear_conf_t *conf;
int i;
+ rcu_read_lock();
+ conf = rcu_dereference(mddev->private);
+
for (i=0; i < mddev->raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
blk_unplug(r_queue);
}
+ rcu_read_unlock();
}
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
- linear_conf_t *conf = mddev_to_conf(mddev);
+ linear_conf_t *conf;
int i, ret = 0;
+ rcu_read_lock();
+ conf = rcu_dereference(mddev->private);
+
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
+
+ rcu_read_unlock();
return ret;
}
static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
- linear_conf_t *conf = mddev_to_conf(mddev);
+ linear_conf_t *conf;
+ sector_t array_sectors;
+ rcu_read_lock();
+ conf = rcu_dereference(mddev->private);
WARN_ONCE(sectors || raid_disks,
"%s does not support generic reshape\n", __func__);
+ array_sectors = conf->array_sectors;
+ rcu_read_unlock();
- return conf->array_sectors;
+ return array_sectors;
}
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{
linear_conf_t *conf;
- dev_info_t **table;
mdk_rdev_t *rdev;
- int i, nb_zone, cnt;
- sector_t min_sectors;
- sector_t curr_sector;
+ int i, cnt;
conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
GFP_KERNEL);
@@ -131,6 +152,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
list_for_each_entry(rdev, &mddev->disks, same_set) {
int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j;
+ sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) {
printk("linear: disk numbering problem. Aborting!\n");
@@ -138,6 +160,11 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
}
disk->rdev = rdev;
+ if (mddev->chunk_sectors) {
+ sectors = rdev->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev->sectors = sectors * mddev->chunk_sectors;
+ }
blk_queue_stack_limits(mddev->queue,
rdev->bdev->bd_disk->queue);
@@ -149,102 +176,24 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
- disk->num_sectors = rdev->sectors;
conf->array_sectors += rdev->sectors;
-
cnt++;
+
}
if (cnt != raid_disks) {
printk("linear: not enough drives present. Aborting!\n");
goto out;
}
- min_sectors = conf->array_sectors;
- sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
- if (min_sectors == 0)
- min_sectors = 1;
-
- /* min_sectors is the minimum spacing that will fit the hash
- * table in one PAGE. This may be much smaller than needed.
- * We find the smallest non-terminal set of consecutive devices
- * that is larger than min_sectors and use the size of that as
- * the actual spacing
- */
- conf->spacing = conf->array_sectors;
- for (i=0; i < cnt-1 ; i++) {
- sector_t tmp = 0;
- int j;
- for (j = i; j < cnt - 1 && tmp < min_sectors; j++)
- tmp += conf->disks[j].num_sectors;
- if (tmp >= min_sectors && tmp < conf->spacing)
- conf->spacing = tmp;
- }
-
- /* spacing may be too large for sector_div to work with,
- * so we might need to pre-shift
- */
- conf->sector_shift = 0;
- if (sizeof(sector_t) > sizeof(u32)) {
- sector_t space = conf->spacing;
- while (space > (sector_t)(~(u32)0)) {
- space >>= 1;
- conf->sector_shift++;
- }
- }
/*
- * This code was restructured to work around a gcc-2.95.3 internal
- * compiler error. Alter it with care.
+ * Here we calculate the device offsets.
*/
- {
- sector_t sz;
- unsigned round;
- unsigned long base;
-
- sz = conf->array_sectors >> conf->sector_shift;
- sz += 1; /* force round-up */
- base = conf->spacing >> conf->sector_shift;
- round = sector_div(sz, base);
- nb_zone = sz + (round ? 1 : 0);
- }
- BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *));
-
- conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone,
- GFP_KERNEL);
- if (!conf->hash_table)
- goto out;
+ conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
- /*
- * Here we generate the linear hash table
- * First calculate the device offsets.
- */
- conf->disks[0].start_sector = 0;
for (i = 1; i < raid_disks; i++)
- conf->disks[i].start_sector =
- conf->disks[i-1].start_sector +
- conf->disks[i-1].num_sectors;
-
- table = conf->hash_table;
- i = 0;
- for (curr_sector = 0;
- curr_sector < conf->array_sectors;
- curr_sector += conf->spacing) {
-
- while (i < raid_disks-1 &&
- curr_sector >= conf->disks[i+1].start_sector)
- i++;
-
- *table ++ = conf->disks + i;
- }
-
- if (conf->sector_shift) {
- conf->spacing >>= conf->sector_shift;
- /* round spacing up so that when we divide by it,
- * we err on the side of "too-low", which is safest.
- */
- conf->spacing++;
- }
-
- BUG_ON(table - conf->hash_table > nb_zone);
+ conf->disks[i].end_sector =
+ conf->disks[i-1].end_sector +
+ conf->disks[i].rdev->sectors;
return conf;
@@ -257,6 +206,8 @@ static int linear_run (mddev_t *mddev)
{
linear_conf_t *conf;
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
@@ -272,6 +223,12 @@ static int linear_run (mddev_t *mddev)
return 0;
}
+static void free_conf(struct rcu_head *head)
+{
+ linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
+ kfree(conf);
+}
+
static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{
/* Adding a drive to a linear array allows the array to grow.
@@ -282,7 +239,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
* The current one is never freed until the array is stopped.
* This avoids races.
*/
- linear_conf_t *newconf;
+ linear_conf_t *newconf, *oldconf;
if (rdev->saved_raid_disk != mddev->raid_disks)
return -EINVAL;
@@ -294,25 +251,29 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
if (!newconf)
return -ENOMEM;
- newconf->prev = mddev_to_conf(mddev);
- mddev->private = newconf;
+ oldconf = rcu_dereference(mddev->private);
mddev->raid_disks++;
+ rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
+ call_rcu(&oldconf->rcu, free_conf);
return 0;
}
static int linear_stop (mddev_t *mddev)
{
- linear_conf_t *conf = mddev_to_conf(mddev);
-
+ linear_conf_t *conf = mddev->private;
+
+ /*
+ * We do not require rcu protection here since
+ * we hold reconfig_mutex for both linear_add and
+ * linear_stop, so they cannot race.
+ * We should make sure any old 'conf's are properly
+ * freed though.
+ */
+ rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- do {
- linear_conf_t *t = conf->prev;
- kfree(conf->hash_table);
- kfree(conf);
- conf = t;
- } while (conf);
+ kfree(conf);
return 0;
}
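linear_add() now grows the array with the usual RCU publish/retire idiom:
readers such as which_dev() dereference mddev->private under rcu_read_lock(),
the writer installs the enlarged conf with rcu_assign_pointer() and retires
the old one via call_rcu(), and linear_stop() issues rcu_barrier() so the
free_conf() callbacks have finished before the queue is torn down. A
stripped-down sketch of the same pattern outside md (the writer is assumed to
be serialized, as md's reconfig_mutex guarantees for linear_add/linear_stop):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct conf {
		int nr_disks;
		struct rcu_head rcu;
	};

	static struct conf *cur_conf;

	static void free_conf(struct rcu_head *head)
	{
		kfree(container_of(head, struct conf, rcu));
	}

	/* Writer: only one updater at a time, so a plain read of cur_conf is fine. */
	static int grow(int new_nr)
	{
		struct conf *newc = kzalloc(sizeof(*newc), GFP_KERNEL);
		struct conf *oldc = cur_conf;

		if (!newc)
			return -ENOMEM;
		newc->nr_disks = new_nr;

		rcu_assign_pointer(cur_conf, newc);	/* readers now see the new conf */
		if (oldc)
			call_rcu(&oldc->rcu, free_conf); /* freed once all readers are done */
		return 0;
	}

	/* Reader, mirroring which_dev()/linear_congested() above. */
	static int read_nr_disks(void)
	{
		struct conf *c;
		int nr;

		rcu_read_lock();
		c = rcu_dereference(cur_conf);
		nr = c ? c->nr_disks : 0;
		rcu_read_unlock();
		return nr;
	}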
@@ -322,6 +283,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev;
+ sector_t start_sector;
int cpu;
if (unlikely(bio_barrier(bio))) {
@@ -335,33 +297,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bio_sectors(bio));
part_stat_unlock();
+ rcu_read_lock();
tmp_dev = which_dev(mddev, bio->bi_sector);
-
- if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors +
- tmp_dev->start_sector)
- || (bio->bi_sector <
- tmp_dev->start_sector))) {
+ start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+
+
+ if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
+ || (bio->bi_sector < start_sector))) {
char b[BDEVNAME_SIZE];
printk("linear_make_request: Sector %llu out of bounds on "
"dev %s: %llu sectors, offset %llu\n",
(unsigned long long)bio->bi_sector,
bdevname(tmp_dev->rdev->bdev, b),
- (unsigned long long)tmp_dev->num_sectors,
- (unsigned long long)tmp_dev->start_sector);
+ (unsigned long long)tmp_dev->rdev->sectors,
+ (unsigned long long)start_sector);
+ rcu_read_unlock();
bio_io_error(bio);
return 0;
}
if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
- tmp_dev->start_sector + tmp_dev->num_sectors)) {
+ tmp_dev->end_sector)) {
/* This bio crosses a device boundary, so we have to
* split it.
*/
struct bio_pair *bp;
+ sector_t end_sector = tmp_dev->end_sector;
+
+ rcu_read_unlock();
- bp = bio_split(bio,
- tmp_dev->start_sector + tmp_dev->num_sectors
- - bio->bi_sector);
+ bp = bio_split(bio, end_sector - bio->bi_sector);
if (linear_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
@@ -372,8 +337,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
}
bio->bi_bdev = tmp_dev->rdev->bdev;
- bio->bi_sector = bio->bi_sector - tmp_dev->start_sector
+ bio->bi_sector = bio->bi_sector - start_sector
+ tmp_dev->rdev->data_offset;
+ rcu_read_unlock();
return 1;
}
@@ -381,7 +347,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
static void linear_status (struct seq_file *seq, mddev_t *mddev)
{
- seq_printf(seq, " %dk rounding", mddev->chunk_size/1024);
+ seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
diff --git a/drivers/md/linear.h b/drivers/md/linear.h
index bf8179587f95..0ce29b61605a 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/linear.h
@@ -3,27 +3,19 @@
struct dev_info {
mdk_rdev_t *rdev;
- sector_t num_sectors;
- sector_t start_sector;
+ sector_t end_sector;
};
typedef struct dev_info dev_info_t;
struct linear_private_data
{
- struct linear_private_data *prev; /* earlier version */
- dev_info_t **hash_table;
- sector_t spacing;
sector_t array_sectors;
- int sector_shift; /* shift before dividing
- * by spacing
- */
dev_info_t disks[0];
+ struct rcu_head rcu;
};
typedef struct linear_private_data linear_conf_t;
-#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
-
#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 20f6ac338349..09be637d52cb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,15 +440,6 @@ static inline sector_t calc_dev_sboffset(struct block_device *bdev)
return MD_NEW_SIZE_SECTORS(num_sectors);
}
-static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
-{
- sector_t num_sectors = rdev->sb_start;
-
- if (chunk_size)
- num_sectors &= ~((sector_t)chunk_size/512 - 1);
- return num_sectors;
-}
-
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
if (rdev->sb_page)
@@ -745,6 +736,24 @@ struct super_type {
};
/*
+ * Check that the given mddev has no bitmap.
+ *
+ * This function is called from the run method of all personalities that do not
+ * support bitmaps. It prints an error message and returns non-zero if mddev
+ * has a bitmap. Otherwise, it returns 0.
+ *
+ */
+int md_check_no_bitmap(mddev_t *mddev)
+{
+ if (!mddev->bitmap_file && !mddev->bitmap_offset)
+ return 0;
+ printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
+ mdname(mddev), mddev->pers->name);
+ return 1;
+}
+EXPORT_SYMBOL(md_check_no_bitmap);
+
+/*
* load_super for 0.90.0
*/
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
@@ -797,17 +806,6 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
- if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
- if (sb->level != 1 && sb->level != 4
- && sb->level != 5 && sb->level != 6
- && sb->level != 10) {
- /* FIXME use a better test */
- printk(KERN_WARNING
- "md: bitmaps not supported for this level.\n");
- goto abort;
- }
- }
-
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
else
@@ -836,7 +834,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
else
ret = 0;
}
- rdev->sectors = calc_num_sectors(rdev, sb->chunk_size);
+ rdev->sectors = rdev->sb_start;
if (rdev->sectors < sb->size * 2 && sb->level > 1)
/* "this cannot possibly happen" ... */
@@ -866,7 +864,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
mddev->external = 0;
- mddev->chunk_size = sb->chunk_size;
+ mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
@@ -883,13 +881,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout;
- mddev->new_chunk = sb->new_chunk;
+ mddev->new_chunk_sectors = sb->new_chunk >> 9;
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
- mddev->new_chunk = mddev->chunk_size;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
}
if (sb->state & (1<<MD_SB_CLEAN))
@@ -1004,7 +1002,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout;
- sb->new_chunk = mddev->new_chunk;
+ sb->new_chunk = mddev->new_chunk_sectors << 9;
}
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
@@ -1018,7 +1016,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->recovery_cp = 0;
sb->layout = mddev->layout;
- sb->chunk_size = mddev->chunk_size;
+ sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
@@ -1185,17 +1183,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
bdevname(rdev->bdev,b));
return -EINVAL;
}
- if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
- if (sb->level != cpu_to_le32(1) &&
- sb->level != cpu_to_le32(4) &&
- sb->level != cpu_to_le32(5) &&
- sb->level != cpu_to_le32(6) &&
- sb->level != cpu_to_le32(10)) {
- printk(KERN_WARNING
- "md: bitmaps not supported for this level.\n");
- return -EINVAL;
- }
- }
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
@@ -1248,9 +1235,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (rdev->sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size);
- if (le32_to_cpu(sb->chunksize))
- rdev->sectors &= ~((sector_t)le32_to_cpu(sb->chunksize) - 1);
-
if (le64_to_cpu(sb->size) > rdev->sectors)
return -EINVAL;
return ret;
@@ -1271,7 +1255,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->major_version = 1;
mddev->patch_version = 0;
mddev->external = 0;
- mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
+ mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
@@ -1297,13 +1281,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout);
- mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
+ mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
} else {
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
- mddev->new_chunk = mddev->chunk_size;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
}
} else if (mddev->pers == NULL) {
@@ -1375,7 +1359,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
- sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9);
+ sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
@@ -1402,7 +1386,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
- sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
+ sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
}
max_dev = 0;
@@ -1897,6 +1881,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
int sync_req;
int nospares = 0;
+ mddev->utime = get_seconds();
if (mddev->external)
return;
repeat:
@@ -1926,7 +1911,6 @@ repeat:
nospares = 0;
sync_req = mddev->in_sync;
- mddev->utime = get_seconds();
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
@@ -2597,15 +2581,6 @@ static void analyze_sbs(mddev_t * mddev)
clear_bit(In_sync, &rdev->flags);
}
}
-
-
-
- if (mddev->recovery_cp != MaxSector &&
- mddev->level >= 1)
- printk(KERN_ERR "md: %s: raid array is not clean"
- " -- starting background reconstruction\n",
- mdname(mddev));
-
}
static void md_safemode_timeout(unsigned long data);
@@ -2746,7 +2721,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (IS_ERR(priv)) {
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
- mddev->new_chunk = mddev->chunk_size;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
module_put(pers->owner);
@@ -2764,7 +2739,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
- mddev->chunk_size = mddev->new_chunk;
+ mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
pers->run(mddev);
mddev_resume(mddev);
@@ -2800,11 +2775,14 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers) {
int err;
- if (mddev->pers->reconfig == NULL)
+ if (mddev->pers->check_reshape == NULL)
return -EBUSY;
- err = mddev->pers->reconfig(mddev, n, -1);
- if (err)
+ mddev->new_layout = n;
+ err = mddev->pers->check_reshape(mddev);
+ if (err) {
+ mddev->new_layout = mddev->layout;
return err;
+ }
} else {
mddev->new_layout = n;
if (mddev->reshape_position == MaxSector)
@@ -2857,10 +2835,11 @@ static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
if (mddev->reshape_position != MaxSector &&
- mddev->chunk_size != mddev->new_chunk)
- return sprintf(page, "%d (%d)\n", mddev->new_chunk,
- mddev->chunk_size);
- return sprintf(page, "%d\n", mddev->chunk_size);
+ mddev->chunk_sectors != mddev->new_chunk_sectors)
+ return sprintf(page, "%d (%d)\n",
+ mddev->new_chunk_sectors << 9,
+ mddev->chunk_sectors << 9);
+ return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}
static ssize_t
@@ -2874,15 +2853,18 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers) {
int err;
- if (mddev->pers->reconfig == NULL)
+ if (mddev->pers->check_reshape == NULL)
return -EBUSY;
- err = mddev->pers->reconfig(mddev, -1, n);
- if (err)
+ mddev->new_chunk_sectors = n >> 9;
+ err = mddev->pers->check_reshape(mddev);
+ if (err) {
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
return err;
+ }
} else {
- mddev->new_chunk = n;
+ mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector)
- mddev->chunk_size = n;
+ mddev->chunk_sectors = n >> 9;
}
return len;
}
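The bulk of the md.c churn is one mechanical conversion: chunk sizes are now
stored internally in 512-byte sectors (chunk_sectors, new_chunk_sectors),
while superblocks, the ioctl structures and the sysfs chunk_size attribute
keep speaking bytes, so every crossing of that boundary is a shift by 9. A
tiny self-checking reminder of the round trip (plain userspace C, values
illustrative):

	#include <assert.h>

	int main(void)
	{
		int chunk_size_bytes = 64 * 1024;		/* 64k chunk, as set via sysfs */
		int chunk_sectors = chunk_size_bytes >> 9;	/* what md now stores: 128 */

		assert(chunk_sectors == 128);
		assert((chunk_sectors << 9) == chunk_size_bytes);	/* exact round trip */
		assert(chunk_sectors / 2 == 64);	/* the "%dk rounding" linear_status() prints */
		return 0;
	}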
@@ -3527,8 +3509,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len)
return -EBUSY;
/* Must be a multiple of chunk_size */
- if (mddev->chunk_size) {
- if (min & (sector_t)((mddev->chunk_size>>9)-1))
+ if (mddev->chunk_sectors) {
+ sector_t temp = min;
+ if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_min = min;
@@ -3564,8 +3547,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
return -EBUSY;
/* Must be a multiple of chunk_size */
- if (mddev->chunk_size) {
- if (max & (sector_t)((mddev->chunk_size>>9)-1))
+ if (mddev->chunk_sectors) {
+ sector_t temp = max;
+ if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL;
}
mddev->resync_max = max;
@@ -3656,7 +3640,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0;
mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout;
- mddev->new_chunk = mddev->chunk_size;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
return len;
}
@@ -3976,11 +3960,9 @@ static int start_dirty_degraded;
static int do_md_run(mddev_t * mddev)
{
int err;
- int chunk_size;
mdk_rdev_t *rdev;
struct gendisk *disk;
struct mdk_personality *pers;
- char b[BDEVNAME_SIZE];
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
@@ -3998,38 +3980,6 @@ static int do_md_run(mddev_t * mddev)
analyze_sbs(mddev);
}
- chunk_size = mddev->chunk_size;
-
- if (chunk_size) {
- if (chunk_size > MAX_CHUNK_SIZE) {
- printk(KERN_ERR "too big chunk_size: %d > %d\n",
- chunk_size, MAX_CHUNK_SIZE);
- return -EINVAL;
- }
- /*
- * chunk-size has to be a power of 2
- */
- if ( (1 << ffz(~chunk_size)) != chunk_size) {
- printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
- return -EINVAL;
- }
-
- /* devices must have minimum size of one chunk */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (test_bit(Faulty, &rdev->flags))
- continue;
- if (rdev->sectors < chunk_size / 512) {
- printk(KERN_WARNING
- "md: Dev %s smaller than chunk_size:"
- " %llu < %d\n",
- bdevname(rdev->bdev,b),
- (unsigned long long)rdev->sectors,
- chunk_size / 512);
- return -EINVAL;
- }
- }
- }
-
if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0])
@@ -4405,7 +4355,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->flags = 0;
mddev->ro = 0;
mddev->metadata_type[0] = 0;
- mddev->chunk_size = 0;
+ mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
mddev->max_disks = 0;
@@ -4413,7 +4363,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->delta_disks = 0;
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
- mddev->new_chunk = 0;
+ mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0;
mddev->resync_mismatches = 0;
mddev->suspend_lo = mddev->suspend_hi = 0;
@@ -4618,7 +4568,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.spare_disks = spare;
info.layout = mddev->layout;
- info.chunk_size = mddev->chunk_size;
+ info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -4843,7 +4793,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
} else
rdev->sb_start = calc_dev_sboffset(rdev->bdev);
- rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size);
+ rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev);
if (err) {
@@ -4913,7 +4863,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
else
rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
- rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size);
+ rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
@@ -5062,7 +5012,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->external = 0;
mddev->layout = info->layout;
- mddev->chunk_size = info->chunk_size;
+ mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS;
@@ -5081,7 +5031,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level;
- mddev->new_chunk = mddev->chunk_size;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->delta_disks = 0;
@@ -5191,7 +5141,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->level != info->level ||
/* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent||
- mddev->chunk_size != info->chunk_size ||
+ mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)
)
@@ -5215,10 +5165,15 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
* we don't need to do anything at the md level, the
* personality will take care of it all.
*/
- if (mddev->pers->reconfig == NULL)
+ if (mddev->pers->check_reshape == NULL)
return -EINVAL;
- else
- return mddev->pers->reconfig(mddev, info->layout, -1);
+ else {
+ mddev->new_layout = info->layout;
+ rv = mddev->pers->check_reshape(mddev);
+ if (rv)
+ mddev->new_layout = mddev->layout;
+ return rv;
+ }
}
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
@@ -6717,7 +6672,8 @@ void md_check_recovery(mddev_t *mddev)
*/
if (mddev->reshape_position != MaxSector) {
- if (mddev->pers->check_reshape(mddev) != 0)
+ if (mddev->pers->check_reshape == NULL ||
+ mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
goto unlock;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8227ab909d44..9430a110db93 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,13 +30,6 @@ typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
/*
- * options passed in raidrun:
- */
-
-/* Currently this must fit in an 'int' */
-#define MAX_CHUNK_SIZE (1<<30)
-
-/*
* MD's 'extended' device
*/
struct mdk_rdev_s
@@ -145,7 +138,7 @@ struct mddev_s
int external; /* metadata is
* managed externally */
char metadata_type[17]; /* externally set*/
- int chunk_size;
+ int chunk_sectors;
time_t ctime, utime;
int level, layout;
char clevel[16];
@@ -166,7 +159,8 @@ struct mddev_s
* If reshape_position is MaxSector, then no reshape is happening (yet).
*/
sector_t reshape_position;
- int delta_disks, new_level, new_layout, new_chunk;
+ int delta_disks, new_level, new_layout;
+ int new_chunk_sectors;
struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
@@ -325,7 +319,6 @@ struct mdk_personality
int (*check_reshape) (mddev_t *mddev);
int (*start_reshape) (mddev_t *mddev);
void (*finish_reshape) (mddev_t *mddev);
- int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
/* quiesce moves between quiescence states
* 0 - fully active
* 1 - no new requests allowed
@@ -437,5 +430,6 @@ extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
+extern int md_check_no_bitmap(mddev_t *mddev);
#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4ee31aa13c40..cbe368fa6598 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -58,7 +58,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
unsigned long flags;
mddev_t *mddev = mp_bh->mddev;
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&mp_bh->retry_list, &conf->retry_list);
@@ -75,7 +75,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
struct bio *bio = mp_bh->master_bio;
- multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
+ multipath_conf_t *conf = mp_bh->mddev->private;
bio_endio(bio, err);
mempool_free(mp_bh, conf->pool);
@@ -85,7 +85,7 @@ static void multipath_end_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
- multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
+ multipath_conf_t *conf = mp_bh->mddev->private;
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
if (uptodate)
@@ -107,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error)
static void unplug_slaves(mddev_t *mddev)
{
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
int i;
rcu_read_lock();
@@ -138,7 +138,7 @@ static void multipath_unplug(struct request_queue *q)
static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
const int rw = bio_data_dir(bio);
@@ -180,7 +180,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
int i;
seq_printf (seq, " [%d/%d] [", conf->raid_disks,
@@ -195,7 +195,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
static int multipath_congested(void *data, int bits)
{
mddev_t *mddev = data;
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
int i, ret = 0;
rcu_read_lock();
@@ -220,7 +220,7 @@ static int multipath_congested(void *data, int bits)
*/
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
if (conf->working_disks <= 1) {
/*
@@ -367,7 +367,7 @@ static void multipathd (mddev_t *mddev)
struct multipath_bh *mp_bh;
struct bio *bio;
unsigned long flags;
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
md_check_recovery(mddev);
@@ -421,6 +421,9 @@ static int multipath_run (mddev_t *mddev)
struct multipath_info *disk;
mdk_rdev_t *rdev;
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+
if (mddev->level != LEVEL_MULTIPATH) {
printk("multipath: %s: raid level not set to multipath IO (%d)\n",
mdname(mddev), mddev->level);
@@ -531,7 +534,7 @@ out:
static int multipath_stop (mddev_t *mddev)
{
- multipath_conf_t *conf = mddev_to_conf(mddev);
+ multipath_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
diff --git a/drivers/md/multipath.h b/drivers/md/multipath.h
index 6fa70b400cda..d1c2a8d78395 100644
--- a/drivers/md/multipath.h
+++ b/drivers/md/multipath.h
@@ -19,12 +19,6 @@ struct multipath_private_data {
typedef struct multipath_private_data multipath_conf_t;
/*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
-
-/*
* this is our 'private' 'collective' MULTIPATH buffer head.
* it contains information about what kind of IO operations were started
* for this MULTIPATH operation, and about their status:
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 925507e7d673..ab4a489d8695 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,8 +26,8 @@
static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
+ raid0_conf_t *conf = mddev->private;
+ mdk_rdev_t **devlist = conf->devlist;
int i;
for (i=0; i<mddev->raid_disks; i++) {
@@ -40,8 +40,8 @@ static void raid0_unplug(struct request_queue *q)
static int raid0_congested(void *data, int bits)
{
mddev_t *mddev = data;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
+ raid0_conf_t *conf = mddev->private;
+ mdk_rdev_t **devlist = conf->devlist;
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
@@ -52,27 +52,60 @@ static int raid0_congested(void *data, int bits)
return ret;
}
+/*
+ * Inform the user of the RAID configuration.
+ */
+static void dump_zones(mddev_t *mddev)
+{
+ int j, k;
+ sector_t zone_size = 0;
+ sector_t zone_start = 0;
+ char b[BDEVNAME_SIZE];
+ raid0_conf_t *conf = mddev->private;
+ printk(KERN_INFO "******* %s configuration *********\n",
+ mdname(mddev));
+ for (j = 0; j < conf->nr_strip_zones; j++) {
+ printk(KERN_INFO "zone%d=[", j);
+ for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
+ printk("%s/",
+ bdevname(conf->devlist[j*mddev->raid_disks
+ + k]->bdev, b));
+ printk("]\n");
+
+ zone_size = conf->strip_zone[j].zone_end - zone_start;
+ printk(KERN_INFO " zone offset=%llukb "
+ "device offset=%llukb size=%llukb\n",
+ (unsigned long long)zone_start>>1,
+ (unsigned long long)conf->strip_zone[j].dev_start>>1,
+ (unsigned long long)zone_size>>1);
+ zone_start = conf->strip_zone[j].zone_end;
+ }
+ printk(KERN_INFO "**********************************\n\n");
+}
-static int create_strip_zones (mddev_t *mddev)
+static int create_strip_zones(mddev_t *mddev)
{
- int i, c, j;
- sector_t current_start, curr_zone_start;
- sector_t min_spacing;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
+ int i, c, j, err;
+ sector_t curr_zone_end, sectors;
+ mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
struct strip_zone *zone;
int cnt;
char b[BDEVNAME_SIZE];
-
- /*
- * The number of 'same size groups'
- */
- conf->nr_strip_zones = 0;
-
+ raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+
+ if (!conf)
+ return -ENOMEM;
list_for_each_entry(rdev1, &mddev->disks, same_set) {
printk(KERN_INFO "raid0: looking at %s\n",
bdevname(rdev1->bdev,b));
c = 0;
+
+ /* round size to chunk_size */
+ sectors = rdev1->sectors;
+ sector_div(sectors, mddev->chunk_sectors);
+ rdev1->sectors = sectors * mddev->chunk_sectors;
+
list_for_each_entry(rdev2, &mddev->disks, same_set) {
printk(KERN_INFO "raid0: comparing %s(%llu)",
bdevname(rdev1->bdev,b),
@@ -103,16 +136,16 @@ static int create_strip_zones (mddev_t *mddev)
}
}
printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
-
+ err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
if (!conf->strip_zone)
- return 1;
+ goto abort;
conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
conf->nr_strip_zones*mddev->raid_disks,
GFP_KERNEL);
if (!conf->devlist)
- return 1;
+ goto abort;
/* The first zone must contain all devices, so here we check that
* there is a proper alignment of slots to devices and find them all
@@ -120,7 +153,8 @@ static int create_strip_zones (mddev_t *mddev)
zone = &conf->strip_zone[0];
cnt = 0;
smallest = NULL;
- zone->dev = conf->devlist;
+ dev = conf->devlist;
+ err = -EINVAL;
list_for_each_entry(rdev1, &mddev->disks, same_set) {
int j = rdev1->raid_disk;
@@ -129,12 +163,12 @@ static int create_strip_zones (mddev_t *mddev)
"aborting!\n", j);
goto abort;
}
- if (zone->dev[j]) {
+ if (dev[j]) {
printk(KERN_ERR "raid0: multiple devices for %d - "
"aborting!\n", j);
goto abort;
}
- zone->dev[j] = rdev1;
+ dev[j] = rdev1;
blk_queue_stack_limits(mddev->queue,
rdev1->bdev->bd_disk->queue);
@@ -157,34 +191,32 @@ static int create_strip_zones (mddev_t *mddev)
goto abort;
}
zone->nb_dev = cnt;
- zone->sectors = smallest->sectors * cnt;
- zone->zone_start = 0;
+ zone->zone_end = smallest->sectors * cnt;
- current_start = smallest->sectors;
- curr_zone_start = zone->sectors;
+ curr_zone_end = zone->zone_end;
/* now do the other zones */
for (i = 1; i < conf->nr_strip_zones; i++)
{
zone = conf->strip_zone + i;
- zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;
+ dev = conf->devlist + i * mddev->raid_disks;
printk(KERN_INFO "raid0: zone %d\n", i);
- zone->dev_start = current_start;
+ zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
for (j=0; j<cnt; j++) {
char b[BDEVNAME_SIZE];
- rdev = conf->strip_zone[0].dev[j];
+ rdev = conf->devlist[j];
printk(KERN_INFO "raid0: checking %s ...",
bdevname(rdev->bdev, b));
- if (rdev->sectors <= current_start) {
+ if (rdev->sectors <= zone->dev_start) {
printk(KERN_INFO " nope.\n");
continue;
}
printk(KERN_INFO " contained as device %d\n", c);
- zone->dev[c] = rdev;
+ dev[c] = rdev;
c++;
if (!smallest || rdev->sectors < smallest->sectors) {
smallest = rdev;
@@ -194,47 +226,39 @@ static int create_strip_zones (mddev_t *mddev)
}
zone->nb_dev = c;
- zone->sectors = (smallest->sectors - current_start) * c;
+ sectors = (smallest->sectors - zone->dev_start) * c;
printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
- zone->nb_dev, (unsigned long long)zone->sectors);
+ zone->nb_dev, (unsigned long long)sectors);
- zone->zone_start = curr_zone_start;
- curr_zone_start += zone->sectors;
+ curr_zone_end += sectors;
+ zone->zone_end = curr_zone_end;
- current_start = smallest->sectors;
printk(KERN_INFO "raid0: current zone start: %llu\n",
- (unsigned long long)current_start);
- }
-
- /* Now find appropriate hash spacing.
- * We want a number which causes most hash entries to cover
- * at most two strips, but the hash table must be at most
- * 1 PAGE. We choose the smallest strip, or contiguous collection
- * of strips, that has big enough size. We never consider the last
- * strip though as it's size has no bearing on the efficacy of the hash
- * table.
- */
- conf->spacing = curr_zone_start;
- min_spacing = curr_zone_start;
- sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
- for (i=0; i < conf->nr_strip_zones-1; i++) {
- sector_t s = 0;
- for (j = i; j < conf->nr_strip_zones - 1 &&
- s < min_spacing; j++)
- s += conf->strip_zone[j].sectors;
- if (s >= min_spacing && s < conf->spacing)
- conf->spacing = s;
+ (unsigned long long)smallest->sectors);
}
-
mddev->queue->unplug_fn = raid0_unplug;
-
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
+ /*
+ * Now that we know the hard sector size, make sure the
+ * chunk size is a multiple of that sector size.
+ */
+ if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+ printk(KERN_ERR "%s chunk_size of %d not valid\n",
+ mdname(mddev),
+ mddev->chunk_sectors << 9);
+ goto abort;
+ }
printk(KERN_INFO "raid0: done.\n");
+ mddev->private = conf;
return 0;
- abort:
- return 1;
+abort:
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
+ mddev->private = NULL;
+ return err;
}
/**
@@ -252,10 +276,15 @@ static int raid0_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
- unsigned int chunk_sectors = mddev->chunk_size >> 9;
+ unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
- max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
+ if (is_power_of_2(chunk_sectors))
+ max = (chunk_sectors - ((sector & (chunk_sectors-1))
+ + bio_sectors)) << 9;
+ else
+ max = (chunk_sectors - (sector_div(sector, chunk_sectors)
+ + bio_sectors)) << 9;
if (max < 0) max = 0; /* bio_add cannot handle a negative return */
if (max <= biovec->bv_len && bio_sectors == 0)
return biovec->bv_len;
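raid0 can now cope with chunk sizes that are not a power of two, so every
place that used to mask with (chunk_sects - 1) grows a second branch that
divides with sector_div() instead; both branches compute the same
offset-within-chunk. A small standalone check of that equivalence, with
sector_div() modelled as an ordinary divide-and-remainder:

	#include <assert.h>

	typedef unsigned long long sector_t;

	/* Userspace stand-in for the kernel's sector_div(): divide in place,
	 * return the remainder. */
	static unsigned int sector_div_model(sector_t *s, unsigned int div)
	{
		unsigned int rem = (unsigned int)(*s % div);

		*s /= div;
		return rem;
	}

	static unsigned int offset_in_chunk(sector_t sector, unsigned int chunk_sects)
	{
		sector_t tmp = sector;

		if ((chunk_sects & (chunk_sects - 1)) == 0)	/* power of two: cheap mask */
			return (unsigned int)(sector & (chunk_sects - 1));
		/* general case: a real division (sector_div() in the kernel) */
		return sector_div_model(&tmp, chunk_sects);
	}

	int main(void)
	{
		/* 128-sector (64k) chunk vs a 96-sector (48k) chunk */
		assert(offset_in_chunk(1000, 128) == 1000 % 128);
		assert(offset_in_chunk(1000, 128) == (1000 & 127));
		assert(offset_in_chunk(1000, 96) == 1000 % 96);
		return 0;
	}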
@@ -277,84 +306,28 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return array_sectors;
}
-static int raid0_run (mddev_t *mddev)
+static int raid0_run(mddev_t *mddev)
{
- unsigned cur=0, i=0, nb_zone;
- s64 sectors;
- raid0_conf_t *conf;
+ int ret;
- if (mddev->chunk_size == 0) {
- printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
+ if (mddev->chunk_sectors == 0) {
+ printk(KERN_ERR "md/raid0: chunk size must be set.\n");
return -EINVAL;
}
- printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
- mdname(mddev),
- mddev->chunk_size >> 9,
- (mddev->chunk_size>>1)-1);
- blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
- blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
+ if (md_check_no_bitmap(mddev))
+ return -EINVAL;
+ blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
- conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
- if (!conf)
- goto out;
- mddev->private = (void *)conf;
-
- conf->strip_zone = NULL;
- conf->devlist = NULL;
- if (create_strip_zones (mddev))
- goto out_free_conf;
+ ret = create_strip_zones(mddev);
+ if (ret < 0)
+ return ret;
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
(unsigned long long)mddev->array_sectors);
- printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
- (unsigned long long)conf->spacing);
- {
- sector_t s = raid0_size(mddev, 0, 0);
- sector_t space = conf->spacing;
- int round;
- conf->sector_shift = 0;
- if (sizeof(sector_t) > sizeof(u32)) {
- /*shift down space and s so that sector_div will work */
- while (space > (sector_t) (~(u32)0)) {
- s >>= 1;
- space >>= 1;
- s += 1; /* force round-up */
- conf->sector_shift++;
- }
- }
- round = sector_div(s, (u32)space) ? 1 : 0;
- nb_zone = s + round;
- }
- printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
-
- printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
- nb_zone*sizeof(struct strip_zone*));
- conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
- if (!conf->hash_table)
- goto out_free_conf;
- sectors = conf->strip_zone[cur].sectors;
-
- conf->hash_table[0] = conf->strip_zone + cur;
- for (i=1; i< nb_zone; i++) {
- while (sectors <= conf->spacing) {
- cur++;
- sectors += conf->strip_zone[cur].sectors;
- }
- sectors -= conf->spacing;
- conf->hash_table[i] = conf->strip_zone + cur;
- }
- if (conf->sector_shift) {
- conf->spacing >>= conf->sector_shift;
- /* round spacing up so when we divide by it, we
- * err on the side of too-low, which is safest
- */
- conf->spacing++;
- }
-
/* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices
@@ -365,48 +338,107 @@ static int raid0_run (mddev_t *mddev)
* chunksize should be used in that case.
*/
{
- int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
+ int stripe = mddev->raid_disks *
+ (mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
-
blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
+ dump_zones(mddev);
return 0;
+}
-out_free_conf:
+static int raid0_stop(mddev_t *mddev)
+{
+ raid0_conf_t *conf = mddev->private;
+
+ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
mddev->private = NULL;
-out:
- return -ENOMEM;
+ return 0;
}
-static int raid0_stop (mddev_t *mddev)
+/* Find the zone which holds a particular offset
+ * Update *sectorp to be an offset in that zone
+ */
+static struct strip_zone *find_zone(struct raid0_private_data *conf,
+ sector_t *sectorp)
{
- raid0_conf_t *conf = mddev_to_conf(mddev);
+ int i;
+ struct strip_zone *z = conf->strip_zone;
+ sector_t sector = *sectorp;
+
+ for (i = 0; i < conf->nr_strip_zones; i++)
+ if (sector < z[i].zone_end) {
+ if (i)
+ *sectorp = sector - z[i-1].zone_end;
+ return z + i;
+ }
+ BUG();
+}
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- kfree(conf->hash_table);
- conf->hash_table = NULL;
- kfree(conf->strip_zone);
- conf->strip_zone = NULL;
- kfree(conf);
- mddev->private = NULL;
+/*
+ * Remap the bio to the target device. We separate two flows:
+ * a power-of-2 flow and a general flow, for the sake of performance.
+ */
+static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
+ sector_t sector, sector_t *sector_offset)
+{
+ unsigned int sect_in_chunk;
+ sector_t chunk;
+ raid0_conf_t *conf = mddev->private;
+ unsigned int chunk_sects = mddev->chunk_sectors;
+
+ if (is_power_of_2(chunk_sects)) {
+ int chunksect_bits = ffz(~chunk_sects);
+ /* find the sector offset inside the chunk */
+ sect_in_chunk = sector & (chunk_sects - 1);
+ sector >>= chunksect_bits;
+ /* chunk in zone */
+ chunk = *sector_offset;
+ /* quotient is the chunk in real device*/
+ sector_div(chunk, zone->nb_dev << chunksect_bits);
+ } else{
+ sect_in_chunk = sector_div(sector, chunk_sects);
+ chunk = *sector_offset;
+ sector_div(chunk, chunk_sects * zone->nb_dev);
+ }
+ /*
+ * position the bio over the real device
+ * real sector = chunk in device + starting of zone
+ * + the position in the chunk
+ */
+ *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
+ return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+ + sector_div(sector, zone->nb_dev)];
+}
- return 0;
+/*
+ * Is the io distributed over one or more chunks?
+*/
+static inline int is_io_in_chunk_boundary(mddev_t *mddev,
+ unsigned int chunk_sects, struct bio *bio)
+{
+ if (likely(is_power_of_2(chunk_sects))) {
+ return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ + (bio->bi_size >> 9));
+ } else{
+ sector_t sector = bio->bi_sector;
+ return chunk_sects >= (sector_div(sector, chunk_sects)
+ + (bio->bi_size >> 9));
+ }
}
-static int raid0_make_request (struct request_queue *q, struct bio *bio)
+static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
- unsigned int sect_in_chunk, chunksect_bits, chunk_sects;
- raid0_conf_t *conf = mddev_to_conf(mddev);
+ unsigned int chunk_sects;
+ sector_t sector_offset;
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- sector_t chunk;
- sector_t sector, rsect;
const int rw = bio_data_dir(bio);
int cpu;
@@ -421,11 +453,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
bio_sectors(bio));
part_stat_unlock();
- chunk_sects = mddev->chunk_size >> 9;
- chunksect_bits = ffz(~chunk_sects);
- sector = bio->bi_sector;
-
- if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
+ chunk_sects = mddev->chunk_sectors;
+ if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
+ sector_t sector = bio->bi_sector;
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
if (bio->bi_vcnt != 1 ||
@@ -434,7 +464,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
*/
- bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
+ if (likely(is_power_of_2(chunk_sects)))
+ bp = bio_split(bio, chunk_sects - (sector &
+ (chunk_sects-1)));
+ else
+ bp = bio_split(bio, chunk_sects -
+ sector_div(sector, chunk_sects));
if (raid0_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1);
if (raid0_make_request(q, &bp->bio2))
@@ -443,34 +478,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
bio_pair_release(bp);
return 0;
}
-
-
- {
- sector_t x = sector >> conf->sector_shift;
- sector_div(x, (u32)conf->spacing);
- zone = conf->hash_table[x];
- }
- while (sector >= zone->zone_start + zone->sectors)
- zone++;
-
- sect_in_chunk = bio->bi_sector & (chunk_sects - 1);
-
-
- {
- sector_t x = (sector - zone->zone_start) >> chunksect_bits;
-
- sector_div(x, zone->nb_dev);
- chunk = x;
-
- x = sector >> chunksect_bits;
- tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
- }
- rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
-
+ sector_offset = bio->bi_sector;
+ zone = find_zone(mddev->private, &sector_offset);
+ tmp_dev = map_sector(mddev, zone, bio->bi_sector,
+ &sector_offset);
bio->bi_bdev = tmp_dev->bdev;
- bio->bi_sector = rsect + tmp_dev->data_offset;
-
+ bio->bi_sector = sector_offset + zone->dev_start +
+ tmp_dev->data_offset;
/*
* Let the main block layer submit the IO and resolve recursion:
*/
@@ -485,31 +500,35 @@ bad_map:
return 0;
}
-static void raid0_status (struct seq_file *seq, mddev_t *mddev)
+static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
int j, k, h;
char b[BDEVNAME_SIZE];
- raid0_conf_t *conf = mddev_to_conf(mddev);
+ raid0_conf_t *conf = mddev->private;
+ sector_t zone_size;
+ sector_t zone_start = 0;
h = 0;
+
for (j = 0; j < conf->nr_strip_zones; j++) {
seq_printf(seq, " z%d", j);
- if (conf->hash_table[h] == conf->strip_zone+j)
- seq_printf(seq, "(h%d)", h++);
seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
seq_printf(seq, "%s/", bdevname(
- conf->strip_zone[j].dev[k]->bdev,b));
-
- seq_printf(seq, "] zs=%d ds=%d s=%d\n",
- conf->strip_zone[j].zone_start,
- conf->strip_zone[j].dev_start,
- conf->strip_zone[j].sectors);
+ conf->devlist[j*mddev->raid_disks + k]
+ ->bdev, b));
+
+ zone_size = conf->strip_zone[j].zone_end - zone_start;
+ seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
+ (unsigned long long)zone_start>>1,
+ (unsigned long long)conf->strip_zone[j].dev_start>>1,
+ (unsigned long long)zone_size>>1);
+ zone_start = conf->strip_zone[j].zone_end;
}
#endif
- seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
+ seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
return;
}
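
The arithmetic performed by the new find_zone() and map_sector() helpers above can be checked with a small stand-alone sketch. The zone layout, chunk size and target sector below are invented illustration values (and rdev->data_offset is ignored); only the formulas mirror the kernel code.

/* Stand-alone sketch of the raid0 zone/chunk arithmetic (illustrative values). */
#include <stdio.h>
#include <stdint.h>

struct ex_zone { uint64_t zone_end; uint64_t dev_start; int nb_dev; };

int main(void)
{
	/* Invented layout: zone 0 spans 3 disks for 3072 sectors,
	 * zone 1 spans the 2 larger disks for another 1024 sectors. */
	struct ex_zone z[2] = {
		{ .zone_end = 3072, .dev_start = 0,    .nb_dev = 3 },
		{ .zone_end = 4096, .dev_start = 1024, .nb_dev = 2 },
	};
	const unsigned int chunk_sects = 128;	/* 64K chunks */
	uint64_t sector = 3500;			/* array sector to map */

	/* find_zone(): first zone whose zone_end exceeds the sector,
	 * then rebase the offset to the start of that zone. */
	int zi = (sector < z[0].zone_end) ? 0 : 1;
	uint64_t offset = zi ? sector - z[zi - 1].zone_end : sector;

	/* map_sector(), power-of-2 flow. */
	unsigned int sect_in_chunk = sector & (chunk_sects - 1);
	uint64_t chunk = offset / (z[zi].nb_dev * chunk_sects);
	int dev = (sector / chunk_sects) % z[zi].nb_dev;
	uint64_t dev_sector = z[zi].dev_start + chunk * chunk_sects + sect_in_chunk;

	/* Prints: zone 1, device 1, device sector 1196 */
	printf("zone %d, device %d, device sector %llu\n",
	       zi, dev, (unsigned long long)dev_sector);
	return 0;
}
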
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 824b12eb1d4f..91f8e876ee64 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -3,26 +3,18 @@
struct strip_zone
{
- sector_t zone_start; /* Zone offset in md_dev (in sectors) */
+ sector_t zone_end; /* Start of the next zone (in sectors) */
sector_t dev_start; /* Zone offset in real dev (in sectors) */
- sector_t sectors; /* Zone size in sectors */
int nb_dev; /* # of devices attached to the zone */
- mdk_rdev_t **dev; /* Devices attached to the zone */
};
struct raid0_private_data
{
- struct strip_zone **hash_table; /* Table of indexes into strip_zone */
struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones;
-
- sector_t spacing;
- int sector_shift; /* shift this before divide by spacing */
};
typedef struct raid0_private_data raid0_conf_t;
-#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
-
#endif
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e23758b4a34e..89939a7aef57 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -182,7 +182,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
static void free_r1bio(r1bio_t *r1_bio)
{
- conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ conf_t *conf = r1_bio->mddev->private;
/*
* Wake up any possible resync thread that waits for the device
@@ -196,7 +196,7 @@ static void free_r1bio(r1bio_t *r1_bio)
static void put_buf(r1bio_t *r1_bio)
{
- conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ conf_t *conf = r1_bio->mddev->private;
int i;
for (i=0; i<conf->raid_disks; i++) {
@@ -214,7 +214,7 @@ static void reschedule_retry(r1bio_t *r1_bio)
{
unsigned long flags;
mddev_t *mddev = r1_bio->mddev;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r1_bio->retry_list, &conf->retry_list);
@@ -253,7 +253,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
*/
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
- conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ conf_t *conf = r1_bio->mddev->private;
conf->mirrors[disk].head_position =
r1_bio->sector + (r1_bio->sectors);
@@ -264,7 +264,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror;
- conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ conf_t *conf = r1_bio->mddev->private;
mirror = r1_bio->read_disk;
/*
@@ -309,7 +309,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
- conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
@@ -541,7 +541,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
static void unplug_slaves(mddev_t *mddev)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i;
rcu_read_lock();
@@ -573,7 +573,7 @@ static void raid1_unplug(struct request_queue *q)
static int raid1_congested(void *data, int bits)
{
mddev_t *mddev = data;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i, ret = 0;
rcu_read_lock();
@@ -772,7 +772,7 @@ do_sync_io:
static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
mirror_info_t *mirror;
r1bio_t *r1_bio;
struct bio *read_bio;
@@ -991,7 +991,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i;
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
@@ -1010,7 +1010,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
/*
* If it is not operational, then we have already marked it as dead
@@ -1214,7 +1214,7 @@ static void end_sync_write(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
mddev_t *mddev = r1_bio->mddev;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i;
int mirror=0;
@@ -1248,7 +1248,7 @@ static void end_sync_write(struct bio *bio, int error)
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i;
int disks = conf->raid_disks;
struct bio *bio, *wbio;
@@ -1562,7 +1562,7 @@ static void raid1d(mddev_t *mddev)
r1bio_t *r1_bio;
struct bio *bio;
unsigned long flags;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
int unplug=0;
mdk_rdev_t *rdev;
@@ -1585,7 +1585,7 @@ static void raid1d(mddev_t *mddev)
spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r1_bio->mddev;
- conf = mddev_to_conf(mddev);
+ conf = mddev->private;
if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
sync_request_write(mddev, r1_bio);
unplug = 1;
@@ -1706,7 +1706,7 @@ static int init_resync(conf_t *conf)
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
r1bio_t *r1_bio;
struct bio *bio;
sector_t max_sector, nr_sectors;
@@ -2052,6 +2052,10 @@ static int run(mddev_t *mddev)
goto out_free_conf;
}
+ if (mddev->recovery_cp != MaxSector)
+ printk(KERN_NOTICE "raid1: %s is not clean"
+ " -- starting background reconstruction\n",
+ mdname(mddev));
printk(KERN_INFO
"raid1: raid set %s active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2087,7 +2091,7 @@ out:
static int stop(mddev_t *mddev)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap;
int behind_wait = 0;
@@ -2155,16 +2159,16 @@ static int raid1_reshape(mddev_t *mddev)
mempool_t *newpool, *oldpool;
struct pool_info *newpoolinfo;
mirror_info_t *newmirrors;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
int d, d2, err;
/* Cannot change chunk_size, layout, or level */
- if (mddev->chunk_size != mddev->new_chunk ||
+ if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
mddev->layout != mddev->new_layout ||
mddev->level != mddev->new_level) {
- mddev->new_chunk = mddev->chunk_size;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout;
mddev->new_level = mddev->level;
return -EINVAL;
@@ -2252,7 +2256,7 @@ static int raid1_reshape(mddev_t *mddev)
static void raid1_quiesce(mddev_t *mddev, int state)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
switch(state) {
case 1:
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 1620eea3d57c..e87b84deff68 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -64,12 +64,6 @@ struct r1_private_data_s {
typedef struct r1_private_data_s conf_t;
/*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
-
-/*
* this is our 'private' RAID1 bio.
*
* it contains information about what kind of IO operations were started
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 750550c1166f..ae12ceafe10c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
static void free_r10bio(r10bio_t *r10_bio)
{
- conf_t *conf = mddev_to_conf(r10_bio->mddev);
+ conf_t *conf = r10_bio->mddev->private;
/*
* Wake up any possible resync thread that waits for the device
@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio)
static void put_buf(r10bio_t *r10_bio)
{
- conf_t *conf = mddev_to_conf(r10_bio->mddev);
+ conf_t *conf = r10_bio->mddev->private;
mempool_free(r10_bio, conf->r10buf_pool);
@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
{
unsigned long flags;
mddev_t *mddev = r10_bio->mddev;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r10_bio->retry_list, &conf->retry_list);
@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
*/
static inline void update_head_pos(int slot, r10bio_t *r10_bio)
{
- conf_t *conf = mddev_to_conf(r10_bio->mddev);
+ conf_t *conf = r10_bio->mddev->private;
conf->mirrors[r10_bio->devs[slot].devnum].head_position =
r10_bio->devs[slot].addr + (r10_bio->sectors);
@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
- conf_t *conf = mddev_to_conf(r10_bio->mddev);
+ conf_t *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev;
- conf_t *conf = mddev_to_conf(r10_bio->mddev);
+ conf_t *conf = r10_bio->mddev->private;
for (slot = 0; slot < conf->copies; slot++)
if (r10_bio->devs[slot].bio == bio)
@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
- unsigned int chunk_sectors = mddev->chunk_size >> 9;
+ unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
@@ -596,7 +596,7 @@ rb_out:
static void unplug_slaves(mddev_t *mddev)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i;
rcu_read_lock();
@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q)
static int raid10_congested(void *data, int bits)
{
mddev_t *mddev = data;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i, ret = 0;
rcu_read_lock();
@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf)
static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
mirror_info_t *mirror;
r10bio_t *r10_bio;
struct bio *read_bio;
@@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
static void status(struct seq_file *seq, mddev_t *mddev)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i;
if (conf->near_copies < conf->raid_disks)
- seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
+ seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
if (conf->near_copies > 1)
seq_printf(seq, " %d near-copies", conf->near_copies);
if (conf->far_copies > 1) {
@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
/*
* If it is not operational, then we have already marked it as dead
@@ -1215,7 +1215,7 @@ abort:
static void end_sync_read(struct bio *bio, int error)
{
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
- conf_t *conf = mddev_to_conf(r10_bio->mddev);
+ conf_t *conf = r10_bio->mddev->private;
int i,d;
for (i=0; i<conf->copies; i++)
@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
mddev_t *mddev = r10_bio->mddev;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i,d;
for (i = 0; i < conf->copies; i++)
@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error)
*/
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i, first;
struct bio *tbio, *fbio;
@@ -1400,7 +1400,7 @@ done:
static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
int i, d;
struct bio *bio, *wbio;
@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev)
r10bio_t *r10_bio;
struct bio *bio;
unsigned long flags;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
int unplug=0;
mdk_rdev_t *rdev;
@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev)
spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r10_bio->mddev;
- conf = mddev_to_conf(mddev);
+ conf = mddev->private;
if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
sync_request_write(mddev, r10_bio);
unplug = 1;
@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf)
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
r10bio_t *r10_bio;
struct bio *biolist = NULL, *bio;
sector_t max_sector, nr_sectors;
@@ -2026,7 +2026,7 @@ static sector_t
raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
sector_t size;
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
if (!raid_disks)
raid_disks = mddev->raid_disks;
@@ -2050,9 +2050,10 @@ static int run(mddev_t *mddev)
int nc, fc, fo;
sector_t stride, size;
- if (mddev->chunk_size < PAGE_SIZE) {
+ if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
+ !is_power_of_2(mddev->chunk_sectors)) {
printk(KERN_ERR "md/raid10: chunk size must be "
- "at least PAGE_SIZE(%ld).\n", PAGE_SIZE);
+ "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
return -EINVAL;
}
@@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev)
conf->far_copies = fc;
conf->copies = nc*fc;
conf->far_offset = fo;
- conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
- conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+ conf->chunk_mask = mddev->chunk_sectors - 1;
+ conf->chunk_shift = ffz(~mddev->chunk_sectors);
size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc);
size = size * conf->raid_disks;
@@ -2185,6 +2186,10 @@ static int run(mddev_t *mddev)
goto out_free_conf;
}
+ if (mddev->recovery_cp != MaxSector)
+ printk(KERN_NOTICE "raid10: %s is not clean"
+ " -- starting background reconstruction\n",
+ mdname(mddev));
printk(KERN_INFO
"raid10: raid set %s active with %d out of %d devices\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
@@ -2204,7 +2209,8 @@ static int run(mddev_t *mddev)
* maybe...
*/
{
- int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
+ int stripe = conf->raid_disks *
+ ((mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
@@ -2227,7 +2233,7 @@ out:
static int stop(mddev_t *mddev)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
raise_barrier(conf, 0);
lower_barrier(conf);
@@ -2245,7 +2251,7 @@ static int stop(mddev_t *mddev)
static void raid10_quiesce(mddev_t *mddev, int state)
{
- conf_t *conf = mddev_to_conf(mddev);
+ conf_t *conf = mddev->private;
switch(state) {
case 1:
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 244dbe507a54..59cd1efb8d30 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -62,12 +62,6 @@ struct r10_private_data_s {
typedef struct r10_private_data_s conf_t;
/*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
-
-/*
* this is our 'private' RAID10 bio.
*
* it contains information about what kind of IO operations were started
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index bef876698232..f9f991e6e138 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1274,8 +1274,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
sector_t new_sector;
int algorithm = previous ? conf->prev_algo
: conf->algorithm;
- int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
- : (conf->chunk_size >> 9);
+ int sectors_per_chunk = previous ? conf->prev_chunk_sectors
+ : conf->chunk_sectors;
int raid_disks = previous ? conf->previous_raid_disks
: conf->raid_disks;
int data_disks = raid_disks - conf->max_degraded;
@@ -1480,8 +1480,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
int raid_disks = sh->disks;
int data_disks = raid_disks - conf->max_degraded;
sector_t new_sector = sh->sector, check;
- int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
- : (conf->chunk_size >> 9);
+ int sectors_per_chunk = previous ? conf->prev_chunk_sectors
+ : conf->chunk_sectors;
int algorithm = previous ? conf->prev_algo
: conf->algorithm;
sector_t stripe;
@@ -1997,8 +1997,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
struct stripe_head *sh)
{
int sectors_per_chunk =
- previous ? (conf->prev_chunk >> 9)
- : (conf->chunk_size >> 9);
+ previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
int dd_idx;
int chunk_offset = sector_div(stripe, sectors_per_chunk);
int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
@@ -3284,7 +3283,7 @@ static void activate_bit_delay(raid5_conf_t *conf)
static void unplug_slaves(mddev_t *mddev)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
int i;
rcu_read_lock();
@@ -3308,7 +3307,7 @@ static void unplug_slaves(mddev_t *mddev)
static void raid5_unplug_device(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
@@ -3327,7 +3326,7 @@ static void raid5_unplug_device(struct request_queue *q)
static int raid5_congested(void *data, int bits)
{
mddev_t *mddev = data;
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
/* No difference between reads and writes. Just check
* how busy the stripe_cache is
@@ -3352,14 +3351,14 @@ static int raid5_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
- unsigned int chunk_sectors = mddev->chunk_size >> 9;
+ unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
if ((bvm->bi_rw & 1) == WRITE)
return biovec->bv_len; /* always allow writes to be mergeable */
- if (mddev->new_chunk < mddev->chunk_size)
- chunk_sectors = mddev->new_chunk >> 9;
+ if (mddev->new_chunk_sectors < mddev->chunk_sectors)
+ chunk_sectors = mddev->new_chunk_sectors;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0) max = 0;
if (max <= biovec->bv_len && bio_sectors == 0)
@@ -3372,11 +3371,11 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
- unsigned int chunk_sectors = mddev->chunk_size >> 9;
+ unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio->bi_size >> 9;
- if (mddev->new_chunk < mddev->chunk_size)
- chunk_sectors = mddev->new_chunk >> 9;
+ if (mddev->new_chunk_sectors < mddev->chunk_sectors)
+ chunk_sectors = mddev->new_chunk_sectors;
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
}
@@ -3440,7 +3439,7 @@ static void raid5_align_endio(struct bio *bi, int error)
bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
- conf = mddev_to_conf(mddev);
+ conf = mddev->private;
rdev = (void*)raid_bi->bi_next;
raid_bi->bi_next = NULL;
@@ -3482,7 +3481,7 @@ static int bio_fits_rdev(struct bio *bi)
static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
unsigned int dd_idx;
struct bio* align_bi;
mdk_rdev_t *rdev;
@@ -3599,7 +3598,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
static int make_request(struct request_queue *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
int dd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
@@ -3696,6 +3695,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
spin_unlock_irq(&conf->device_lock);
if (must_retry) {
release_stripe(sh);
+ schedule();
goto retry;
}
}
@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* If old and new chunk sizes differ, we need to process the
* largest of these
*/
- if (mddev->new_chunk > mddev->chunk_size)
- reshape_sectors = mddev->new_chunk / 512;
+ if (mddev->new_chunk_sectors > mddev->chunk_sectors)
+ reshape_sectors = mddev->new_chunk_sectors;
else
- reshape_sectors = mddev->chunk_size / 512;
+ reshape_sectors = mddev->chunk_sectors;
/* we update the metadata when there is more than 3Meg
* in the block range (that is rather arbitrary, should
@@ -3917,7 +3917,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
1, &dd_idx, NULL);
last_sector =
raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
- *(new_data_disks) - 1),
+ * new_data_disks - 1),
1, &dd_idx, NULL);
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
@@ -3946,7 +3946,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes) == 0);
mddev->reshape_position = conf->reshape_progress;
- mddev->curr_resync_completed = mddev->curr_resync;
+ mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
@@ -4129,7 +4129,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
static void raid5d(mddev_t *mddev)
{
struct stripe_head *sh;
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
int handled;
pr_debug("+++ raid5d active\n");
@@ -4185,7 +4185,7 @@ static void raid5d(mddev_t *mddev)
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
if (conf)
return sprintf(page, "%d\n", conf->max_nr_stripes);
else
@@ -4195,7 +4195,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
unsigned long new;
int err;
@@ -4233,7 +4233,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
if (conf)
return sprintf(page, "%d\n", conf->bypass_threshold);
else
@@ -4243,7 +4243,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page)
static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
unsigned long new;
if (len >= PAGE_SIZE)
return -EINVAL;
@@ -4267,7 +4267,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
if (conf)
return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
else
@@ -4291,7 +4291,7 @@ static struct attribute_group raid5_attrs_group = {
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
if (!sectors)
sectors = mddev->dev_sectors;
@@ -4303,8 +4303,8 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
raid_disks = conf->previous_raid_disks;
}
- sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
- sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
+ sectors &= ~((sector_t)mddev->chunk_sectors - 1);
+ sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
return sectors * (raid_disks - conf->max_degraded);
}
@@ -4336,9 +4336,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
return ERR_PTR(-EINVAL);
}
- if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
+ if (!mddev->new_chunk_sectors ||
+ (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
+ !is_power_of_2(mddev->new_chunk_sectors)) {
printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
- mddev->new_chunk, mdname(mddev));
+ mddev->new_chunk_sectors << 9, mdname(mddev));
return ERR_PTR(-EINVAL);
}
@@ -4401,7 +4403,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
conf->fullsync = 1;
}
- conf->chunk_size = mddev->new_chunk;
+ conf->chunk_sectors = mddev->new_chunk_sectors;
conf->level = mddev->new_level;
if (conf->level == 6)
conf->max_degraded = 2;
@@ -4411,7 +4413,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
- conf->prev_chunk = mddev->chunk_size;
+ conf->prev_chunk_sectors = mddev->chunk_sectors;
conf->prev_algo = mddev->layout;
}
@@ -4453,6 +4455,10 @@ static int run(mddev_t *mddev)
int working_disks = 0;
mdk_rdev_t *rdev;
+ if (mddev->recovery_cp != MaxSector)
+ printk(KERN_NOTICE "raid5: %s is not clean"
+ " -- starting background reconstruction\n",
+ mdname(mddev));
if (mddev->reshape_position != MaxSector) {
/* Check that we can continue the reshape.
* Currently only disks can change, it must
@@ -4475,7 +4481,7 @@ static int run(mddev_t *mddev)
* geometry.
*/
here_new = mddev->reshape_position;
- if (sector_div(here_new, (mddev->new_chunk>>9)*
+ if (sector_div(here_new, mddev->new_chunk_sectors *
(mddev->raid_disks - max_degraded))) {
printk(KERN_ERR "raid5: reshape_position not "
"on a stripe boundary\n");
@@ -4483,7 +4489,7 @@ static int run(mddev_t *mddev)
}
/* here_new is the stripe we will write to */
here_old = mddev->reshape_position;
- sector_div(here_old, (mddev->chunk_size>>9)*
+ sector_div(here_old, mddev->chunk_sectors *
(old_disks-max_degraded));
/* here_old is the first stripe that we might need to read
* from */
@@ -4498,7 +4504,7 @@ static int run(mddev_t *mddev)
} else {
BUG_ON(mddev->level != mddev->new_level);
BUG_ON(mddev->layout != mddev->new_layout);
- BUG_ON(mddev->chunk_size != mddev->new_chunk);
+ BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
BUG_ON(mddev->delta_disks != 0);
}
@@ -4532,7 +4538,7 @@ static int run(mddev_t *mddev)
}
/* device size must be a multiple of chunk size */
- mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
+ mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > 0 &&
@@ -4581,7 +4587,7 @@ static int run(mddev_t *mddev)
{
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
- (mddev->chunk_size / PAGE_SIZE);
+ ((mddev->chunk_sectors << 9) / PAGE_SIZE);
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
}
@@ -4678,7 +4684,8 @@ static void status(struct seq_file *seq, mddev_t *mddev)
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
int i;
- seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
+ seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
+ mddev->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
@@ -4826,7 +4833,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems
* worth it.
*/
- sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
+ sectors &= ~((sector_t)mddev->chunk_sectors - 1);
md_set_array_sectors(mddev, raid5_size(mddev, sectors,
mddev->raid_disks));
if (mddev->array_sectors >
@@ -4843,14 +4850,37 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
return 0;
}
-static int raid5_check_reshape(mddev_t *mddev)
+static int check_stripe_cache(mddev_t *mddev)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ /* Can only proceed if there are plenty of stripe_heads.
+	 * We need a minimum of one full stripe, and for sensible progress
+ * it is best to have about 4 times that.
+ * If we require 4 times, then the default 256 4K stripe_heads will
+ * allow for chunk sizes up to 256K, which is probably OK.
+ * If the chunk size is greater, user-space should request more
+ * stripe_heads first.
+ */
+ raid5_conf_t *conf = mddev->private;
+ if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
+ > conf->max_nr_stripes ||
+ ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
+ > conf->max_nr_stripes) {
+ printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
+ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+ / STRIPE_SIZE)*4);
+ return 0;
+ }
+ return 1;
+}
+
+static int check_reshape(mddev_t *mddev)
+{
+ raid5_conf_t *conf = mddev->private;
if (mddev->delta_disks == 0 &&
mddev->new_layout == mddev->layout &&
- mddev->new_chunk == mddev->chunk_size)
- return -EINVAL; /* nothing to do */
+ mddev->new_chunk_sectors == mddev->chunk_sectors)
+ return 0; /* nothing to do */
if (mddev->bitmap)
/* Cannot grow a bitmap yet */
return -EBUSY;
@@ -4869,28 +4899,15 @@ static int raid5_check_reshape(mddev_t *mddev)
return -EINVAL;
}
- /* Can only proceed if there are plenty of stripe_heads.
- * We need a minimum of one full stripe,, and for sensible progress
- * it is best to have about 4 times that.
- * If we require 4 times, then the default 256 4K stripe_heads will
- * allow for chunk sizes up to 256K, which is probably OK.
- * If the chunk size is greater, user-space should request more
- * stripe_heads first.
- */
- if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
- (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
- printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
- (max(mddev->chunk_size, mddev->new_chunk)
- / STRIPE_SIZE)*4);
+ if (!check_stripe_cache(mddev))
return -ENOSPC;
- }
return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
static int raid5_start_reshape(mddev_t *mddev)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
mdk_rdev_t *rdev;
int spares = 0;
int added_devices = 0;
@@ -4899,6 +4916,9 @@ static int raid5_start_reshape(mddev_t *mddev)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
+ if (!check_stripe_cache(mddev))
+ return -ENOSPC;
+
list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags))
@@ -4925,8 +4945,8 @@ static int raid5_start_reshape(mddev_t *mddev)
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
conf->raid_disks += mddev->delta_disks;
- conf->prev_chunk = conf->chunk_size;
- conf->chunk_size = mddev->new_chunk;
+ conf->prev_chunk_sectors = conf->chunk_sectors;
+ conf->chunk_sectors = mddev->new_chunk_sectors;
conf->prev_algo = conf->algorithm;
conf->algorithm = mddev->new_layout;
if (mddev->delta_disks < 0)
@@ -5008,7 +5028,7 @@ static void end_reshape(raid5_conf_t *conf)
*/
{
int data_disks = conf->raid_disks - conf->max_degraded;
- int stripe = data_disks * (conf->chunk_size
+ int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
@@ -5022,7 +5042,7 @@ static void end_reshape(raid5_conf_t *conf)
static void raid5_finish_reshape(mddev_t *mddev)
{
struct block_device *bdev;
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -5053,7 +5073,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
raid5_remove_disk(mddev, d);
}
mddev->layout = conf->algorithm;
- mddev->chunk_size = conf->chunk_size;
+ mddev->chunk_sectors = conf->chunk_sectors;
mddev->reshape_position = MaxSector;
mddev->delta_disks = 0;
}
@@ -5061,7 +5081,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
static void raid5_quiesce(mddev_t *mddev, int state)
{
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
switch(state) {
case 2: /* resume for a suspend */
@@ -5111,7 +5131,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev)
mddev->new_level = 5;
mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
- mddev->new_chunk = chunksect << 9;
+ mddev->new_chunk_sectors = chunksect;
return setup_conf(mddev);
}
@@ -5150,24 +5170,24 @@ static void *raid5_takeover_raid6(mddev_t *mddev)
}
-static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
+static int raid5_check_reshape(mddev_t *mddev)
{
/* For a 2-drive array, the layout and chunk size can be changed
* immediately as not restriping is needed.
* For larger arrays we record the new value - after validation
* to be used by a reshape pass.
*/
- raid5_conf_t *conf = mddev_to_conf(mddev);
+ raid5_conf_t *conf = mddev->private;
+ int new_chunk = mddev->new_chunk_sectors;
- if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
+ if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
return -EINVAL;
if (new_chunk > 0) {
- if (new_chunk & (new_chunk-1))
- /* not a power of 2 */
+ if (!is_power_of_2(new_chunk))
return -EINVAL;
- if (new_chunk < PAGE_SIZE)
+ if (new_chunk < (PAGE_SIZE>>9))
return -EINVAL;
- if (mddev->array_sectors & ((new_chunk>>9)-1))
+ if (mddev->array_sectors & (new_chunk-1))
/* not factor of array size */
return -EINVAL;
}
@@ -5175,49 +5195,39 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
/* They look valid */
if (mddev->raid_disks == 2) {
-
- if (new_layout >= 0) {
- conf->algorithm = new_layout;
- mddev->layout = mddev->new_layout = new_layout;
+ /* can make the change immediately */
+ if (mddev->new_layout >= 0) {
+ conf->algorithm = mddev->new_layout;
+ mddev->layout = mddev->new_layout;
}
if (new_chunk > 0) {
- conf->chunk_size = new_chunk;
- mddev->chunk_size = mddev->new_chunk = new_chunk;
+			conf->chunk_sectors = new_chunk;
+ mddev->chunk_sectors = new_chunk;
}
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
- } else {
- if (new_layout >= 0)
- mddev->new_layout = new_layout;
- if (new_chunk > 0)
- mddev->new_chunk = new_chunk;
}
- return 0;
+ return check_reshape(mddev);
}
-static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
+static int raid6_check_reshape(mddev_t *mddev)
{
- if (new_layout >= 0 && !algorithm_valid_raid6(new_layout))
+ int new_chunk = mddev->new_chunk_sectors;
+
+ if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
return -EINVAL;
if (new_chunk > 0) {
- if (new_chunk & (new_chunk-1))
- /* not a power of 2 */
+ if (!is_power_of_2(new_chunk))
return -EINVAL;
- if (new_chunk < PAGE_SIZE)
+ if (new_chunk < (PAGE_SIZE >> 9))
return -EINVAL;
- if (mddev->array_sectors & ((new_chunk>>9)-1))
+ if (mddev->array_sectors & (new_chunk-1))
/* not factor of array size */
return -EINVAL;
}
/* They look valid */
-
- if (new_layout >= 0)
- mddev->new_layout = new_layout;
- if (new_chunk > 0)
- mddev->new_chunk = new_chunk;
-
- return 0;
+ return check_reshape(mddev);
}
static void *raid5_takeover(mddev_t *mddev)
@@ -5227,8 +5237,6 @@ static void *raid5_takeover(mddev_t *mddev)
* raid1 - if there are two drives. We need to know the chunk size
* raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout
- *
- * For now, just do raid1
*/
if (mddev->level == 1)
@@ -5310,12 +5318,11 @@ static struct mdk_personality raid6_personality =
.sync_request = sync_request,
.resize = raid5_resize,
.size = raid5_size,
- .check_reshape = raid5_check_reshape,
+ .check_reshape = raid6_check_reshape,
.start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
- .reconfig = raid6_reconfig,
};
static struct mdk_personality raid5_personality =
{
@@ -5338,7 +5345,6 @@ static struct mdk_personality raid5_personality =
.finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
- .reconfig = raid5_reconfig,
};
static struct mdk_personality raid4_personality =
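
The sizing rule that run() and the reshape paths now share via check_stripe_cache() is easy to sanity-check with a few numbers. The sketch below assumes a 4K STRIPE_SIZE and the default cache of 256 stripe_heads quoted in the comment; the chunk sizes are examples only.

/* Sketch of the check_stripe_cache() sizing rule with illustrative numbers. */
#include <stdio.h>

int main(void)
{
	const unsigned int stripe_size = 4096;		/* STRIPE_SIZE on a 4K-page kernel */
	const unsigned int max_nr_stripes = 256;	/* default stripe_cache_size */
	unsigned int chunk_bytes[] = { 64 << 10, 256 << 10, 512 << 10 };

	for (int i = 0; i < 3; i++) {
		unsigned int needed = (chunk_bytes[i] / stripe_size) * 4;
		printf("%3uK chunk needs %3u stripe_heads: %s\n",
		       chunk_bytes[i] >> 10, needed,
		       needed > max_nr_stripes ? "raise stripe_cache_size first"
					       : "fits the default cache");
	}
	return 0;
}

As the comment states, 256K chunks are the largest that fit the default cache; anything bigger requires user-space to grow stripe_cache_size via sysfs before the reshape is allowed.
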
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 52ba99954dec..9459689c4ea0 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -334,7 +334,8 @@ struct raid5_private_data {
struct hlist_head *stripe_hashtbl;
mddev_t *mddev;
struct disk_info *spare;
- int chunk_size, level, algorithm;
+ int chunk_sectors;
+ int level, algorithm;
int max_degraded;
int raid_disks;
int max_nr_stripes;
@@ -350,7 +351,8 @@ struct raid5_private_data {
*/
sector_t reshape_safe;
int previous_raid_disks;
- int prev_chunk, prev_algo;
+ int prev_chunk_sectors;
+ int prev_algo;
short generation; /* increments with every reshape */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
@@ -408,8 +410,6 @@ struct raid5_private_data {
typedef struct raid5_private_data raid5_conf_t;
-#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
-
/*
* Our supported algorithms
*/
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index c0d911252862..0bce255168bd 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -1067,10 +1067,12 @@ static int ov772x_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int ret;
- info = client->dev.platform_data;
- if (!info)
+ if (!client->dev.platform_data)
return -EINVAL;
+ info = container_of(client->dev.platform_data,
+ struct ov772x_camera_info, link);
+
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
"I2C-Adapter doesn't support "
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index a39947643992..aa5065ea09ed 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -875,10 +875,12 @@ static int tw9910_probe(struct i2c_client *client,
const struct tw9910_scale_ctrl *scale;
int i, ret;
- info = client->dev.platform_data;
- if (!info)
+ if (!client->dev.platform_data)
return -EINVAL;
+ info = container_of(client->dev.platform_data,
+ struct tw9910_video_info, link);
+
if (!i2c_check_functionality(to_i2c_adapter(client->dev.parent),
I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ee3927ab11e0..491ac0f800d2 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -30,6 +30,7 @@ config MFD_SM501_GPIO
config MFD_ASIC3
bool "Support for Compaq ASIC3"
depends on GENERIC_HARDIRQS && GPIOLIB && ARM
+ select MFD_CORE
---help---
This driver supports the ASIC3 multifunction chip found on many
PDAs (mainly iPAQ and HTC based ones)
@@ -152,7 +153,7 @@ config MFD_WM8400
depends on I2C
help
Support for the Wolfson Microelecronics WM8400 PMIC and audio
- CODEC. This driver adds provides common support for accessing
+ CODEC. This driver provides common support for accessing
the device, additional drivers must be enabled in order to use
the functionality of the device.
@@ -241,6 +242,27 @@ config PCF50633_GPIO
Say yes here if you want to include support GPIO for pins on
the PCF50633 chip.
+config AB3100_CORE
+ tristate "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
+ depends on I2C
+ default y if ARCH_U300
+ help
+ Select this to enable the AB3100 Mixed Signal IC core
+	  functionality. This connects to an AB3100 on the I2C bus
+	  and exposes a number of symbols needed for dependent devices
+ to read and write registers and subscribe to events from
+ this multi-functional IC. This is needed to use other features
+ of the AB3100 such as battery-backed RTC, charging control,
+ LEDs, vibrator, system power and temperature, power management
+ and ALSA sound.
+
+config EZX_PCAP
+ bool "PCAP Support"
+ depends on GENERIC_HARDIRQS && SPI_MASTER
+ help
+ This enables the PCAP ASIC present on EZX Phones. This is
+	  needed for MMC, TouchScreen, Sound, USB, etc.
+
endmenu
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 3afb5192e4da..6f8a9a1af20b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
+obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o
+
obj-$(CONFIG_MCP) += mcp-core.o
obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o
@@ -40,4 +42,5 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
-obj-$(CONFIG_PCF50633_GPIO)	+= pcf50633-gpio.o
\ No newline at end of file
+obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
+obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
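
For orientation before the new core driver itself, a rough sketch of how a dependent driver might consume the interface described in the Kconfig help above (register access plus event subscription). It is illustrative only: the sub-driver, its init hook, register 0x50 and the 0x10 event mask are invented, and it assumes the ab3100_* helpers and struct ab3100 defined in ab3100-core.c below are declared in <linux/mfd/ab3100.h>.

/* Hypothetical AB3100 sub-driver fragment; only the ab3100_* helpers are
 * taken from the core below, everything else is made up for illustration. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/mfd/ab3100.h>

static int example_ab3100_event(struct notifier_block *nb,
				unsigned long fatevent, void *data)
{
	/* Every subscriber sees the whole event word and masks out
	 * the bits it cares about, as the core comments describe. */
	if (fatevent & 0x10)
		pr_info("example: AB3100 event bit 4 fired\n");
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_ab3100_event,
};

/* Called with the struct ab3100 that the core hands to its children */
static int example_ab3100_init(struct ab3100 *ab3100)
{
	u8 val;
	int err;

	err = ab3100_event_register(ab3100, &example_nb);
	if (err)
		return err;

	/* All register traffic goes through the core helpers, which
	 * serialise the I2C transfers on the chip's access mutex. */
	err = ab3100_mask_and_set_register(ab3100, 0x50, 0xfe, 0x01);
	if (err)
		return err;

	return ab3100_get_register(ab3100, 0x50, &val);
}
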
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
new file mode 100644
index 000000000000..13e7d7bfe85f
--- /dev/null
+++ b/drivers/mfd/ab3100-core.c
@@ -0,0 +1,991 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Low-level core for exclusive access to the AB3100 IC on the I2C bus
+ * and some basic chip-configuration.
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/ab3100.h>
+
+/* These are the only registers inside AB3100 used in this main file */
+
+/* Interrupt event registers */
+#define AB3100_EVENTA1 0x21
+#define AB3100_EVENTA2 0x22
+#define AB3100_EVENTA3 0x23
+
+/* AB3100 DAC converter registers */
+#define AB3100_DIS 0x00
+#define AB3100_D0C 0x01
+#define AB3100_D1C 0x02
+#define AB3100_D2C 0x03
+#define AB3100_D3C 0x04
+
+/* Chip ID register */
+#define AB3100_CID 0x20
+
+/* AB3100 interrupt registers */
+#define AB3100_IMRA1 0x24
+#define AB3100_IMRA2 0x25
+#define AB3100_IMRA3 0x26
+#define AB3100_IMRB1 0x2B
+#define AB3100_IMRB2 0x2C
+#define AB3100_IMRB3 0x2D
+
+/* System Power Monitoring and control registers */
+#define AB3100_MCA 0x2E
+#define AB3100_MCB 0x2F
+
+/* SIM power up */
+#define AB3100_SUP 0x50
+
+/*
+ * I2C communication
+ *
+ * The AB3100 is usually assigned address 0x48 (7-bit)
+ * The chip is defined in the platform i2c_board_data section.
+ */
+static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };
+I2C_CLIENT_INSMOD_1(ab3100);
+
+u8 ab3100_get_chip_type(struct ab3100 *ab3100)
+{
+ u8 chip = ABUNKNOWN;
+
+ switch (ab3100->chip_id & 0xf0) {
+ case 0xa0:
+ chip = AB3000;
+ break;
+ case 0xc0:
+ chip = AB3100;
+ break;
+ }
+ return chip;
+}
+EXPORT_SYMBOL(ab3100_get_chip_type);
+
+int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval)
+{
+ u8 regandval[2] = {reg, regval};
+ int err;
+
+ err = mutex_lock_interruptible(&ab3100->access_mutex);
+ if (err)
+ return err;
+
+ /*
+ * A two-byte write message with the first byte containing the register
+ * number and the second byte containing the value to be written
+ * effectively sets a register in the AB3100.
+ */
+ err = i2c_master_send(ab3100->i2c_client, regandval, 2);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (write register): %d\n",
+ err);
+ } else if (err != 2) {
+ dev_err(ab3100->dev,
+ "write error (write register) "
+ "%d bytes transferred (expected 2)\n",
+ err);
+ err = -EIO;
+ } else {
+ /* All is well */
+ err = 0;
+ }
+ mutex_unlock(&ab3100->access_mutex);
+	return err;
+}
+EXPORT_SYMBOL(ab3100_set_register);
+
+/*
+ * The test registers exist at an I2C bus address up one
+ * from the ordinary base. They are not supposed to be used
+ * in production code, but sometimes you have to do that
+ * anyway. It's currently only used from this file so declare
+ * it static and do not export.
+ */
+static int ab3100_set_test_register(struct ab3100 *ab3100,
+ u8 reg, u8 regval)
+{
+ u8 regandval[2] = {reg, regval};
+ int err;
+
+ err = mutex_lock_interruptible(&ab3100->access_mutex);
+ if (err)
+ return err;
+
+ err = i2c_master_send(ab3100->testreg_client, regandval, 2);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (write test register): %d\n",
+ err);
+ } else if (err != 2) {
+ dev_err(ab3100->dev,
+ "write error (write test register) "
+ "%d bytes transferred (expected 2)\n",
+ err);
+ err = -EIO;
+ } else {
+ /* All is well */
+ err = 0;
+ }
+ mutex_unlock(&ab3100->access_mutex);
+
+ return err;
+}
+
+int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&ab3100->access_mutex);
+ if (err)
+ return err;
+
+ /*
+	 * The AB3100 requires an I2C "stop" command between each message, else
+	 * it will not work. The only way of achieving this with the
+ * message transport layer is to send the read and write messages
+ * separately.
+ */
+ err = i2c_master_send(ab3100->i2c_client, &reg, 1);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (send register address): %d\n",
+ err);
+ goto get_reg_out_unlock;
+ } else if (err != 1) {
+ dev_err(ab3100->dev,
+ "write error (send register address) "
+ "%d bytes transferred (expected 1)\n",
+ err);
+ err = -EIO;
+ goto get_reg_out_unlock;
+ } else {
+ /* All is well */
+ err = 0;
+ }
+
+ err = i2c_master_recv(ab3100->i2c_client, regval, 1);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (read register): %d\n",
+ err);
+ goto get_reg_out_unlock;
+ } else if (err != 1) {
+ dev_err(ab3100->dev,
+ "write error (read register) "
+ "%d bytes transferred (expected 1)\n",
+ err);
+ err = -EIO;
+ goto get_reg_out_unlock;
+ } else {
+ /* All is well */
+ err = 0;
+ }
+
+ get_reg_out_unlock:
+ mutex_unlock(&ab3100->access_mutex);
+ return err;
+}
+EXPORT_SYMBOL(ab3100_get_register);
+
+int ab3100_get_register_page(struct ab3100 *ab3100,
+ u8 first_reg, u8 *regvals, u8 numregs)
+{
+ int err;
+
+ if (ab3100->chip_id == 0xa0 ||
+ ab3100->chip_id == 0xa1)
+ /* These don't support paged reads */
+ return -EIO;
+
+ err = mutex_lock_interruptible(&ab3100->access_mutex);
+ if (err)
+ return err;
+
+ /*
+	 * Paged reads also require an I2C "stop" command.
+ */
+ err = i2c_master_send(ab3100->i2c_client, &first_reg, 1);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (send first register address): %d\n",
+ err);
+ goto get_reg_page_out_unlock;
+ } else if (err != 1) {
+ dev_err(ab3100->dev,
+ "write error (send first register address) "
+ "%d bytes transferred (expected 1)\n",
+ err);
+ err = -EIO;
+ goto get_reg_page_out_unlock;
+ }
+
+ err = i2c_master_recv(ab3100->i2c_client, regvals, numregs);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (read register page): %d\n",
+ err);
+ goto get_reg_page_out_unlock;
+ } else if (err != numregs) {
+ dev_err(ab3100->dev,
+ "write error (read register page) "
+ "%d bytes transferred (expected %d)\n",
+ err, numregs);
+ err = -EIO;
+ goto get_reg_page_out_unlock;
+ }
+
+ /* All is well */
+ err = 0;
+
+ get_reg_page_out_unlock:
+ mutex_unlock(&ab3100->access_mutex);
+ return err;
+}
+EXPORT_SYMBOL(ab3100_get_register_page);
+
+int ab3100_mask_and_set_register(struct ab3100 *ab3100,
+ u8 reg, u8 andmask, u8 ormask)
+{
+ u8 regandval[2] = {reg, 0};
+ int err;
+
+ err = mutex_lock_interruptible(&ab3100->access_mutex);
+ if (err)
+ return err;
+
+ /* First read out the target register */
+ err = i2c_master_send(ab3100->i2c_client, &reg, 1);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (maskset send address): %d\n",
+ err);
+ goto get_maskset_unlock;
+ } else if (err != 1) {
+ dev_err(ab3100->dev,
+ "write error (maskset send address) "
+ "%d bytes transferred (expected 1)\n",
+ err);
+ err = -EIO;
+ goto get_maskset_unlock;
+ }
+
+ err = i2c_master_recv(ab3100->i2c_client, &regandval[1], 1);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (maskset read register): %d\n",
+ err);
+ goto get_maskset_unlock;
+ } else if (err != 1) {
+ dev_err(ab3100->dev,
+ "write error (maskset read register) "
+ "%d bytes transferred (expected 1)\n",
+ err);
+ err = -EIO;
+ goto get_maskset_unlock;
+ }
+
+ /* Modify the register */
+ regandval[1] &= andmask;
+ regandval[1] |= ormask;
+
+ /* Write the register */
+ err = i2c_master_send(ab3100->i2c_client, regandval, 2);
+ if (err < 0) {
+ dev_err(ab3100->dev,
+ "write error (write register): %d\n",
+ err);
+ goto get_maskset_unlock;
+ } else if (err != 2) {
+ dev_err(ab3100->dev,
+ "write error (write register) "
+ "%d bytes transferred (expected 2)\n",
+ err);
+ err = -EIO;
+ goto get_maskset_unlock;
+ }
+
+ /* All is well */
+ err = 0;
+
+ get_maskset_unlock:
+ mutex_unlock(&ab3100->access_mutex);
+ return err;
+}
+EXPORT_SYMBOL(ab3100_mask_and_set_register);
+
+/*
+ * Register a simple callback for handling any AB3100 events.
+ */
+int ab3100_event_register(struct ab3100 *ab3100,
+ struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ab3100->event_subscribers,
+ nb);
+}
+EXPORT_SYMBOL(ab3100_event_register);
+
+/*
+ * Remove a previously registered callback.
+ */
+int ab3100_event_unregister(struct ab3100 *ab3100,
+ struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ab3100->event_subscribers,
+ nb);
+}
+EXPORT_SYMBOL(ab3100_event_unregister);
+
+
+int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
+ u32 *fatevent)
+{
+ if (!ab3100->startup_events_read)
+ return -EAGAIN; /* Try again later */
+ *fatevent = ab3100->startup_events;
+ return 0;
+}
+EXPORT_SYMBOL(ab3100_event_registers_startup_state_get);
+
+/* Interrupt handling worker */
+static void ab3100_work(struct work_struct *work)
+{
+ struct ab3100 *ab3100 = container_of(work, struct ab3100, work);
+ u8 event_regs[3];
+ u32 fatevent;
+ int err;
+
+ err = ab3100_get_register_page(ab3100, AB3100_EVENTA1,
+ event_regs, 3);
+ if (err)
+ goto err_event_wq;
+
+ fatevent = (event_regs[0] << 16) |
+ (event_regs[1] << 8) |
+ event_regs[2];
+
+ if (!ab3100->startup_events_read) {
+ ab3100->startup_events = fatevent;
+ ab3100->startup_events_read = true;
+ }
+ /*
+ * The notified parties will have to mask out the events
+ * they're interested in and react to them. They will be
+ * notified on all events, then they use the fatevent value
+ * to determine if they're interested.
+ */
+ blocking_notifier_call_chain(&ab3100->event_subscribers,
+ fatevent, NULL);
+
+ dev_dbg(ab3100->dev,
+ "IRQ Event: 0x%08x\n", fatevent);
+
+ /* By now the IRQ should be acked and deasserted so enable it again */
+ enable_irq(ab3100->i2c_client->irq);
+ return;
+
+ err_event_wq:
+ dev_dbg(ab3100->dev,
+ "error in event workqueue\n");
+ /* Enable the IRQ anyway, what choice do we have? */
+ enable_irq(ab3100->i2c_client->irq);
+ return;
+}
+
+static irqreturn_t ab3100_irq_handler(int irq, void *data)
+{
+ struct ab3100 *ab3100 = data;
+ /*
+ * Disable the IRQ and dispatch a worker to handle the
+ * event. Since the chip resides on I2C this is slow
+ * stuff and we will re-enable the interrupts once the
+ * worker has finished.
+ */
+ disable_irq(ab3100->i2c_client->irq);
+ schedule_work(&ab3100->work);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Some debugfs entries are only exposed when debugfs is enabled
+ */
+static int ab3100_registers_print(struct seq_file *s, void *p)
+{
+ struct ab3100 *ab3100 = s->private;
+ u8 value;
+ u8 reg;
+
+ seq_printf(s, "AB3100 registers:\n");
+
+ for (reg = 0; reg < 0xff; reg++) {
+ ab3100_get_register(ab3100, reg, &value);
+ seq_printf(s, "[0x%x]: 0x%x\n", reg, value);
+ }
+ return 0;
+}
+
+static int ab3100_registers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ab3100_registers_print, inode->i_private);
+}
+
+static const struct file_operations ab3100_registers_fops = {
+ .open = ab3100_registers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+struct ab3100_get_set_reg_priv {
+ struct ab3100 *ab3100;
+ bool mode;
+};
+
+static int ab3100_get_set_reg_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int ab3100_get_set_reg(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ab3100_get_set_reg_priv *priv = file->private_data;
+ struct ab3100 *ab3100 = priv->ab3100;
+ char buf[32];
+ int buf_size;
+ int regp;
+ unsigned long user_reg;
+ int err;
+ int i = 0;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ /*
+ * The idea is here to parse a string which is either
+ * "0xnn" for reading a register, or "0xaa 0xbb" for
+ * writing 0xbb to the register 0xaa. First move past
+ * whitespace and then begin to parse the register.
+ */
+ while ((i < buf_size) && (buf[i] == ' '))
+ i++;
+ regp = i;
+
+ /*
+ * Advance pointer to end of string then terminate
+ * the register string. This is needed to satisfy
+ * the strict_strtoul() function.
+ */
+ while ((i < buf_size) && (buf[i] != ' '))
+ i++;
+ buf[i] = '\0';
+
+ err = strict_strtoul(&buf[regp], 16, &user_reg);
+ if (err)
+ return err;
+ if (user_reg > 0xff)
+ return -EINVAL;
+
+ /* Either we read or we write a register here */
+ if (!priv->mode) {
+ /* Reading */
+ u8 reg = (u8) user_reg;
+ u8 regvalue;
+
+ ab3100_get_register(ab3100, reg, &regvalue);
+
+ dev_info(ab3100->dev,
+ "debug read AB3100 reg[0x%02x]: 0x%02x\n",
+ reg, regvalue);
+ } else {
+ int valp;
+ unsigned long user_value;
+ u8 reg = (u8) user_reg;
+ u8 value;
+ u8 regvalue;
+
+ /*
+ * Writing, we need some value to write to
+ * the register so keep parsing the string
+ * from userspace.
+ */
+ i++;
+ while ((i < buf_size) && (buf[i] == ' '))
+ i++;
+ valp = i;
+ while ((i < buf_size) && (buf[i] != ' '))
+ i++;
+ buf[i] = '\0';
+
+ err = strict_strtoul(&buf[valp], 16, &user_value);
+ if (err)
+ return err;
+ if (user_value > 0xff)
+ return -EINVAL;
+
+ value = (u8) user_value;
+ ab3100_set_register(ab3100, reg, value);
+ ab3100_get_register(ab3100, reg, &regvalue);
+
+ dev_info(ab3100->dev,
+ "debug write reg[0x%02x] with 0x%02x, "
+ "after readback: 0x%02x\n",
+ reg, value, regvalue);
+ }
+ return buf_size;
+}
+
+static const struct file_operations ab3100_get_set_reg_fops = {
+ .open = ab3100_get_set_reg_open_file,
+ .write = ab3100_get_set_reg,
+};
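/*
 * Usage sketch for the debugfs files created in ab3100_setup_debugfs()
 * below (illustrative only, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo "0x03"      > /sys/kernel/debug/ab3100/get_reg   # log a read of reg 0x03
 *   echo "0x03 0x42" > /sys/kernel/debug/ab3100/set_reg   # write 0x42, log read-back
 *
 * Results are reported through dev_info() by ab3100_get_set_reg() above.
 */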
+
+static struct dentry *ab3100_dir;
+static struct dentry *ab3100_reg_file;
+static struct ab3100_get_set_reg_priv ab3100_get_priv;
+static struct dentry *ab3100_get_reg_file;
+static struct ab3100_get_set_reg_priv ab3100_set_priv;
+static struct dentry *ab3100_set_reg_file;
+
+static void ab3100_setup_debugfs(struct ab3100 *ab3100)
+{
+ int err;
+
+ ab3100_dir = debugfs_create_dir("ab3100", NULL);
+ if (!ab3100_dir)
+ goto exit_no_debugfs;
+
+ ab3100_reg_file = debugfs_create_file("registers",
+ S_IRUGO, ab3100_dir, ab3100,
+ &ab3100_registers_fops);
+ if (!ab3100_reg_file) {
+ err = -ENOMEM;
+ goto exit_destroy_dir;
+ }
+
+ ab3100_get_priv.ab3100 = ab3100;
+ ab3100_get_priv.mode = false;
+ ab3100_get_reg_file = debugfs_create_file("get_reg",
+ S_IWUGO, ab3100_dir, &ab3100_get_priv,
+ &ab3100_get_set_reg_fops);
+ if (!ab3100_get_reg_file) {
+ err = -ENOMEM;
+ goto exit_destroy_reg;
+ }
+
+ ab3100_set_priv.ab3100 = ab3100;
+ ab3100_set_priv.mode = true;
+ ab3100_set_reg_file = debugfs_create_file("set_reg",
+ S_IWUGO, ab3100_dir, &ab3100_set_priv,
+ &ab3100_get_set_reg_fops);
+ if (!ab3100_set_reg_file) {
+ err = -ENOMEM;
+ goto exit_destroy_get_reg;
+ }
+ return;
+
+ exit_destroy_get_reg:
+ debugfs_remove(ab3100_get_reg_file);
+ exit_destroy_reg:
+ debugfs_remove(ab3100_reg_file);
+ exit_destroy_dir:
+ debugfs_remove(ab3100_dir);
+ exit_no_debugfs:
+ return;
+}
+
+static inline void ab3100_remove_debugfs(void)
+{
+ debugfs_remove(ab3100_set_reg_file);
+ debugfs_remove(ab3100_get_reg_file);
+ debugfs_remove(ab3100_reg_file);
+ debugfs_remove(ab3100_dir);
+}
+#else
+static inline void ab3100_setup_debugfs(struct ab3100 *ab3100)
+{
+}
+static inline void ab3100_remove_debugfs(void)
+{
+}
+#endif
+
+/*
+ * Basic set-up, data structure creation/destruction and I2C interface.
+ * This sets up a default config in the AB3100 chip so that it
+ * will work as expected.
+ */
+
+struct ab3100_init_setting {
+ u8 abreg;
+ u8 setting;
+};
+
+static const struct ab3100_init_setting __initdata
+ab3100_init_settings[] = {
+ {
+ .abreg = AB3100_MCA,
+ .setting = 0x01
+ }, {
+ .abreg = AB3100_MCB,
+ .setting = 0x30
+ }, {
+ .abreg = AB3100_IMRA1,
+ .setting = 0x00
+ }, {
+ .abreg = AB3100_IMRA2,
+ .setting = 0xFF
+ }, {
+ .abreg = AB3100_IMRA3,
+ .setting = 0x01
+ }, {
+ .abreg = AB3100_IMRB1,
+ .setting = 0xFF
+ }, {
+ .abreg = AB3100_IMRB2,
+ .setting = 0xFF
+ }, {
+ .abreg = AB3100_IMRB3,
+ .setting = 0xFF
+ }, {
+ .abreg = AB3100_SUP,
+ .setting = 0x00
+ }, {
+ .abreg = AB3100_DIS,
+ .setting = 0xF0
+ }, {
+ .abreg = AB3100_D0C,
+ .setting = 0x00
+ }, {
+ .abreg = AB3100_D1C,
+ .setting = 0x00
+ }, {
+ .abreg = AB3100_D2C,
+ .setting = 0x00
+ }, {
+ .abreg = AB3100_D3C,
+ .setting = 0x00
+ },
+};
+
+static int __init ab3100_setup(struct ab3100 *ab3100)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab3100_init_settings); i++) {
+ err = ab3100_set_register(ab3100,
+ ab3100_init_settings[i].abreg,
+ ab3100_init_settings[i].setting);
+ if (err)
+ goto exit_no_setup;
+ }
+
+ /*
+ * Special trick to make the AB3100 use the 32kHz clock (RTC):
+ * bit 3 in test register 0x02 is a special, undocumented test
+ * register bit that only exists in AB3100 P1E
+ */
+ if (ab3100->chip_id == 0xc4) {
+ dev_warn(ab3100->dev,
+ "AB3100 P1E variant detected, "
+ "forcing chip to 32KHz\n");
+ err = ab3100_set_test_register(ab3100, 0x02, 0x08);
+ }
+
+ exit_no_setup:
+ return err;
+}
+
+/*
+ * Here we define all the platform devices that appear
+ * as children of the AB3100. These are regular platform
+ * devices with the IORESOURCE_IO .start and .end set
+ * to correspond to the internal AB3100 register range
+ * mapping to the corresponding subdevice.
+ */
+
+#define AB3100_DEVICE(devname, devid) \
+static struct platform_device ab3100_##devname##_device = { \
+ .name = devid, \
+ .id = -1, \
+}
+
+/*
+ * This lists all the subdevices and corresponding register
+ * ranges.
+ */
+AB3100_DEVICE(dac, "ab3100-dac");
+AB3100_DEVICE(leds, "ab3100-leds");
+AB3100_DEVICE(power, "ab3100-power");
+AB3100_DEVICE(regulators, "ab3100-regulators");
+AB3100_DEVICE(sim, "ab3100-sim");
+AB3100_DEVICE(uart, "ab3100-uart");
+AB3100_DEVICE(rtc, "ab3100-rtc");
+AB3100_DEVICE(charger, "ab3100-charger");
+AB3100_DEVICE(boost, "ab3100-boost");
+AB3100_DEVICE(adc, "ab3100-adc");
+AB3100_DEVICE(fuelgauge, "ab3100-fuelgauge");
+AB3100_DEVICE(vibrator, "ab3100-vibrator");
+AB3100_DEVICE(otp, "ab3100-otp");
+AB3100_DEVICE(codec, "ab3100-codec");
+
+static struct platform_device *
+ab3100_platform_devs[] = {
+ &ab3100_dac_device,
+ &ab3100_leds_device,
+ &ab3100_power_device,
+ &ab3100_regulators_device,
+ &ab3100_sim_device,
+ &ab3100_uart_device,
+ &ab3100_rtc_device,
+ &ab3100_charger_device,
+ &ab3100_boost_device,
+ &ab3100_adc_device,
+ &ab3100_fuelgauge_device,
+ &ab3100_vibrator_device,
+ &ab3100_otp_device,
+ &ab3100_codec_device,
+};
+
+struct ab_family_id {
+ u8 id;
+ char *name;
+};
+
+static const struct ab_family_id ids[] __initdata = {
+ /* AB3100 */
+ {
+ .id = 0xc0,
+ .name = "P1A"
+ }, {
+ .id = 0xc1,
+ .name = "P1B"
+ }, {
+ .id = 0xc2,
+ .name = "P1C"
+ }, {
+ .id = 0xc3,
+ .name = "P1D"
+ }, {
+ .id = 0xc4,
+ .name = "P1E"
+ }, {
+ .id = 0xc5,
+ .name = "P1F/R1A"
+ }, {
+ .id = 0xc6,
+ .name = "P1G/R1A"
+ }, {
+ .id = 0xc7,
+ .name = "P2A/R2A"
+ }, {
+ .id = 0xc8,
+ .name = "P2B/R2B"
+ },
+ /* AB3000 variants, not supported */
+ {
+ .id = 0xa0
+ }, {
+ .id = 0xa1
+ }, {
+ .id = 0xa2
+ }, {
+ .id = 0xa3
+ }, {
+ .id = 0xa4
+ }, {
+ .id = 0xa5
+ }, {
+ .id = 0xa6
+ }, {
+ .id = 0xa7
+ },
+ /* Terminator */
+ {
+ .id = 0x00,
+ },
+};
+
+static int __init ab3100_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ab3100 *ab3100;
+ int err;
+ int i;
+
+ ab3100 = kzalloc(sizeof(struct ab3100), GFP_KERNEL);
+ if (!ab3100) {
+ dev_err(&client->dev, "could not allocate AB3100 device\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize data structure */
+ mutex_init(&ab3100->access_mutex);
+ BLOCKING_INIT_NOTIFIER_HEAD(&ab3100->event_subscribers);
+
+ ab3100->i2c_client = client;
+ ab3100->dev = &ab3100->i2c_client->dev;
+
+ i2c_set_clientdata(client, ab3100);
+
+ /* Read chip ID register */
+ err = ab3100_get_register(ab3100, AB3100_CID,
+ &ab3100->chip_id);
+ if (err) {
+ dev_err(&client->dev,
+ "could not communicate with the AB3100 analog "
+ "baseband chip\n");
+ goto exit_no_detect;
+ }
+
+ for (i = 0; ids[i].id != 0x0; i++) {
+ if (ids[i].id == ab3100->chip_id) {
+ if (ids[i].name != NULL) {
+ snprintf(&ab3100->chip_name[0],
+ sizeof(ab3100->chip_name) - 1,
+ "AB3100 %s",
+ ids[i].name);
+ break;
+ } else {
+ dev_err(&client->dev,
+ "AB3000 is not supported\n");
+ err = -ENODEV;
+ goto exit_no_detect;
+ }
+ }
+ }
+
+ if (ids[i].id == 0x0) {
+ dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
+ ab3100->chip_id);
+ dev_err(&client->dev, "accepting it anyway. Please update "
+ "the driver.\n");
+ }
+
+ dev_info(&client->dev, "Detected chip: %s\n",
+ &ab3100->chip_name[0]);
+
+ /* Attach a second dummy i2c_client to the test register address */
+ ab3100->testreg_client = i2c_new_dummy(client->adapter,
+ client->addr + 1);
+ if (!ab3100->testreg_client) {
+ err = -ENOMEM;
+ goto exit_no_testreg_client;
+ }
+
+ strlcpy(ab3100->testreg_client->name, id->name,
+ sizeof(ab3100->testreg_client->name));
+
+ err = ab3100_setup(ab3100);
+ if (err)
+ goto exit_no_setup;
+
+ INIT_WORK(&ab3100->work, ab3100_work);
+
+ /* This truly unpredictable IRQ is of course sampled for entropy */
+ err = request_irq(client->irq, ab3100_irq_handler,
+ IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
+ "AB3100 IRQ", ab3100);
+ if (err)
+ goto exit_no_irq;
+
+ /* Set parent and a pointer back to the container in device data */
+ for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
+ ab3100_platform_devs[i]->dev.parent =
+ &client->dev;
+ platform_set_drvdata(ab3100_platform_devs[i], ab3100);
+ }
+
+ /* Register the platform devices */
+ platform_add_devices(ab3100_platform_devs,
+ ARRAY_SIZE(ab3100_platform_devs));
+
+ ab3100_setup_debugfs(ab3100);
+
+ return 0;
+
+ exit_no_irq:
+ exit_no_setup:
+ i2c_unregister_device(ab3100->testreg_client);
+ exit_no_testreg_client:
+ exit_no_detect:
+ kfree(ab3100);
+ return err;
+}
+
+static int __exit ab3100_remove(struct i2c_client *client)
+{
+ struct ab3100 *ab3100 = i2c_get_clientdata(client);
+ int i;
+
+ /* Unregister subdevices */
+ for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++)
+ platform_device_unregister(ab3100_platform_devs[i]);
+
+ ab3100_remove_debugfs();
+ i2c_unregister_device(ab3100->testreg_client);
+
+ /*
+ * At this point, all subscribers should have unregistered
+ * their notifiers so deactivate IRQ
+ */
+ free_irq(client->irq, ab3100);
+ kfree(ab3100);
+ return 0;
+}
+
+static const struct i2c_device_id ab3100_id[] = {
+ { "ab3100", ab3100 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ab3100_id);
+
+static struct i2c_driver ab3100_driver = {
+ .driver = {
+ .name = "ab3100",
+ .owner = THIS_MODULE,
+ },
+ .id_table = ab3100_id,
+ .probe = ab3100_probe,
+ .remove = __exit_p(ab3100_remove),
+};
+
+static int __init ab3100_i2c_init(void)
+{
+ return i2c_add_driver(&ab3100_driver);
+}
+
+static void __exit ab3100_i2c_exit(void)
+{
+ i2c_del_driver(&ab3100_driver);
+}
+
+subsys_initcall(ab3100_i2c_init);
+module_exit(ab3100_i2c_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("AB3100 core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 9e485459f63b..63a2a6632106 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -24,6 +25,51 @@
#include <linux/platform_device.h>
#include <linux/mfd/asic3.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ds1wm.h>
+#include <linux/mfd/tmio.h>
+
+enum {
+ ASIC3_CLOCK_SPI,
+ ASIC3_CLOCK_OWM,
+ ASIC3_CLOCK_PWM0,
+ ASIC3_CLOCK_PWM1,
+ ASIC3_CLOCK_LED0,
+ ASIC3_CLOCK_LED1,
+ ASIC3_CLOCK_LED2,
+ ASIC3_CLOCK_SD_HOST,
+ ASIC3_CLOCK_SD_BUS,
+ ASIC3_CLOCK_SMBUS,
+ ASIC3_CLOCK_EX0,
+ ASIC3_CLOCK_EX1,
+};
+
+struct asic3_clk {
+ int enabled;
+ unsigned int cdex;
+ unsigned long rate;
+};
+
+#define INIT_CDEX(_name, _rate) \
+ [ASIC3_CLOCK_##_name] = { \
+ .cdex = CLOCK_CDEX_##_name, \
+ .rate = _rate, \
+ }
+
+struct asic3_clk asic3_clk_init[] __initdata = {
+ INIT_CDEX(SPI, 0),
+ INIT_CDEX(OWM, 5000000),
+ INIT_CDEX(PWM0, 0),
+ INIT_CDEX(PWM1, 0),
+ INIT_CDEX(LED0, 0),
+ INIT_CDEX(LED1, 0),
+ INIT_CDEX(LED2, 0),
+ INIT_CDEX(SD_HOST, 24576000),
+ INIT_CDEX(SD_BUS, 12288000),
+ INIT_CDEX(SMBUS, 0),
+ INIT_CDEX(EX0, 32768),
+ INIT_CDEX(EX1, 24576000),
+};
struct asic3 {
void __iomem *mapping;
@@ -34,6 +80,8 @@ struct asic3 {
u16 irq_bothedge[4];
struct gpio_chip gpio;
struct device *dev;
+
+ struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
};
static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset);
@@ -52,6 +100,21 @@ static inline u32 asic3_read_register(struct asic3 *asic,
(reg >> asic->bus_shift));
}
+void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val = asic3_read_register(asic, reg);
+ if (set)
+ val |= bits;
+ else
+ val &= ~bits;
+ asic3_write_register(asic, reg, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
/* IRQs */
#define MAX_ASIC_ISR_LOOPS 20
#define ASIC3_GPIO_BASE_INCR \
@@ -525,6 +588,240 @@ static int asic3_gpio_remove(struct platform_device *pdev)
return gpiochip_remove(&asic->gpio);
}
+static int asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
+{
+ unsigned long flags;
+ u32 cdex;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ if (clk->enabled++ == 0) {
+ cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
+ cdex |= clk->cdex;
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
+ }
+ spin_unlock_irqrestore(&asic->lock, flags);
+
+ return 0;
+}
+
+static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
+{
+ unsigned long flags;
+ u32 cdex;
+
+ WARN_ON(clk->enabled == 0);
+
+ spin_lock_irqsave(&asic->lock, flags);
+ if (--clk->enabled == 0) {
+ cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
+ cdex &= ~clk->cdex;
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
+ }
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
+static struct ds1wm_driver_data ds1wm_pdata = {
+ .active_high = 1,
+};
+
+static struct resource ds1wm_resources[] = {
+ {
+ .start = ASIC3_OWM_BASE,
+ .end = ASIC3_OWM_BASE + 0x13,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = ASIC3_IRQ_OWM,
+ .end = ASIC3_IRQ_OWM,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ },
+};
+
+static int ds1wm_enable(struct platform_device *pdev)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ /* Turn on external clocks and the OWM clock */
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
+ msleep(1);
+
+ /* Reset and enable DS1WM */
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
+ ASIC3_EXTCF_OWM_RESET, 1);
+ msleep(1);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
+ ASIC3_EXTCF_OWM_RESET, 0);
+ msleep(1);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ ASIC3_EXTCF_OWM_EN, 1);
+ msleep(1);
+
+ return 0;
+}
+
+static int ds1wm_disable(struct platform_device *pdev)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ ASIC3_EXTCF_OWM_EN, 0);
+
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
+
+ return 0;
+}
+
+static struct mfd_cell asic3_cell_ds1wm = {
+ .name = "ds1wm",
+ .enable = ds1wm_enable,
+ .disable = ds1wm_disable,
+ .driver_data = &ds1wm_pdata,
+ .num_resources = ARRAY_SIZE(ds1wm_resources),
+ .resources = ds1wm_resources,
+};
+
+static struct tmio_mmc_data asic3_mmc_data = {
+ .hclk = 24576000,
+};
+
+static struct resource asic3_mmc_resources[] = {
+ {
+ .start = ASIC3_SD_CTRL_BASE,
+ .end = ASIC3_SD_CTRL_BASE + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = ASIC3_SD_CONFIG_BASE,
+ .end = ASIC3_SD_CONFIG_BASE + 0x1ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int asic3_mmc_enable(struct platform_device *pdev)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ /* Not sure if it must be done bit by bit, but leaving as-is */
+ asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
+ ASIC3_SDHWCTRL_LEVCD, 1);
+ asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
+ ASIC3_SDHWCTRL_LEVWP, 1);
+ asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
+ ASIC3_SDHWCTRL_SUSPEND, 0);
+ asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
+ ASIC3_SDHWCTRL_PCLR, 0);
+
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
+ /* CLK32 used for card detection and for interrupt detection
+ * when HCLK is stopped.
+ */
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
+ msleep(1);
+
+ /* HCLK 24.576 MHz, BCLK 12.288 MHz: */
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
+ CLOCK_SEL_CX | CLOCK_SEL_SD_HCLK_SEL);
+
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
+ asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
+ msleep(1);
+
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ ASIC3_EXTCF_SD_MEM_ENABLE, 1);
+
+ /* Enable SD card slot 3.3V power supply */
+ asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
+ ASIC3_SDHWCTRL_SDPWR, 1);
+
+ return 0;
+}
+
+static int asic3_mmc_disable(struct platform_device *pdev)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ /* Put in suspend mode */
+ asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
+ ASIC3_SDHWCTRL_SUSPEND, 1);
+
+ /* Disable clocks */
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
+ asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
+ return 0;
+}
+
+static struct mfd_cell asic3_cell_mmc = {
+ .name = "tmio-mmc",
+ .enable = asic3_mmc_enable,
+ .disable = asic3_mmc_disable,
+ .driver_data = &asic3_mmc_data,
+ .num_resources = ARRAY_SIZE(asic3_mmc_resources),
+ .resources = asic3_mmc_resources,
+};
+
+static int __init asic3_mfd_probe(struct platform_device *pdev,
+ struct resource *mem)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+ struct resource *mem_sdio;
+ int irq, ret;
+
+ mem_sdio = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem_sdio)
+ dev_dbg(asic->dev, "no SDIO MEM resource\n");
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ dev_dbg(asic->dev, "no SDIO IRQ resource\n");
+
+ /* DS1WM */
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ ASIC3_EXTCF_OWM_SMB, 0);
+
+ ds1wm_resources[0].start >>= asic->bus_shift;
+ ds1wm_resources[0].end >>= asic->bus_shift;
+
+ asic3_cell_ds1wm.platform_data = &asic3_cell_ds1wm;
+ asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
+
+ /* MMC */
+ asic3_mmc_resources[0].start >>= asic->bus_shift;
+ asic3_mmc_resources[0].end >>= asic->bus_shift;
+ asic3_mmc_resources[1].start >>= asic->bus_shift;
+ asic3_mmc_resources[1].end >>= asic->bus_shift;
+
+ asic3_cell_mmc.platform_data = &asic3_cell_mmc;
+ asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
+
+ ret = mfd_add_devices(&pdev->dev, pdev->id,
+ &asic3_cell_ds1wm, 1, mem, asic->irq_base);
+ if (ret < 0)
+ goto out;
+
+ if (mem_sdio && (irq >= 0))
+ ret = mfd_add_devices(&pdev->dev, pdev->id,
+ &asic3_cell_mmc, 1, mem_sdio, irq);
+
+ out:
+ return ret;
+}
+
+static void asic3_mfd_remove(struct platform_device *pdev)
+{
+ mfd_remove_devices(&pdev->dev);
+}
/* Core */
static int __init asic3_probe(struct platform_device *pdev)
@@ -533,7 +830,6 @@ static int __init asic3_probe(struct platform_device *pdev)
struct asic3 *asic;
struct resource *mem;
unsigned long clksel;
- int map_size;
int ret = 0;
asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
@@ -553,8 +849,7 @@ static int __init asic3_probe(struct platform_device *pdev)
goto out_free;
}
- map_size = mem->end - mem->start + 1;
- asic->mapping = ioremap(mem->start, map_size);
+ asic->mapping = ioremap(mem->start, resource_size(mem));
if (!asic->mapping) {
ret = -ENOMEM;
dev_err(asic->dev, "Couldn't ioremap\n");
@@ -564,7 +859,7 @@ static int __init asic3_probe(struct platform_device *pdev)
asic->irq_base = pdata->irq_base;
/* calculate bus shift from mem resource */
- asic->bus_shift = 2 - (map_size >> 12);
+ asic->bus_shift = 2 - (resource_size(mem) >> 12);
clksel = 0;
asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel);
@@ -590,6 +885,13 @@ static int __init asic3_probe(struct platform_device *pdev)
goto out_irq;
}
+ /* Making a per-device copy is only needed for the
+ * theoretical case of multiple ASIC3s on one board:
+ */
+ memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init));
+
+ asic3_mfd_probe(pdev, mem);
+
dev_info(asic->dev, "ASIC3 Core driver\n");
return 0;
@@ -611,6 +913,8 @@ static int asic3_remove(struct platform_device *pdev)
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
+ asic3_mfd_remove(pdev);
+
ret = asic3_gpio_remove(pdev);
if (ret < 0)
return ret;
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 7283d88656af..e5ffe5617393 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -561,7 +561,7 @@ static int __init da903x_init(void)
{
return i2c_add_driver(&da903x_driver);
}
-module_init(da903x_init);
+subsys_initcall(da903x_init);
static void __exit da903x_exit(void)
{
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
new file mode 100644
index 000000000000..671a7efe86a8
--- /dev/null
+++ b/drivers/mfd/ezx-pcap.c
@@ -0,0 +1,505 @@
+/*
+ * Driver for Motorola PCAP2 as present in EZX phones
+ *
+ * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
+ * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/ezx-pcap.h>
+#include <linux/spi/spi.h>
+
+#define PCAP_ADC_MAXQ 8
+struct pcap_adc_request {
+ u8 bank;
+ u8 ch[2];
+ u32 flags;
+ void (*callback)(void *, u16[]);
+ void *data;
+};
+
+struct pcap_adc_sync_request {
+ u16 res[2];
+ struct completion completion;
+};
+
+struct pcap_chip {
+ struct spi_device *spi;
+
+ /* IO */
+ u32 buf;
+ struct mutex io_mutex;
+
+ /* IRQ */
+ unsigned int irq_base;
+ u32 msr;
+ struct work_struct isr_work;
+ struct work_struct msr_work;
+ struct workqueue_struct *workqueue;
+
+ /* ADC */
+ struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
+ u8 adc_head;
+ u8 adc_tail;
+ struct mutex adc_mutex;
+};
+
+/* IO */
+static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
+{
+ struct spi_transfer t;
+ struct spi_message m;
+ int status;
+
+ memset(&t, 0, sizeof t);
+ spi_message_init(&m);
+ t.len = sizeof(u32);
+ spi_message_add_tail(&t, &m);
+
+ pcap->buf = *data;
+ t.tx_buf = (u8 *) &pcap->buf;
+ t.rx_buf = (u8 *) &pcap->buf;
+ status = spi_sync(pcap->spi, &m);
+
+ if (status == 0)
+ *data = pcap->buf;
+
+ return status;
+}
+
+int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
+{
+ int ret;
+
+ mutex_lock(&pcap->io_mutex);
+ value &= PCAP_REGISTER_VALUE_MASK;
+ value |= PCAP_REGISTER_WRITE_OP_BIT
+ | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
+ ret = ezx_pcap_putget(pcap, &value);
+ mutex_unlock(&pcap->io_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ezx_pcap_write);
+
+int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
+{
+ int ret;
+
+ mutex_lock(&pcap->io_mutex);
+ *value = PCAP_REGISTER_READ_OP_BIT
+ | (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
+
+ ret = ezx_pcap_putget(pcap, value);
+ mutex_unlock(&pcap->io_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ezx_pcap_read);
+
+/* IRQ */
+static inline unsigned int irq2pcap(struct pcap_chip *pcap, int irq)
+{
+ return 1 << (irq - pcap->irq_base);
+}
+
+int pcap_to_irq(struct pcap_chip *pcap, int irq)
+{
+ return pcap->irq_base + irq;
+}
+EXPORT_SYMBOL_GPL(pcap_to_irq);
+
+static void pcap_mask_irq(unsigned int irq)
+{
+ struct pcap_chip *pcap = get_irq_chip_data(irq);
+
+ pcap->msr |= irq2pcap(pcap, irq);
+ queue_work(pcap->workqueue, &pcap->msr_work);
+}
+
+static void pcap_unmask_irq(unsigned int irq)
+{
+ struct pcap_chip *pcap = get_irq_chip_data(irq);
+
+ pcap->msr &= ~irq2pcap(pcap, irq);
+ queue_work(pcap->workqueue, &pcap->msr_work);
+}
+
+static struct irq_chip pcap_irq_chip = {
+ .name = "pcap",
+ .mask = pcap_mask_irq,
+ .unmask = pcap_unmask_irq,
+};
+
+static void pcap_msr_work(struct work_struct *work)
+{
+ struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);
+
+ ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
+}
+
+static void pcap_isr_work(struct work_struct *work)
+{
+ struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
+ struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
+ u32 msr, isr, int_sel, service;
+ int irq;
+
+ ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
+ ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);
+
+ /* We can't service/ack irqs that are assigned to port 2 */
+ if (!(pdata->config & PCAP_SECOND_PORT)) {
+ ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
+ isr &= ~int_sel;
+ }
+ ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
+
+ local_irq_disable();
+ service = isr & ~msr;
+
+ for (irq = pcap->irq_base; service; service >>= 1, irq++) {
+ if (service & 1) {
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (WARN(!desc, KERN_WARNING
+ "Invalid PCAP IRQ %d\n", irq))
+ break;
+
+ if (desc->status & IRQ_DISABLED)
+ note_interrupt(irq, desc, IRQ_NONE);
+ else
+ desc->handle_irq(irq, desc);
+ }
+ }
+ local_irq_enable();
+}
+
+static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct pcap_chip *pcap = get_irq_data(irq);
+
+ desc->chip->ack(irq);
+ queue_work(pcap->workqueue, &pcap->isr_work);
+ return;
+}
+
+/* ADC */
+static void pcap_disable_adc(struct pcap_chip *pcap)
+{
+ u32 tmp;
+
+ ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
+ tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
+ ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
+}
+
+static void pcap_adc_trigger(struct pcap_chip *pcap)
+{
+ u32 tmp;
+ u8 head;
+
+ mutex_lock(&pcap->adc_mutex);
+ head = pcap->adc_head;
+ if (!pcap->adc_queue[head]) {
+ /* queue is empty, save power */
+ pcap_disable_adc(pcap);
+ mutex_unlock(&pcap->adc_mutex);
+ return;
+ }
+ mutex_unlock(&pcap->adc_mutex);
+
+ /* start conversion on requested bank */
+ tmp = pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;
+
+ if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
+ tmp |= PCAP_ADC_AD_SEL1;
+
+ ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
+ ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
+}
+
+static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
+{
+ struct pcap_chip *pcap = _pcap;
+ struct pcap_adc_request *req;
+ u16 res[2];
+ u32 tmp;
+
+ mutex_lock(&pcap->adc_mutex);
+ req = pcap->adc_queue[pcap->adc_head];
+
+ if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
+ mutex_unlock(&pcap->adc_mutex);
+ return IRQ_HANDLED;
+ }
+
+ /* read requested channels results */
+ ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
+ tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
+ tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
+ tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
+ ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
+ ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
+ res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
+ res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;
+
+ pcap->adc_queue[pcap->adc_head] = NULL;
+ pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
+ mutex_unlock(&pcap->adc_mutex);
+
+ /* pass the results and release memory */
+ req->callback(req->data, res);
+ kfree(req);
+
+ /* trigger next conversion (if any) on queue */
+ pcap_adc_trigger(pcap);
+
+ return IRQ_HANDLED;
+}
+
+int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
+ void *callback, void *data)
+{
+ struct pcap_adc_request *req;
+
+ /* This will be freed after we have a result */
+ req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->bank = bank;
+ req->flags = flags;
+ req->ch[0] = ch[0];
+ req->ch[1] = ch[1];
+ req->callback = callback;
+ req->data = data;
+
+ mutex_lock(&pcap->adc_mutex);
+ if (pcap->adc_queue[pcap->adc_tail]) {
+ mutex_unlock(&pcap->adc_mutex);
+ kfree(req);
+ return -EBUSY;
+ }
+ pcap->adc_queue[pcap->adc_tail] = req;
+ pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
+ mutex_unlock(&pcap->adc_mutex);
+
+ /* start conversion */
+ pcap_adc_trigger(pcap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcap_adc_async);
+
+static void pcap_adc_sync_cb(void *param, u16 res[])
+{
+ struct pcap_adc_sync_request *req = param;
+
+ req->res[0] = res[0];
+ req->res[1] = res[1];
+ complete(&req->completion);
+}
+
+int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
+ u16 res[])
+{
+ struct pcap_adc_sync_request sync_data;
+ int ret;
+
+ init_completion(&sync_data.completion);
+ ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
+ &sync_data);
+ if (ret)
+ return ret;
+ wait_for_completion(&sync_data.completion);
+ res[0] = sync_data.res[0];
+ res[1] = sync_data.res[1];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcap_adc_sync);
+
+/* subdevs */
+static int pcap_remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
+ struct pcap_subdev *subdev)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(subdev->name, subdev->id);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->dev.parent = &pcap->spi->dev;
+ pdev->dev.platform_data = subdev->platform_data;
+ platform_set_drvdata(pdev, pcap);
+
+ return platform_device_add(pdev);
+}
+
+static int __devexit ezx_pcap_remove(struct spi_device *spi)
+{
+ struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
+ struct pcap_platform_data *pdata = spi->dev.platform_data;
+ int i, adc_irq;
+
+ /* remove all registered subdevs */
+ device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
+
+ /* cleanup ADC */
+ adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
+ PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
+ free_irq(adc_irq, pcap);
+ mutex_lock(&pcap->adc_mutex);
+ for (i = 0; i < PCAP_ADC_MAXQ; i++)
+ kfree(pcap->adc_queue[i]);
+ mutex_unlock(&pcap->adc_mutex);
+
+ /* cleanup irqchip */
+ for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
+ set_irq_chip_and_handler(i, NULL, NULL);
+
+ destroy_workqueue(pcap->workqueue);
+
+ kfree(pcap);
+
+ return 0;
+}
+
+static int __devinit ezx_pcap_probe(struct spi_device *spi)
+{
+ struct pcap_platform_data *pdata = spi->dev.platform_data;
+ struct pcap_chip *pcap;
+ int i, adc_irq;
+ int ret = -ENODEV;
+
+ /* platform data is required */
+ if (!pdata)
+ goto ret;
+
+ pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
+ if (!pcap) {
+ ret = -ENOMEM;
+ goto ret;
+ }
+
+ mutex_init(&pcap->io_mutex);
+ mutex_init(&pcap->adc_mutex);
+ INIT_WORK(&pcap->isr_work, pcap_isr_work);
+ INIT_WORK(&pcap->msr_work, pcap_msr_work);
+ dev_set_drvdata(&spi->dev, pcap);
+
+ /* setup spi */
+ spi->bits_per_word = 32;
+ spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
+ ret = spi_setup(spi);
+ if (ret)
+ goto free_pcap;
+
+ pcap->spi = spi;
+
+ /* setup irq */
+ pcap->irq_base = pdata->irq_base;
+ pcap->workqueue = create_singlethread_workqueue("pcapd");
+ if (!pcap->workqueue) {
+ dev_err(&spi->dev, "cant create pcap thread\n");
+ goto free_pcap;
+ }
+
+ /* redirect interrupts to AP, except adcdone2 */
+ if (!(pdata->config & PCAP_SECOND_PORT))
+ ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
+ (1 << PCAP_IRQ_ADCDONE2));
+
+ /* setup irq chip */
+ for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
+ set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
+ set_irq_chip_data(i, pcap);
+#ifdef CONFIG_ARM
+ set_irq_flags(i, IRQF_VALID);
+#else
+ set_irq_noprobe(i);
+#endif
+ }
+
+ /* mask/ack all PCAP interrupts */
+ ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
+ ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
+ pcap->msr = PCAP_MASK_ALL_INTERRUPT;
+
+ set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
+ set_irq_data(spi->irq, pcap);
+ set_irq_chained_handler(spi->irq, pcap_irq_handler);
+ set_irq_wake(spi->irq, 1);
+
+ /* ADC */
+ adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
+ PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
+
+ ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap);
+ if (ret)
+ goto free_irqchip;
+
+ /* setup subdevs */
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
+ if (ret)
+ goto remove_subdevs;
+ }
+
+ /* board specific quirks */
+ if (pdata->init)
+ pdata->init(pcap);
+
+ return 0;
+
+remove_subdevs:
+ device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
+/* free_adc: */
+ free_irq(adc_irq, pcap);
+free_irqchip:
+ for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
+ set_irq_chip_and_handler(i, NULL, NULL);
+/* destroy_workqueue: */
+ destroy_workqueue(pcap->workqueue);
+free_pcap:
+ kfree(pcap);
+ret:
+ return ret;
+}
+
+static struct spi_driver ezxpcap_driver = {
+ .probe = ezx_pcap_probe,
+ .remove = __devexit_p(ezx_pcap_remove),
+ .driver = {
+ .name = "ezx-pcap",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ezx_pcap_init(void)
+{
+ return spi_register_driver(&ezxpcap_driver);
+}
+
+static void __exit ezx_pcap_exit(void)
+{
+ spi_unregister_driver(&ezxpcap_driver);
+}
+
+module_init(ezx_pcap_init);
+module_exit(ezx_pcap_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
+MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 082c197ab9b8..8d3c38bf9714 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -705,5 +705,5 @@ MODULE_DESCRIPTION("I2C chip driver for NXP PCF50633 PMU");
MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
MODULE_LICENSE("GPL");
-module_init(pcf50633_init);
+subsys_initcall(pcf50633_init);
module_exit(pcf50633_exit);
diff --git a/drivers/mfd/pcf50633-gpio.c b/drivers/mfd/pcf50633-gpio.c
index 2fa2eca5c9cc..9ab19a8f669d 100644
--- a/drivers/mfd/pcf50633-gpio.c
+++ b/drivers/mfd/pcf50633-gpio.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/gpio.h>
@@ -116,3 +117,5 @@ int pcf50633_gpio_power_supply_set(struct pcf50633 *pcf,
return pcf50633_reg_set_bit_mask(pcf, reg, mask, val);
}
EXPORT_SYMBOL_GPL(pcf50633_gpio_power_supply_set);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 875f7a875734..0a255c1f1ce7 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -108,7 +108,7 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
/*--------------------------------------------------------------------------*/
-static const struct tmio_mmc_data t7166xb_mmc_data = {
+static struct tmio_mmc_data t7166xb_mmc_data = {
.hclk = 24000000,
};
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index c3993ac20542..3280ab33f88a 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -75,7 +75,7 @@ static int tc6387xb_mmc_disable(struct platform_device *mmc)
/*--------------------------------------------------------------------------*/
-const static struct tmio_mmc_data tc6387xb_mmc_data = {
+static struct tmio_mmc_data tc6387xb_mmc_data = {
.hclk = 24000000,
};
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 9d2abb5d6e2c..1429a7341a9a 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,7 +136,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
return 0;
}
-const static struct tmio_mmc_data tc6393xb_mmc_data = {
+static struct tmio_mmc_data tc6393xb_mmc_data = {
.hclk = 24000000,
};
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index ec90e953adce..cd1008c19cd7 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -647,7 +647,7 @@ static inline int __init unprotect_pm_master(void)
return e;
}
-static void __init clocks_init(struct device *dev)
+static void clocks_init(struct device *dev)
{
int e = 0;
struct clk *osc;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index aca2670afd78..bae61b22501c 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -255,7 +255,7 @@ static int twl4030_irq_thread(void *data)
* thread. All we do here is acknowledge and mask the interrupt and wakeup
* the kernel thread.
*/
-static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc)
+static void handle_twl4030_pih(unsigned int irq, struct irq_desc *desc)
{
/* Acknowledge, clear *AND* mask the interrupt... */
desc->chip->ack(irq);
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index 9a4cc954cb7c..7ccc1eab98ab 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -3186,7 +3186,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = {
/* read write volatile */
{ 0xFFFF, 0xFFFF, 0xFFFF }, /* R0 - Reset/ID */
{ 0x7CFF, 0x0C00, 0x7FFF }, /* R1 - ID */
- { 0x0000, 0x0000, 0x0000 }, /* R2 */
+ { 0x007F, 0x0000, 0x0000 }, /* R2 - ROM Mask ID */
{ 0xBE3B, 0xBE3B, 0x8000 }, /* R3 - System Control 1 */
{ 0xFEF7, 0xFEF7, 0xF800 }, /* R4 - System Control 2 */
{ 0x80FF, 0x80FF, 0x8000 }, /* R5 - System Hibernate */
@@ -3411,7 +3411,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = {
{ 0x0000, 0x0000, 0x0000 }, /* R224 */
{ 0x8F3F, 0x0000, 0xFFFF }, /* R225 - DCDC/LDO status */
{ 0x0000, 0x0000, 0xFFFF }, /* R226 - Charger status */
- { 0x0000, 0x0000, 0xFFFF }, /* R227 */
+ { 0x34FE, 0x0000, 0xFFFF }, /* R227 */
{ 0x0000, 0x0000, 0x0000 }, /* R228 */
{ 0x0000, 0x0000, 0x0000 }, /* R229 */
{ 0xFFFF, 0x1FFF, 0xFFFF }, /* R230 - GPIO Pin Status */
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 7c21bf791569..ecfc8bbe89b9 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -460,7 +460,7 @@ static int __init wm8400_module_init(void)
return ret;
}
-module_init(wm8400_module_init);
+subsys_initcall(wm8400_module_init);
static void __exit wm8400_module_exit(void)
{
diff --git a/drivers/misc/sgi-gru/Makefile b/drivers/misc/sgi-gru/Makefile
index bcd8136d2f98..7c4c306dfa8a 100644
--- a/drivers/misc/sgi-gru/Makefile
+++ b/drivers/misc/sgi-gru/Makefile
@@ -3,5 +3,5 @@ ifdef CONFIG_SGI_GRU_DEBUG
endif
obj-$(CONFIG_SGI_GRU) := gru.o
-gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o
+gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gruhandles.o grukdump.o
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 3fde33c1e8f3..3c9c06618e6a 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -81,6 +81,8 @@ struct control_block_extended_exc_detail {
int exopc;
long exceptdet0;
int exceptdet1;
+ int cbrstate;
+ int cbrexecstatus;
};
/*
@@ -107,7 +109,8 @@ struct gru_instruction_bits {
unsigned char reserved2: 2;
unsigned char istatus: 2;
unsigned char isubstatus:4;
- unsigned char reserved3: 2;
+ unsigned char reserved3: 1;
+ unsigned char tlb_fault_color: 1;
/* DW 1 */
unsigned long idef4; /* 42 bits: TRi1, BufSize */
/* DW 2-6 */
@@ -250,17 +253,37 @@ struct gru_instruction {
#define CBE_CAUSE_HA_RESPONSE_FATAL (1 << 13)
#define CBE_CAUSE_HA_RESPONSE_NON_FATAL (1 << 14)
#define CBE_CAUSE_ADDRESS_SPACE_DECODE_ERROR (1 << 15)
-#define CBE_CAUSE_RESPONSE_DATA_ERROR (1 << 16)
-#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 17)
+#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
+#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
+#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
+
+/* CBE cbrexecstatus bits */
+#define CBR_EXS_ABORT_OCC_BIT 0
+#define CBR_EXS_INT_OCC_BIT 1
+#define CBR_EXS_PENDING_BIT 2
+#define CBR_EXS_QUEUED_BIT 3
+#define CBR_EXS_TLB_INVAL_BIT 4
+#define CBR_EXS_EXCEPTION_BIT 5
+
+#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
+#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
+#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
+#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
+#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
+#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
/*
* Exceptions are retried for the following cases. If any OTHER bits are set
* in ecause, the exception is not retryable.
*/
-#define EXCEPTION_RETRY_BITS (CBE_CAUSE_RESPONSE_DATA_ERROR | \
- CBE_CAUSE_RA_REQUEST_TIMEOUT | \
+#define EXCEPTION_RETRY_BITS (CBE_CAUSE_EXECUTION_HW_ERROR | \
CBE_CAUSE_TLBHW_ERROR | \
- CBE_CAUSE_HA_REQUEST_TIMEOUT)
+ CBE_CAUSE_RA_REQUEST_TIMEOUT | \
+ CBE_CAUSE_RA_RESPONSE_NON_FATAL | \
+ CBE_CAUSE_HA_RESPONSE_NON_FATAL | \
+ CBE_CAUSE_RA_RESPONSE_DATA_ERROR | \
+ CBE_CAUSE_HA_RESPONSE_DATA_ERROR \
+ )
/* Message queue head structure */
union gru_mesqhead {
@@ -600,9 +623,11 @@ static inline int gru_get_cb_substatus(void *cb)
return cbs->isubstatus;
}
-/* Check the status of a CB. If the CB is in UPM mode, call the
- * OS to handle the UPM status.
- * Returns the CB status field value (0 for normal completion)
+/*
+ * User interface to check an instruction status. UPM and exceptions
+ * are handled automatically. However, this function does NOT wait
+ * for an active instruction to complete.
+ *
*/
static inline int gru_check_status(void *cb)
{
@@ -610,34 +635,31 @@ static inline int gru_check_status(void *cb)
int ret;
ret = cbs->istatus;
- if (ret == CBS_CALL_OS)
+ if (ret != CBS_ACTIVE)
ret = gru_check_status_proc(cb);
return ret;
}
-/* Wait for CB to complete.
- * Returns the CB status field value (0 for normal completion)
+/*
+ * User interface (via inline function) to wait for an instruction
+ * to complete. Completion status (IDLE or EXCEPTION) is returned
+ * to the user. Exceptions due to hardware errors are automatically
+ * retried before returning an exception.
+ *
*/
static inline int gru_wait(void *cb)
{
- struct gru_control_block_status *cbs = (void *)cb;
- int ret = cbs->istatus;
-
- if (ret != CBS_IDLE)
- ret = gru_wait_proc(cb);
- return ret;
+ return gru_wait_proc(cb);
}
-/* Wait for CB to complete. Aborts program if error. (Note: error does NOT
+/*
+ * Wait for CB to complete. Aborts program if error. (Note: error does NOT
 * mean TLB miss - only fatal errors such as memory parity error or user
* bugs will cause termination.
*/
static inline void gru_wait_abort(void *cb)
{
- struct gru_control_block_status *cbs = (void *)cb;
-
- if (cbs->istatus != CBS_IDLE)
- gru_wait_abort_proc(cb);
+ gru_wait_abort_proc(cb);
}
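For illustration only, not part of this patch: a sketch of the two completion styles the reworked inlines above give a kernel-side GRU user. CBS_ACTIVE and CBS_IDLE are the existing control-block status values referenced above; exception handling and hardware-error retries happen inside gru_check_status_proc()/gru_wait_proc().

/* Hypothetical caller that has already issued an instruction into *cb. */
static int example_gru_completion(void *cb, bool poll)
{
	if (poll) {
		/* Non-blocking: spin until the CB leaves the ACTIVE state */
		while (gru_check_status(cb) == CBS_ACTIVE)
			cpu_relax();
		return gru_check_status(cb);
	}
	/* Blocking: returns CBS_IDLE on success or an exception status */
	return gru_wait(cb);
}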
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index ab118558552e..679e01778286 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
* the GRU, atomic operations must be used to clear bits.
*/
static void get_clear_fault_map(struct gru_state *gru,
- struct gru_tlb_fault_map *map)
+ struct gru_tlb_fault_map *imap,
+ struct gru_tlb_fault_map *dmap)
{
unsigned long i, k;
struct gru_tlb_fault_map *tfm;
@@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
k = tfm->fault_bits[i];
if (k)
k = xchg(&tfm->fault_bits[i], 0UL);
- map->fault_bits[i] = k;
+ imap->fault_bits[i] = k;
+ k = tfm->done_bits[i];
+ if (k)
+ k = xchg(&tfm->done_bits[i], 0UL);
+ dmap->fault_bits[i] = k;
}
/*
@@ -334,6 +339,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
* Might be a hardware race OR a stupid user. Ignore FMM because FMM
* is a transient state.
*/
+ if (tfh->status != TFHSTATUS_EXCEPTION) {
+ gru_flush_cache(tfh);
+ if (tfh->status != TFHSTATUS_EXCEPTION)
+ goto failnoexception;
+ STAT(tfh_stale_on_fault);
+ }
if (tfh->state == TFHSTATE_IDLE)
goto failidle;
if (tfh->state == TFHSTATE_MISS_FMM && cb)
@@ -401,8 +412,17 @@ failfmm:
gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
+failnoexception:
+ /* TFH status did not show exception pending */
+ gru_flush_cache(tfh);
+ if (cb)
+ gru_flush_cache(cb);
+ STAT(tlb_dropin_fail_no_exception);
+ gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
+ return 0;
+
failidle:
- /* TFH was idle - no miss pending */
+ /* TFH state was idle - no miss pending */
gru_flush_cache(tfh);
if (cb)
gru_flush_cache(cb);
@@ -438,7 +458,7 @@ failactive:
irqreturn_t gru_intr(int irq, void *dev_id)
{
struct gru_state *gru;
- struct gru_tlb_fault_map map;
+ struct gru_tlb_fault_map imap, dmap;
struct gru_thread_state *gts;
struct gru_tlb_fault_handle *tfh = NULL;
int cbrnum, ctxnum;
@@ -451,11 +471,15 @@ irqreturn_t gru_intr(int irq, void *dev_id)
raw_smp_processor_id(), irq);
return IRQ_NONE;
}
- get_clear_fault_map(gru, &map);
- gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
- map.fault_bits[0]);
+ get_clear_fault_map(gru, &imap, &dmap);
+
+ for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
+ complete(gru->gs_blade->bs_async_wq);
+ gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
+ gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+ }
- for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
+ for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
tfh = get_tfh_by_index(gru, cbrnum);
prefetchw(tfh); /* Helps on hdw, required for emulator */
@@ -472,7 +496,9 @@ irqreturn_t gru_intr(int irq, void *dev_id)
* This is running in interrupt context. Trylock the mmap_sem.
* If it fails, retry the fault in user context.
*/
- if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
+ if (!gts->ts_force_cch_reload &&
+ down_read_trylock(&gts->ts_mm->mmap_sem)) {
+ gts->ustats.fmm_tlbdropin++;
gru_try_dropin(gts, tfh, NULL);
up_read(&gts->ts_mm->mmap_sem);
} else {
@@ -491,6 +517,7 @@ static int gru_user_dropin(struct gru_thread_state *gts,
struct gru_mm_struct *gms = gts->ts_gms;
int ret;
+ gts->ustats.upm_tlbdropin++;
while (1) {
wait_event(gms->ms_wait_queue,
atomic_read(&gms->ms_range_active) == 0);
@@ -546,8 +573,8 @@ int gru_handle_user_call_os(unsigned long cb)
* CCH may contain stale data if ts_force_cch_reload is set.
*/
if (gts->ts_gru && gts->ts_force_cch_reload) {
- gru_update_cch(gts, 0);
gts->ts_force_cch_reload = 0;
+ gru_update_cch(gts, 0);
}
ret = -EAGAIN;
@@ -589,20 +616,26 @@ int gru_get_exception_detail(unsigned long arg)
} else if (gts->ts_gru) {
cbrnum = thread_cbr_number(gts, ucbnum);
cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
- prefetchw(cbe);/* Harmless on hardware, required for emulator */
+ gru_flush_cache(cbe); /* CBE not coherent */
excdet.opc = cbe->opccpy;
excdet.exopc = cbe->exopccpy;
excdet.ecause = cbe->ecause;
excdet.exceptdet0 = cbe->idef1upd;
excdet.exceptdet1 = cbe->idef3upd;
+ excdet.cbrstate = cbe->cbrstate;
+ excdet.cbrexecstatus = cbe->cbrexecstatus;
+ gru_flush_cache(cbe);
ret = 0;
} else {
ret = -EAGAIN;
}
gru_unlock_gts(gts);
- gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
- excdet.ecause);
+ gru_dbg(grudev,
+ "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
+ "exdet0 0x%lx, exdet1 0x%x\n",
+ excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
+ excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
ret = -EFAULT;
return ret;
@@ -627,7 +660,7 @@ static int gru_unload_all_contexts(void)
if (gts && mutex_trylock(&gts->ts_ctxlock)) {
spin_unlock(&gru->gs_lock);
gru_unload_context(gts, 1);
- gru_unlock_gts(gts);
+ mutex_unlock(&gts->ts_ctxlock);
spin_lock(&gru->gs_lock);
}
}
@@ -669,6 +702,7 @@ int gru_user_flush_tlb(unsigned long arg)
{
struct gru_thread_state *gts;
struct gru_flush_tlb_req req;
+ struct gru_mm_struct *gms;
STAT(user_flush_tlb);
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
@@ -681,8 +715,34 @@ int gru_user_flush_tlb(unsigned long arg)
if (!gts)
return -EINVAL;
- gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
+ gms = gts->ts_gms;
gru_unlock_gts(gts);
+ gru_flush_tlb_range(gms, req.vaddr, req.len);
+
+ return 0;
+}
+
+/*
+ * Fetch GSEG statistics
+ */
+long gru_get_gseg_statistics(unsigned long arg)
+{
+ struct gru_thread_state *gts;
+ struct gru_get_gseg_statistics_req req;
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ gts = gru_find_lock_gts(req.gseg);
+ if (gts) {
+ memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
+ gru_unlock_gts(gts);
+ } else {
+ memset(&req.stats, 0, sizeof(gts->ustats));
+ }
+
+ if (copy_to_user((void __user *)arg, &req, sizeof(req)))
+ return -EFAULT;
return 0;
}
@@ -691,18 +751,34 @@ int gru_user_flush_tlb(unsigned long arg)
* Register the current task as the user of the GSEG slice.
* Needed for TLB fault interrupt targeting.
*/
-int gru_set_task_slice(long address)
+int gru_set_context_option(unsigned long arg)
{
struct gru_thread_state *gts;
+ struct gru_set_context_option_req req;
+ int ret = 0;
+
+ STAT(set_context_option);
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+ gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
- STAT(set_task_slice);
- gru_dbg(grudev, "address 0x%lx\n", address);
- gts = gru_alloc_locked_gts(address);
+ gts = gru_alloc_locked_gts(req.gseg);
if (!gts)
return -EINVAL;
- gts->ts_tgid_owner = current->tgid;
+ switch (req.op) {
+ case sco_gseg_owner:
+ /* Register the current task as the GSEG owner */
+ gts->ts_tgid_owner = current->tgid;
+ break;
+ case sco_cch_req_slice:
+ /* Set the CCH slice option */
+ gts->ts_cch_req_slice = req.val1 & 3;
+ break;
+ default:
+ ret = -EINVAL;
+ }
gru_unlock_gts(gts);
- return 0;
+ return ret;
}
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 3ce2920e2bf3..fa2d93a9fb8d 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -46,6 +46,7 @@
struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr __read_mostly;
+void *gru_start_vaddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
unsigned int gru_max_gids __read_mostly;
struct gru_stats_s gru_stats;
@@ -135,11 +136,9 @@ static int gru_create_new_context(unsigned long arg)
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
- if (req.data_segment_bytes == 0 ||
- req.data_segment_bytes > max_user_dsr_bytes)
+ if (req.data_segment_bytes > max_user_dsr_bytes)
return -EINVAL;
- if (!req.control_blocks || !req.maximum_thread_count ||
- req.control_blocks > max_user_cbrs)
+ if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
return -EINVAL;
if (!(req.options & GRU_OPT_MISS_MASK))
@@ -184,41 +183,6 @@ static long gru_get_config_info(unsigned long arg)
}
/*
- * Get GRU chiplet status
- */
-static long gru_get_chiplet_status(unsigned long arg)
-{
- struct gru_state *gru;
- struct gru_chiplet_info info;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- if (info.node == -1)
- info.node = numa_node_id();
- if (info.node >= num_possible_nodes() ||
- info.chiplet >= GRU_CHIPLETS_PER_HUB ||
- info.node < 0 || info.chiplet < 0)
- return -EINVAL;
-
- info.blade = uv_node_to_blade_id(info.node);
- gru = get_gru(info.blade, info.chiplet);
-
- info.total_dsr_bytes = GRU_NUM_DSR_BYTES;
- info.total_cbr = GRU_NUM_CB;
- info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES -
- gru->gs_reserved_dsr_bytes;
- info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs;
- info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) *
- GRU_DSR_AU_BYTES;
- info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
-
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-/*
* gru_file_unlocked_ioctl
*
* Called to update file attributes via IOCTL calls.
@@ -234,8 +198,8 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
case GRU_CREATE_CONTEXT:
err = gru_create_new_context(arg);
break;
- case GRU_SET_TASK_SLICE:
- err = gru_set_task_slice(arg);
+ case GRU_SET_CONTEXT_OPTION:
+ err = gru_set_context_option(arg);
break;
case GRU_USER_GET_EXCEPTION_DETAIL:
err = gru_get_exception_detail(arg);
@@ -243,18 +207,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
case GRU_USER_UNLOAD_CONTEXT:
err = gru_user_unload_context(arg);
break;
- case GRU_GET_CHIPLET_STATUS:
- err = gru_get_chiplet_status(arg);
- break;
case GRU_USER_FLUSH_TLB:
err = gru_user_flush_tlb(arg);
break;
case GRU_USER_CALL_OS:
err = gru_handle_user_call_os(arg);
break;
+ case GRU_GET_GSEG_STATISTICS:
+ err = gru_get_gseg_statistics(arg);
+ break;
+ case GRU_KTEST:
+ err = gru_ktest(arg);
+ break;
case GRU_GET_CONFIG_INFO:
err = gru_get_config_info(arg);
break;
+ case GRU_DUMP_CHIPLET_STATE:
+ err = gru_dump_chiplet_request(arg);
+ break;
}
return err;
}
@@ -282,7 +252,6 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr);
- gru_kservices_init(gru);
}
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
@@ -309,6 +278,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
spin_lock_init(&gru_base[bid]->bs_lock);
+ init_rwsem(&gru_base[bid]->bs_kgts_sema);
dsrbytes = 0;
cbrs = 0;
@@ -372,7 +342,6 @@ static int __init gru_init(void)
{
int ret, irq, chip;
char id[10];
- void *gru_start_vaddr;
if (!is_uv_system())
return 0;
@@ -422,6 +391,7 @@ static int __init gru_init(void)
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
goto exit3;
}
+ gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
GRU_DRIVER_VERSION_STR);
@@ -440,7 +410,7 @@ exit1:
static void __exit gru_exit(void)
{
- int i, bid, gid;
+ int i, bid;
int order = get_order(sizeof(struct gru_state) *
GRU_CHIPLETS_PER_BLADE);
@@ -449,10 +419,7 @@ static void __exit gru_exit(void)
for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
free_irq(IRQ_GRU + i, NULL);
-
- foreach_gid(gid)
- gru_kservices_exit(GID_TO_GRU(gid));
-
+ gru_kservices_exit();
for (bid = 0; bid < GRU_MAX_BLADES; bid++)
free_pages((unsigned long)gru_base[bid], order);
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 9b7ccb328697..37e7cfc53b9c 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -57,7 +57,7 @@ static void start_instruction(void *h)
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
int status;
- cycles_t start_time = get_cycles();
+ unsigned long start_time = get_cycles();
while (1) {
cpu_relax();
@@ -65,25 +65,16 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
if (status != CCHSTATUS_ACTIVE)
break;
if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
- panic("GRU %p is malfunctioning\n", h);
+ panic("GRU %p is malfunctioning: start %ld, end %ld\n",
+ h, start_time, (unsigned long)get_cycles());
}
if (gru_options & OPT_STATS)
update_mcs_stats(opc, get_cycles() - start_time);
return status;
}
-int cch_allocate(struct gru_context_configuration_handle *cch,
- int asidval, int sizeavail, unsigned long cbrmap,
- unsigned long dsrmap)
+int cch_allocate(struct gru_context_configuration_handle *cch)
{
- int i;
-
- for (i = 0; i < 8; i++) {
- cch->asid[i] = (asidval++);
- cch->sizeavail[i] = sizeavail;
- }
- cch->dsr_allocation_map = dsrmap;
- cch->cbr_allocation_map = cbrmap;
cch->opc = CCHOP_ALLOCATE;
start_instruction(cch);
return wait_instruction_complete(cch, cchop_allocate);
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index 1ed74d7508c8..f44112242d00 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -39,7 +39,6 @@
#define GRU_NUM_CBE 128
#define GRU_NUM_TFH 128
#define GRU_NUM_CCH 16
-#define GRU_NUM_GSH 1
/* Maximum resource counts that can be reserved by user programs */
#define GRU_NUM_USER_CBR GRU_NUM_CBE
@@ -56,7 +55,6 @@
#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000)
#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000)
#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000)
-#define GRU_GSH_BASE (GRU_MCS_BASE + 0x30000)
/* User gseg constants */
#define GRU_GSEG_STRIDE (4 * 1024 * 1024)
@@ -251,15 +249,15 @@ struct gru_tlb_fault_handle {
unsigned int fill1:9;
unsigned int status:2;
- unsigned int fill2:1;
- unsigned int color:1;
+ unsigned int fill2:2;
unsigned int state:3;
unsigned int fill3:1;
- unsigned int cause:7; /* DW 0 - high 32 */
+ unsigned int cause:6;
+ unsigned int cb_int:1;
unsigned int fill4:1;
- unsigned int indexway:12;
+ unsigned int indexway:12; /* DW 0 - high 32 */
unsigned int fill5:4;
unsigned int ctxnum:4;
@@ -457,21 +455,7 @@ enum gru_cbr_state {
CBRSTATE_BUSY_INTERRUPT,
};
-/* CBE cbrexecstatus bits */
-#define CBR_EXS_ABORT_OCC_BIT 0
-#define CBR_EXS_INT_OCC_BIT 1
-#define CBR_EXS_PENDING_BIT 2
-#define CBR_EXS_QUEUED_BIT 3
-#define CBR_EXS_TLBHW_BIT 4
-#define CBR_EXS_EXCEPTION_BIT 5
-
-#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
-#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
-#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
-#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
-#define CBR_EXS_TLBHW (1 << CBR_EXS_TLBHW_BIT)
-#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
-
+/* CBE cbrexecstatus bits - defined in gru_instructions.h */
/* CBE ecause bits - defined in gru_instructions.h */
/*
@@ -495,9 +479,7 @@ enum gru_cbr_state {
/* minimum TLB purge count to ensure a full purge */
#define GRUMAXINVAL 1024UL
-int cch_allocate(struct gru_context_configuration_handle *cch,
- int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
-
+int cch_allocate(struct gru_context_configuration_handle *cch);
int cch_start(struct gru_context_configuration_handle *cch);
int cch_interrupt(struct gru_context_configuration_handle *cch);
int cch_deallocate(struct gru_context_configuration_handle *cch);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
new file mode 100644
index 000000000000..55eabfa85585
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -0,0 +1,232 @@
+/*
+ * SN Platform GRU Driver
+ *
+ * Dump GRU State
+ *
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <asm/uv/uv_hub.h>
+#include "gru.h"
+#include "grutables.h"
+#include "gruhandles.h"
+#include "grulib.h"
+
+#define CCH_LOCK_ATTEMPTS 10
+
+static int gru_user_copy_handle(void __user **dp, void *s)
+{
+ if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
+ return -1;
+ *dp += GRU_HANDLE_BYTES;
+ return 0;
+}
+
+static int gru_dump_context_data(void *grubase,
+ struct gru_context_configuration_handle *cch,
+ void __user *ubuf, int ctxnum, int dsrcnt)
+{
+ void *cb, *cbe, *tfh, *gseg;
+ int i, scr;
+
+ gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
+ cb = gseg + GRU_CB_BASE;
+ cbe = grubase + GRU_CBE_BASE;
+ tfh = grubase + GRU_TFH_BASE;
+
+ for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
+ if (gru_user_copy_handle(&ubuf, cb))
+ goto fail;
+ if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
+ goto fail;
+ if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
+ goto fail;
+ cb += GRU_HANDLE_STRIDE;
+ }
+ if (dsrcnt)
+ memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
+ return 0;
+
+fail:
+ return -EFAULT;
+}
+
+static int gru_dump_tfm(struct gru_state *gru,
+ void __user *ubuf, void __user *ubufend)
+{
+ struct gru_tlb_fault_map *tfm;
+ int i, bytes;
+
+ bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
+ if (bytes > ubufend - ubuf)
+ return -EFBIG;
+
+ for (i = 0; i < GRU_NUM_TFM; i++) {
+ tfm = get_tfm(gru->gs_gru_base_vaddr, i);
+ if (gru_user_copy_handle(&ubuf, tfm))
+ goto fail;
+ }
+ return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
+
+fail:
+ return -EFAULT;
+}
+
+static int gru_dump_tgh(struct gru_state *gru,
+ void __user *ubuf, void __user *ubufend)
+{
+ struct gru_tlb_global_handle *tgh;
+ int i, bytes;
+
+ bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
+ if (bytes > ubufend - ubuf)
+ return -EFBIG;
+
+ for (i = 0; i < GRU_NUM_TGH; i++) {
+ tgh = get_tgh(gru->gs_gru_base_vaddr, i);
+ if (gru_user_copy_handle(&ubuf, tgh))
+ goto fail;
+ }
+ return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
+
+fail:
+ return -EFAULT;
+}
+
+static int gru_dump_context(struct gru_state *gru, int ctxnum,
+ void __user *ubuf, void __user *ubufend, char data_opt,
+ char lock_cch)
+{
+ struct gru_dump_context_header hdr;
+ struct gru_dump_context_header __user *uhdr = ubuf;
+ struct gru_context_configuration_handle *cch, *ubufcch;
+ struct gru_thread_state *gts;
+ int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
+ void *grubase;
+
+ memset(&hdr, 0, sizeof(hdr));
+ grubase = gru->gs_gru_base_vaddr;
+ cch = get_cch(grubase, ctxnum);
+ for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
+ cch_locked = trylock_cch_handle(cch);
+ if (cch_locked)
+ break;
+ msleep(1);
+ }
+
+ ubuf += sizeof(hdr);
+ ubufcch = ubuf;
+ if (gru_user_copy_handle(&ubuf, cch))
+ goto fail;
+ if (cch_locked)
+ ubufcch->delresp = 0;
+ bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
+
+ if (cch_locked || !lock_cch) {
+ gts = gru->gs_gts[ctxnum];
+ if (gts && gts->ts_vma) {
+ hdr.pid = gts->ts_tgid_owner;
+ hdr.vaddr = gts->ts_vma->vm_start;
+ }
+ if (cch->state != CCHSTATE_INACTIVE) {
+ cbrcnt = hweight64(cch->cbr_allocation_map) *
+ GRU_CBR_AU_SIZE;
+ dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
+ GRU_DSR_AU_CL : 0;
+ }
+ bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
+ if (bytes > ubufend - ubuf)
+ ret = -EFBIG;
+ else
+ ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
+ dsrcnt);
+
+ }
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ if (ret)
+ return ret;
+
+ hdr.magic = GRU_DUMP_MAGIC;
+ hdr.gid = gru->gs_gid;
+ hdr.ctxnum = ctxnum;
+ hdr.cbrcnt = cbrcnt;
+ hdr.dsrcnt = dsrcnt;
+ hdr.cch_locked = cch_locked;
+ if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
+ ret = -EFAULT;
+
+ return ret ? ret : bytes;
+
+fail:
+ unlock_cch_handle(cch);
+ return -EFAULT;
+}
+
+int gru_dump_chiplet_request(unsigned long arg)
+{
+ struct gru_state *gru;
+ struct gru_dump_chiplet_state_req req;
+ void __user *ubuf;
+ void __user *ubufend;
+ int ctxnum, ret, cnt = 0;
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
+ return -EFAULT;
+
+ /* Currently, only dump by gid is implemented */
+ if (req.gid >= gru_max_gids || req.gid < 0)
+ return -EINVAL;
+
+ gru = GID_TO_GRU(req.gid);
+ ubuf = req.buf;
+ ubufend = req.buf + req.buflen;
+
+ ret = gru_dump_tfm(gru, ubuf, ubufend);
+ if (ret < 0)
+ goto fail;
+ ubuf += ret;
+
+ ret = gru_dump_tgh(gru, ubuf, ubufend);
+ if (ret < 0)
+ goto fail;
+ ubuf += ret;
+
+ for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ if (req.ctxnum == ctxnum || req.ctxnum < 0) {
+ ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
+ req.data_opt, req.lock_cch);
+ if (ret < 0)
+ goto fail;
+ ubuf += ret;
+ cnt++;
+ }
+ }
+
+ if (copy_to_user((void __user *)arg, &req, sizeof(req)))
+ return -EFAULT;
+ return cnt;
+
+fail:
+ return ret;
+}
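A rough user-space sketch (not part of the patch) of how gru_dump_chiplet_request() above is reached through the new GRU_DUMP_CHIPLET_STATE ioctl. The "/dev/gru" node name is an assumption; the request layout follows struct gru_dump_chiplet_state_req added to grulib.h later in this patch, and a non-negative return value is the number of contexts dumped.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "grulib.h"	/* GRU_DUMP_CHIPLET_STATE, struct gru_dump_chiplet_state_req */

static int dump_chiplet(int gid, void *buf, size_t buflen)
{
	struct gru_dump_chiplet_state_req req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.gid = gid;
	req.ctxnum = -1;	/* negative: dump every context on the chiplet */
	req.data_opt = 1;	/* also dump the DSR data lines */
	req.lock_cch = 0;	/* best effort, do not insist on the CCH lock */
	req.buf = buf;
	req.buflen = buflen;

	fd = open("/dev/gru", O_RDWR);	/* assumed device node name */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
	close(fd);
	return ret;
}
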
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index d8bd7d84a7cf..eedbf9c32760 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
@@ -45,18 +46,66 @@
* resources. This will likely be replaced when we better understand the
* kernel/user requirements.
*
- * At boot time, the kernel permanently reserves a fixed number of
- * CBRs/DSRs for each cpu to use. The resources are all taken from
- * the GRU chiplet 1 on the blade. This leaves the full set of resources
- * of chiplet 0 available to be allocated to a single user.
+ * Blade percpu resources reserved for kernel use. These resources are
+ * reserved whenever the kernel context for the blade is loaded. Note
+ * that the kernel context is not guaranteed to be always available. It is
+ * loaded on demand & can be stolen by a user if the user demand exceeds the
+ * kernel demand. The kernel can always reload the kernel context but
+ * a SLEEP may be required!
+ *
+ * Async Overview:
+ *
+ * Each blade has one "kernel context" that owns GRU kernel resources
+ * located on the blade. Kernel drivers use GRU resources in this context
+ * for sending messages, zeroing memory, etc.
+ *
+ * The kernel context is dynamically loaded on demand. If it is not in
+ * use by the kernel, the kernel context can be unloaded & given to a user.
+ * The kernel context will be reloaded when needed. This may require that
+ * a context be stolen from a user.
+ * NOTE: frequent unloading/reloading of the kernel context is
+ * expensive. We are depending on batch schedulers, cpusets, sane
+ * drivers or some other mechanism to prevent the need for frequent
+ * stealing/reloading.
+ *
+ * The kernel context consists of two parts:
+ * - 1 CB & a few DSRs that are reserved for each cpu on the blade.
+ * Each cpu has its own private resources & does not share them
+ * with other cpus. These resources are used serially, i.e.,
+ * locked, used & unlocked on each call to a function in
+ * grukservices.
+ * (Now that we have dynamic loading of kernel contexts, I
+ * may rethink this & allow sharing between cpus....)
+ *
+ * - Additional resources can be reserved long term & used directly
+ * by UV drivers located in the kernel. Drivers using these GRU
+ * resources can use asynchronous GRU instructions that send
+ * interrupts on completion.
+ * - these resources must be explicitly locked/unlocked
+ * - locked resources prevent (obviously) the kernel
+ * context from being unloaded.
+ * - drivers using these resources directly issue their own
+ * GRU instruction and must wait/check completion.
+ *
+ * When these resources are reserved, the caller can optionally
+ * associate a wait_queue with the resources and use asynchronous
+ * GRU instructions. When an async GRU instruction completes, the
+ * driver will do a wakeup on the event.
+ *
*/
-/* Blade percpu resources PERMANENTLY reserved for kernel use */
+
+#define ASYNC_HAN_TO_BID(h) ((h) - 1)
+#define ASYNC_BID_TO_HAN(b) ((b) + 1)
+#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
+#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
+ (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
+#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
+
#define GRU_NUM_KERNEL_CBR 1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL (GRU_NUM_KERNEL_DSR_BYTES / \
GRU_CACHE_LINE_BYTES)
-#define KERNEL_CTXNUM 15
/* GRU instruction attributes for all instructions */
#define IMA IMA_CB_DELAY
@@ -98,6 +147,108 @@ struct message_header {
#define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
+/*
+ * Reload the blade's kernel context into a GRU chiplet. Called holding
+ * the bs_kgts_sema for READ. Will steal user contexts if necessary.
+ */
+static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
+{
+ struct gru_state *gru;
+ struct gru_thread_state *kgts;
+ void *vaddr;
+ int ctxnum, ncpus;
+
+ up_read(&bs->bs_kgts_sema);
+ down_write(&bs->bs_kgts_sema);
+
+ if (!bs->bs_kgts)
+ bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+ kgts = bs->bs_kgts;
+
+ if (!kgts->ts_gru) {
+ STAT(load_kernel_context);
+ ncpus = uv_blade_nr_possible_cpus(blade_id);
+ kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
+ GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
+ kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
+ GRU_NUM_KERNEL_DSR_BYTES * ncpus +
+ bs->bs_async_dsr_bytes);
+ while (!gru_assign_gru_context(kgts, blade_id)) {
+ msleep(1);
+ gru_steal_context(kgts, blade_id);
+ }
+ gru_load_context(kgts);
+ gru = bs->bs_kgts->ts_gru;
+ vaddr = gru->gs_gru_base_vaddr;
+ ctxnum = kgts->ts_ctxnum;
+ bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
+ bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
+ }
+ downgrade_write(&bs->bs_kgts_sema);
+}
+
+/*
+ * Free all kernel contexts that are not currently in use.
+ * Returns 0 if all freed, else number of inuse context.
+ */
+static int gru_free_kernel_contexts(void)
+{
+ struct gru_blade_state *bs;
+ struct gru_thread_state *kgts;
+ int bid, ret = 0;
+
+ for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
+ bs = gru_base[bid];
+ if (!bs)
+ continue;
+ if (down_write_trylock(&bs->bs_kgts_sema)) {
+ kgts = bs->bs_kgts;
+ if (kgts && kgts->ts_gru)
+ gru_unload_context(kgts, 0);
+ kfree(kgts);
+ bs->bs_kgts = NULL;
+ up_write(&bs->bs_kgts_sema);
+ } else {
+ ret++;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Lock & load the kernel context for the specified blade.
+ */
+static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
+{
+ struct gru_blade_state *bs;
+
+ STAT(lock_kernel_context);
+ bs = gru_base[blade_id];
+
+ down_read(&bs->bs_kgts_sema);
+ if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
+ gru_load_kernel_context(bs, blade_id);
+ return bs;
+
+}
+
+/*
+ * Unlock the kernel context for the specified blade. Context is not
+ * unloaded but may be stolen before next use.
+ */
+static void gru_unlock_kernel_context(int blade_id)
+{
+ struct gru_blade_state *bs;
+
+ bs = gru_base[blade_id];
+ up_read(&bs->bs_kgts_sema);
+ STAT(unlock_kernel_context);
+}
+
+/*
+ * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
+ * - returns with preemption disabled
+ */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
struct gru_blade_state *bs;
@@ -105,30 +256,148 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
preempt_disable();
- bs = gru_base[uv_numa_blade_id()];
+ bs = gru_lock_kernel_context(uv_numa_blade_id());
lcpu = uv_blade_processor_id();
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
return 0;
}
+/*
+ * Free the current cpu's reserved DSR/CBR resources.
+ */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
+ gru_unlock_kernel_context(uv_numa_blade_id());
preempt_enable();
}
+/*
+ * Reserve GRU resources to be used asynchronously.
+ * Note: currently supports only 1 reservation per blade.
+ *
+ * input:
+ * blade_id - blade on which resources should be reserved
+ * cbrs - number of CBRs
+ * dsr_bytes - number of DSR bytes needed
+ * output:
+ * handle to identify resource
+ * (0 = async resources already reserved)
+ */
+unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
+ struct completion *cmp)
+{
+ struct gru_blade_state *bs;
+ struct gru_thread_state *kgts;
+ int ret = 0;
+
+ bs = gru_base[blade_id];
+
+ down_write(&bs->bs_kgts_sema);
+
+ /* Verify no resources already reserved */
+ if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
+ goto done;
+ bs->bs_async_dsr_bytes = dsr_bytes;
+ bs->bs_async_cbrs = cbrs;
+ bs->bs_async_wq = cmp;
+ kgts = bs->bs_kgts;
+
+ /* Resources changed. Unload context if already loaded */
+ if (kgts && kgts->ts_gru)
+ gru_unload_context(kgts, 0);
+ ret = ASYNC_BID_TO_HAN(blade_id);
+
+done:
+ up_write(&bs->bs_kgts_sema);
+ return ret;
+}
+
+/*
+ * Release async resources previously reserved.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+void gru_release_async_resources(unsigned long han)
+{
+ struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
+
+ down_write(&bs->bs_kgts_sema);
+ bs->bs_async_dsr_bytes = 0;
+ bs->bs_async_cbrs = 0;
+ bs->bs_async_wq = NULL;
+ up_write(&bs->bs_kgts_sema);
+}
+
+/*
+ * Wait for async GRU instructions to complete.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+void gru_wait_async_cbr(unsigned long han)
+{
+ struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
+
+ wait_for_completion(bs->bs_async_wq);
+ mb();
+}
+
+/*
+ * Lock previously reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ * output:
+ * cb - pointer to first CBR
+ * dsr - pointer to first DSR
+ */
+void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
+{
+ struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
+ int blade_id = ASYNC_HAN_TO_BID(han);
+ int ncpus;
+
+ gru_lock_kernel_context(blade_id);
+ ncpus = uv_blade_nr_possible_cpus(blade_id);
+ if (cb)
+ *cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
+ if (dsr)
+ *dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
+}
+
+/*
+ * Unlock previously reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ */
+void gru_unlock_async_resource(unsigned long han)
+{
+ int blade_id = ASYNC_HAN_TO_BID(han);
+
+ gru_unlock_kernel_context(blade_id);
+}
+
+/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
struct control_block_extended_exc_detail *excdet)
{
struct gru_control_block_extended *cbe;
+ struct gru_blade_state *bs;
+ int cbrnum;
- cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
- prefetchw(cbe); /* Harmless on hardware, required for emulator */
+ bs = KCB_TO_BS(cb);
+ cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+ cbe = get_cbe(GRUBASE(cb), cbrnum);
+ gru_flush_cache(cbe); /* CBE not coherent */
excdet->opc = cbe->opccpy;
excdet->exopc = cbe->exopccpy;
excdet->ecause = cbe->ecause;
excdet->exceptdet0 = cbe->idef1upd;
excdet->exceptdet1 = cbe->idef3upd;
+ gru_flush_cache(cbe);
return 0;
}
@@ -167,13 +436,13 @@ static int gru_retry_exception(void *cb)
int retry = EXCEPTION_RETRY_LIMIT;
while (1) {
- if (gru_get_cb_message_queue_substatus(cb))
- break;
if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
return CBS_IDLE;
-
+ if (gru_get_cb_message_queue_substatus(cb))
+ return CBS_EXCEPTION;
gru_get_cb_exception_detail(cb, &excdet);
- if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
+ if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
+ (excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
break;
if (retry-- == 0)
break;
@@ -416,6 +685,29 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
mqd->interrupt_vector);
}
+/*
+ * Handle a PUT failure. Note: if message was a 2-line message, one of the
+ * lines might have been written successfully. Before sending the
+ * message, "present" must be cleared in BOTH lines to prevent the receiver
+ * from prematurely seeing the full message.
+ */
+static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
+ void *mesg, int lines)
+{
+ unsigned long m;
+
+ m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
+ if (lines == 2) {
+ gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
+ }
+ gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
+ send_message_queue_interrupt(mqd);
+ return MQE_OK;
+}
/*
* Handle a gru_mesq failure. Some of these failures are software recoverable
@@ -425,7 +717,6 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
int substatus, ret = 0;
- unsigned long m;
substatus = gru_get_cb_message_queue_substatus(cb);
switch (substatus) {
@@ -447,14 +738,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PUT_NACKED:
STAT(mesq_send_put_nacked);
- m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
- gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
- if (gru_wait(cb) == CBS_IDLE) {
- ret = MQE_OK;
- send_message_queue_interrupt(mqd);
- } else {
- ret = MQE_UNEXPECTED_CB_ERR;
- }
+ ret = send_message_put_nacked(cb, mqd, mesg, lines);
break;
default:
BUG();
@@ -597,115 +881,177 @@ EXPORT_SYMBOL_GPL(gru_copy_gpa);
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
-static __cacheline_aligned unsigned long word0;
-static __cacheline_aligned unsigned long word1;
-static int quicktest(struct gru_state *gru)
+static int quicktest0(unsigned long arg)
{
+ unsigned long word0;
+ unsigned long word1;
void *cb;
- void *ds;
+ void *dsr;
unsigned long *p;
+ int ret = -EIO;
- cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
- ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
- p = ds;
+ if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ p = dsr;
word0 = MAGIC;
+ word1 = 0;
- gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
- if (gru_wait(cb) != CBS_IDLE)
- BUG();
+ gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
+ goto done;
+ }
- if (*(unsigned long *)ds != MAGIC)
- BUG();
- gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
- if (gru_wait(cb) != CBS_IDLE)
- BUG();
+ if (*p != MAGIC) {
+ printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
+ goto done;
+ }
+ gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
+ if (gru_wait(cb) != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
+ goto done;
+ }
- if (word0 != word1 || word0 != MAGIC) {
- printk
- ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n",
- gru->gs_gid, word1, MAGIC);
- BUG(); /* ZZZ should not be fatal */
+ if (word0 != word1 || word1 != MAGIC) {
+ printk(KERN_DEBUG
+ "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
+ word1, MAGIC);
+ goto done;
}
+ ret = 0;
- return 0;
+done:
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
}
+#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))
-int gru_kservices_init(struct gru_state *gru)
+static int quicktest1(unsigned long arg)
{
- struct gru_blade_state *bs;
- struct gru_context_configuration_handle *cch;
- unsigned long cbr_map, dsr_map;
- int err, num, cpus_possible;
-
- /*
- * Currently, resources are reserved ONLY on the second chiplet
- * on each blade. This leaves ALL resources on chiplet 0 available
- * for user code.
- */
- bs = gru->gs_blade;
- if (gru != &bs->bs_grus[1])
- return 0;
-
- cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);
-
- num = GRU_NUM_KERNEL_CBR * cpus_possible;
- cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
- gru->gs_reserved_cbrs += num;
-
- num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
- dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
- gru->gs_reserved_dsr_bytes += num;
-
- gru->gs_active_contexts++;
- __set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
- cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
-
- bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
- KERNEL_CTXNUM, 0);
- bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
- KERNEL_CTXNUM, 0);
-
- lock_cch_handle(cch);
- cch->tfm_fault_bit_enable = 0;
- cch->tlb_int_enable = 0;
- cch->tfm_done_bit_enable = 0;
- cch->unmap_enable = 1;
- err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
- if (err) {
- gru_dbg(grudev,
- "Unable to allocate kernel CCH: gid %d, err %d\n",
- gru->gs_gid, err);
- BUG();
+ struct gru_message_queue_desc mqd;
+ void *p, *mq;
+ unsigned long *dw;
+ int i, ret = -EIO;
+ char mes[GRU_CACHE_LINE_BYTES], *m;
+
+ /* Need a 1K cacheline-aligned buffer that does not cross a page boundary */
+ p = kmalloc(4096, GFP_KERNEL);
+ mq = ALIGNUP(p, 1024);
+ memset(mes, 0xee, sizeof(mes));
+ dw = mq;
+
+ gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
+ for (i = 0; i < 6; i++) {
+ mes[8] = i;
+ do {
+ ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
+ } while (ret == MQE_CONGESTION);
+ if (ret)
+ break;
}
- if (cch_start(cch)) {
- gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n",
- gru->gs_gid, err);
- BUG();
+ if (ret != MQE_QUEUE_FULL || i != 4)
+ goto done;
+
+ for (i = 0; i < 6; i++) {
+ m = gru_get_next_message(&mqd);
+ if (!m || m[8] != i)
+ break;
+ gru_free_message(&mqd, m);
}
- unlock_cch_handle(cch);
+ ret = (i == 4) ? 0 : -EIO;
- if (gru_options & GRU_QUICKLOOK)
- quicktest(gru);
- return 0;
+done:
+ kfree(p);
+ return ret;
}
-void gru_kservices_exit(struct gru_state *gru)
+static int quicktest2(unsigned long arg)
{
- struct gru_context_configuration_handle *cch;
- struct gru_blade_state *bs;
+ static DECLARE_COMPLETION(cmp);
+ unsigned long han;
+ int blade_id = 0;
+ int numcb = 4;
+ int ret = 0;
+ unsigned long *buf;
+ void *cb0, *cb;
+ int i, k, istatus, bytes;
+
+ bytes = numcb * 4 * 8;
+ buf = kmalloc(bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = -EBUSY;
+ han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
+ if (!han)
+ goto done;
+
+ gru_lock_async_resource(han, &cb0, NULL);
+ memset(buf, 0xee, bytes);
+ for (i = 0; i < numcb; i++)
+ gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
+ XTYPE_DW, 4, 1, IMA_INTERRUPT);
+
+ ret = 0;
+ for (k = 0; k < numcb; k++) {
+ gru_wait_async_cbr(han);
+ for (i = 0; i < numcb; i++) {
+ cb = cb0 + i * GRU_HANDLE_STRIDE;
+ istatus = gru_check_status(cb);
+ if (istatus == CBS_ACTIVE)
+ continue;
+ if (istatus == CBS_EXCEPTION)
+ ret = -EFAULT;
+ else if (buf[i] || buf[i + 1] || buf[i + 2] ||
+ buf[i + 3])
+ ret = -EIO;
+ }
+ }
+ BUG_ON(cmp.done);
- bs = gru->gs_blade;
- if (gru != &bs->bs_grus[1])
- return;
+ gru_unlock_async_resource(han);
+ gru_release_async_resources(han);
+done:
+ kfree(buf);
+ return ret;
+}
- cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
- lock_cch_handle(cch);
- if (cch_interrupt_sync(cch))
- BUG();
- if (cch_deallocate(cch))
+/*
+ * Debugging only. User hook for various kernel tests
+ * of driver & gru.
+ */
+int gru_ktest(unsigned long arg)
+{
+ int ret = -EINVAL;
+
+ switch (arg & 0xff) {
+ case 0:
+ ret = quicktest0(arg);
+ break;
+ case 1:
+ ret = quicktest1(arg);
+ break;
+ case 2:
+ ret = quicktest2(arg);
+ break;
+ case 99:
+ ret = gru_free_kernel_contexts();
+ break;
+ }
+ return ret;
+
+}
+
+int gru_kservices_init(void)
+{
+ return 0;
+}
+
+void gru_kservices_exit(void)
+{
+ if (gru_free_kernel_contexts())
BUG();
- unlock_cch_handle(cch);
}
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index 747ed315d56f..d60d34bca44d 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -146,4 +146,55 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
extern int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
unsigned int bytes);
+/*
+ * Reserve GRU resources to be used asynchronously.
+ *
+ * input:
+ * blade_id - blade on which resources should be reserved
+ * cbrs - number of CBRs
+ * dsr_bytes - number of DSR bytes needed
+ * cmp - completion structure for waiting for
+ * async completions
+ * output:
+ * handle to identify resource
+ * (0 = no resources)
+ */
+extern unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
+ struct completion *cmp);
+
+/*
+ * Release async resources previously reserved.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+extern void gru_release_async_resources(unsigned long han);
+
+/*
+ * Wait for async GRU instructions to complete.
+ *
+ * input:
+ * han - handle to identify resources
+ */
+extern void gru_wait_async_cbr(unsigned long han);
+
+/*
+ * Lock previously reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ * output:
+ * cb - pointer to first CBR
+ * dsr - pointer to first DSR
+ */
+extern void gru_lock_async_resource(unsigned long han, void **cb, void **dsr);
+
+/*
+ * Unlock previously reserved async GRU resources
+ *
+ * input:
+ * han - handle to identify resources
+ */
+extern void gru_unlock_async_resource(unsigned long han);
+
#endif /* __GRU_KSERVICES_H_ */
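An illustrative in-kernel sketch (not part of the patch) of the async kservices life cycle declared above, modelled on quicktest2() in grukservices.c. The header names, the blade id parameter and the single zero-fill CB are assumptions made for the example; the wake-up of gru_wait_async_cbr() is expected to come from the GRU interrupt handler completing bs_async_wq.

#include <linux/completion.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>		/* uv_gpa() */
#include "gru_instructions.h"		/* gru_vset(), gru_check_status(), IMA_INTERRUPT */
#include "grukservices.h"

static int gru_async_zero_example(int blade_id)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han, *buf;
	void *cb;
	int ret = 0;

	buf = kmalloc(4 * sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	han = gru_reserve_async_resources(blade_id, 1, 0, &cmp);
	if (!han) {			/* 0: async resources already reserved */
		kfree(buf);
		return -EBUSY;
	}

	gru_lock_async_resource(han, &cb, NULL);	/* loads & locks the kernel context */
	gru_vset(cb, uv_gpa(buf), 0, XTYPE_DW, 4, 1, IMA_INTERRUPT);
	gru_wait_async_cbr(han);	/* sleeps until the CB completion interrupt */
	if (gru_check_status(cb) != CBS_IDLE)
		ret = -EIO;

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
	kfree(buf);
	return ret;
}
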
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index e56e196a6998..889bc442a3e8 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -32,8 +32,8 @@
/* Set Number of Request Blocks */
#define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *)
-/* Register task as using the slice */
-#define GRU_SET_TASK_SLICE _IOWR(GRU_IOCTL_NUM, 5, void *)
+/* Set Context Options */
+#define GRU_SET_CONTEXT_OPTION _IOWR(GRU_IOCTL_NUM, 4, void *)
/* Fetch exception detail */
#define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *)
@@ -44,8 +44,11 @@
/* For user unload context */
#define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *)
-/* For fetching GRU chiplet status */
-#define GRU_GET_CHIPLET_STATUS _IOWR(GRU_IOCTL_NUM, 10, void *)
+/* For dumping GRU chiplet state */
+#define GRU_DUMP_CHIPLET_STATE _IOWR(GRU_IOCTL_NUM, 11, void *)
+
+/* For getting gseg statistics */
+#define GRU_GET_GSEG_STATISTICS _IOWR(GRU_IOCTL_NUM, 12, void *)
/* For user TLB flushing (primarily for tests) */
#define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *)
@@ -53,8 +56,26 @@
/* Get some config options (primarily for tests & emulator) */
#define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *)
+/* Various kernel self-tests */
+#define GRU_KTEST _IOWR(GRU_IOCTL_NUM, 52, void *)
+
#define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th))
#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
+#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
+
+/*
+ * Statistics kept on a per-GTS basis.
+ */
+struct gts_statistics {
+ unsigned long fmm_tlbdropin;
+ unsigned long upm_tlbdropin;
+ unsigned long context_stolen;
+};
+
+struct gru_get_gseg_statistics_req {
+ unsigned long gseg;
+ struct gts_statistics stats;
+};
/*
* Structure used to pass TLB flush parameters to the driver
@@ -75,6 +96,16 @@ struct gru_unload_context_req {
};
/*
+ * Structure used to set context options
+ */
+enum {sco_gseg_owner, sco_cch_req_slice};
+struct gru_set_context_option_req {
+ unsigned long gseg;
+ int op;
+ unsigned long val1;
+};
+
+/*
* Structure used to pass TLB flush parameters to the driver
*/
struct gru_flush_tlb_req {
@@ -84,6 +115,36 @@ struct gru_flush_tlb_req {
};
/*
+ * Structure used to dump the state of a GRU chiplet
+ */
+enum {dcs_pid, dcs_gid};
+struct gru_dump_chiplet_state_req {
+ unsigned int op;
+ unsigned int gid;
+ int ctxnum;
+ char data_opt;
+ char lock_cch;
+ pid_t pid;
+ void *buf;
+ size_t buflen;
+ /* ---- output --- */
+ unsigned int num_contexts;
+};
+
+#define GRU_DUMP_MAGIC 0x3474ab6c
+struct gru_dump_context_header {
+ unsigned int magic;
+ unsigned int gid;
+ unsigned char ctxnum;
+ unsigned char cbrcnt;
+ unsigned char dsrcnt;
+ pid_t pid;
+ unsigned long vaddr;
+ int cch_locked;
+ unsigned long data[0];
+};
+
+/*
* GRU configuration info (temp - for testing)
*/
struct gru_config_info {
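A small user-space illustration (not part of the patch) of the GRU_GET_GSEG_STATISTICS ioctl and the gts_statistics structure defined above. The already-open GRU file descriptor and the mapped gseg address are assumed to exist.

#include <stdio.h>
#include <sys/ioctl.h>
#include "grulib.h"

static int print_gseg_stats(int fd, void *gseg)
{
	struct gru_get_gseg_statistics_req req = {
		.gseg = (unsigned long)gseg,	/* address of a mapped gseg */
	};

	if (ioctl(fd, GRU_GET_GSEG_STATISTICS, &req) < 0)
		return -1;
	printf("fmm_tlbdropin %lu, upm_tlbdropin %lu, context_stolen %lu\n",
	       req.stats.fmm_tlbdropin, req.stats.upm_tlbdropin,
	       req.stats.context_stolen);
	return 0;
}
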
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index ec3f7a17d221..3bc643dad606 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -3,11 +3,21 @@
*
* DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
@@ -96,7 +106,7 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid)
gid = gru->gs_gid;
again:
for (i = 0; i < GRU_NUM_CCH; i++) {
- if (!gru->gs_gts[i])
+ if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
continue;
inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
@@ -150,7 +160,7 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
unsigned long bits = 0;
int i;
- do {
+ while (n--) {
i = find_first_bit(p, mmax);
if (i == mmax)
BUG();
@@ -158,7 +168,7 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
__set_bit(i, &bits);
if (idx)
*idx++ = i;
- } while (--n);
+ }
return bits;
}
@@ -299,38 +309,39 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
/*
* Allocate a thread state structure.
*/
-static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- struct gru_vma_data *vdata,
- int tsid)
+struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
+ int cbr_au_count, int dsr_au_count, int options, int tsid)
{
struct gru_thread_state *gts;
int bytes;
- bytes = DSR_BYTES(vdata->vd_dsr_au_count) +
- CBR_BYTES(vdata->vd_cbr_au_count);
+ bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
bytes += sizeof(struct gru_thread_state);
- gts = kzalloc(bytes, GFP_KERNEL);
+ gts = kmalloc(bytes, GFP_KERNEL);
if (!gts)
return NULL;
STAT(gts_alloc);
+ memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
atomic_set(&gts->ts_refcnt, 1);
mutex_init(&gts->ts_ctxlock);
- gts->ts_cbr_au_count = vdata->vd_cbr_au_count;
- gts->ts_dsr_au_count = vdata->vd_dsr_au_count;
- gts->ts_user_options = vdata->vd_user_options;
+ gts->ts_cbr_au_count = cbr_au_count;
+ gts->ts_dsr_au_count = dsr_au_count;
+ gts->ts_user_options = options;
gts->ts_tsid = tsid;
- gts->ts_user_options = vdata->vd_user_options;
gts->ts_ctxnum = NULLCTX;
- gts->ts_mm = current->mm;
- gts->ts_vma = vma;
gts->ts_tlb_int_select = -1;
- gts->ts_gms = gru_register_mmu_notifier();
+ gts->ts_cch_req_slice = -1;
gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
- if (!gts->ts_gms)
- goto err;
+ if (vma) {
+ gts->ts_mm = current->mm;
+ gts->ts_vma = vma;
+ gts->ts_gms = gru_register_mmu_notifier();
+ if (!gts->ts_gms)
+ goto err;
+ }
- gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts);
+ gru_dbg(grudev, "alloc gts %p\n", gts);
return gts;
err:
@@ -381,7 +392,8 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts, *ngts;
- gts = gru_alloc_gts(vma, vdata, tsid);
+ gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
+ vdata->vd_user_options, tsid);
if (!gts)
return NULL;
@@ -458,7 +470,8 @@ static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
}
static void gru_load_context_data(void *save, void *grubase, int ctxnum,
- unsigned long cbrmap, unsigned long dsrmap)
+ unsigned long cbrmap, unsigned long dsrmap,
+ int data_valid)
{
void *gseg, *cb, *cbe;
unsigned long length;
@@ -471,12 +484,22 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
- save += gru_copy_handle(cb, save);
- save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
+ if (data_valid) {
+ save += gru_copy_handle(cb, save);
+ save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
+ save);
+ } else {
+ memset(cb, 0, GRU_CACHE_LINE_BYTES);
+ memset(cbe + i * GRU_HANDLE_STRIDE, 0,
+ GRU_CACHE_LINE_BYTES);
+ }
cb += GRU_HANDLE_STRIDE;
}
- memcpy(gseg + GRU_DS_BASE, save, length);
+ if (data_valid)
+ memcpy(gseg + GRU_DS_BASE, save, length);
+ else
+ memset(gseg + GRU_DS_BASE, 0, length);
}
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
@@ -506,7 +529,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
struct gru_context_configuration_handle *cch;
int ctxnum = gts->ts_ctxnum;
- zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
+ if (!is_kernel_context(gts))
+ zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
gru_dbg(grudev, "gts %p\n", gts);
@@ -514,11 +538,14 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
if (cch_interrupt_sync(cch))
BUG();
- gru_unload_mm_tracker(gru, gts);
- if (savestate)
+ if (!is_kernel_context(gts))
+ gru_unload_mm_tracker(gru, gts);
+ if (savestate) {
gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
ctxnum, gts->ts_cbr_map,
gts->ts_dsr_map);
+ gts->ts_data_valid = 1;
+ }
if (cch_deallocate(cch))
BUG();
@@ -526,24 +553,22 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
unlock_cch_handle(cch);
gru_free_gru_context(gts);
- STAT(unload_context);
}
/*
* Load a GRU context by copying it from the thread data structure in memory
* to the GRU.
*/
-static void gru_load_context(struct gru_thread_state *gts)
+void gru_load_context(struct gru_thread_state *gts)
{
struct gru_state *gru = gts->ts_gru;
struct gru_context_configuration_handle *cch;
- int err, asid, ctxnum = gts->ts_ctxnum;
+ int i, err, asid, ctxnum = gts->ts_ctxnum;
gru_dbg(grudev, "gts %p\n", gts);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
lock_cch_handle(cch);
- asid = gru_load_mm_tracker(gru, gts);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
|| gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
@@ -552,9 +577,32 @@ static void gru_load_context(struct gru_thread_state *gts)
gts->ts_tlb_int_select = gru_cpu_fault_map_id();
cch->tlb_int_select = gts->ts_tlb_int_select;
}
+ if (gts->ts_cch_req_slice >= 0) {
+ cch->req_slice_set_enable = 1;
+ cch->req_slice = gts->ts_cch_req_slice;
+ } else {
+ cch->req_slice_set_enable = 0;
+ }
cch->tfm_done_bit_enable = 0;
- err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
- gts->ts_dsr_map);
+ cch->dsr_allocation_map = gts->ts_dsr_map;
+ cch->cbr_allocation_map = gts->ts_cbr_map;
+
+ if (is_kernel_context(gts)) {
+ cch->unmap_enable = 1;
+ cch->tfm_done_bit_enable = 1;
+ cch->cb_int_enable = 1;
+ } else {
+ cch->unmap_enable = 0;
+ cch->tfm_done_bit_enable = 0;
+ cch->cb_int_enable = 0;
+ asid = gru_load_mm_tracker(gru, gts);
+ for (i = 0; i < 8; i++) {
+ cch->asid[i] = asid + i;
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ }
+ }
+
+ err = cch_allocate(cch);
if (err) {
gru_dbg(grudev,
"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
@@ -563,13 +611,11 @@ static void gru_load_context(struct gru_thread_state *gts)
}
gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
- gts->ts_cbr_map, gts->ts_dsr_map);
+ gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
if (cch_start(cch))
BUG();
unlock_cch_handle(cch);
-
- STAT(load_context);
}
/*
@@ -599,6 +645,9 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
cch->sizeavail[i] = gts->ts_sizeavail;
gts->ts_tlb_int_select = gru_cpu_fault_map_id();
cch->tlb_int_select = gru_cpu_fault_map_id();
+ cch->tfm_fault_bit_enable =
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+ || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
} else {
for (i = 0; i < 8; i++)
cch->asid[i] = 0;
@@ -642,7 +691,28 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
#define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
((g)+1) : &(b)->bs_grus[0])
-static void gru_steal_context(struct gru_thread_state *gts)
+static int is_gts_stealable(struct gru_thread_state *gts,
+ struct gru_blade_state *bs)
+{
+ if (is_kernel_context(gts))
+ return down_write_trylock(&bs->bs_kgts_sema);
+ else
+ return mutex_trylock(&gts->ts_ctxlock);
+}
+
+static void gts_stolen(struct gru_thread_state *gts,
+ struct gru_blade_state *bs)
+{
+ if (is_kernel_context(gts)) {
+ up_write(&bs->bs_kgts_sema);
+ STAT(steal_kernel_context);
+ } else {
+ mutex_unlock(&gts->ts_ctxlock);
+ STAT(steal_user_context);
+ }
+}
+
+void gru_steal_context(struct gru_thread_state *gts, int blade_id)
{
struct gru_blade_state *blade;
struct gru_state *gru, *gru0;
@@ -652,8 +722,7 @@ static void gru_steal_context(struct gru_thread_state *gts)
cbr = gts->ts_cbr_au_count;
dsr = gts->ts_dsr_au_count;
- preempt_disable();
- blade = gru_base[uv_numa_blade_id()];
+ blade = gru_base[blade_id];
spin_lock(&blade->bs_lock);
ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
@@ -676,7 +745,7 @@ static void gru_steal_context(struct gru_thread_state *gts)
* success are high. If trylock fails, try to steal a
* different GSEG.
*/
- if (ngts && mutex_trylock(&ngts->ts_ctxlock))
+ if (ngts && is_gts_stealable(ngts, blade))
break;
ngts = NULL;
flag = 1;
@@ -690,13 +759,12 @@ static void gru_steal_context(struct gru_thread_state *gts)
blade->bs_lru_gru = gru;
blade->bs_lru_ctxnum = ctxnum;
spin_unlock(&blade->bs_lock);
- preempt_enable();
if (ngts) {
- STAT(steal_context);
+ gts->ustats.context_stolen++;
ngts->ts_steal_jiffies = jiffies;
- gru_unload_context(ngts, 1);
- mutex_unlock(&ngts->ts_ctxlock);
+ gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
+ gts_stolen(ngts, blade);
} else {
STAT(steal_context_failed);
}
@@ -710,17 +778,17 @@ static void gru_steal_context(struct gru_thread_state *gts)
/*
* Scan the GRUs on the local blade & assign a GRU context.
*/
-static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
+ int blade)
{
struct gru_state *gru, *grux;
int i, max_active_contexts;
- preempt_disable();
again:
gru = NULL;
max_active_contexts = GRU_NUM_CCH;
- for_each_gru_on_blade(grux, uv_numa_blade_id(), i) {
+ for_each_gru_on_blade(grux, blade, i) {
if (check_gru_resources(grux, gts->ts_cbr_au_count,
gts->ts_dsr_au_count,
max_active_contexts)) {
@@ -760,7 +828,6 @@ again:
STAT(assign_context_failed);
}
- preempt_enable();
return gru;
}
@@ -775,6 +842,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
+ int blade_id;
vaddr = (unsigned long)vmf->virtual_address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -789,8 +857,10 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
+ blade_id = uv_numa_blade_id();
+
if (gts->ts_gru) {
- if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
+ if (gts->ts_gru->gs_blade_id != blade_id) {
STAT(migrated_nopfn_unload);
gru_unload_context(gts, 1);
} else {
@@ -800,12 +870,15 @@ again:
}
if (!gts->ts_gru) {
- if (!gru_assign_gru_context(gts)) {
- mutex_unlock(&gts->ts_ctxlock);
+ STAT(load_user_context);
+ if (!gru_assign_gru_context(gts, blade_id)) {
preempt_enable();
+ mutex_unlock(&gts->ts_ctxlock);
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
+ blade_id = uv_numa_blade_id();
if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
- gru_steal_context(gts);
+ gru_steal_context(gts, blade_id);
goto again;
}
gru_load_context(gts);
@@ -815,8 +888,8 @@ again:
vma->vm_page_prot);
}
- mutex_unlock(&gts->ts_ctxlock);
preempt_enable();
+ mutex_unlock(&gts->ts_ctxlock);
return VM_FAULT_NOPAGE;
}
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index ee74821b171c..9cbf95bedce6 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -51,9 +51,12 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, assign_context);
printstat(s, assign_context_failed);
printstat(s, free_context);
- printstat(s, load_context);
- printstat(s, unload_context);
- printstat(s, steal_context);
+ printstat(s, load_user_context);
+ printstat(s, load_kernel_context);
+ printstat(s, lock_kernel_context);
+ printstat(s, unlock_kernel_context);
+ printstat(s, steal_user_context);
+ printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
printstat(s, break_cow);
@@ -70,7 +73,7 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
- printstat(s, set_task_slice);
+ printstat(s, set_context_option);
printstat(s, migrate_check);
printstat(s, migrated_retarget);
printstat(s, migrated_unload);
@@ -84,6 +87,9 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, tlb_dropin_fail_range_active);
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
+ printstat(s, tlb_dropin_fail_no_exception);
+ printstat(s, tlb_dropin_fail_no_exception_war);
+ printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
printstat(s, mmu_clear_flush_young);
@@ -158,8 +164,7 @@ static ssize_t options_write(struct file *file, const char __user *userbuf,
unsigned long val;
char buf[80];
- if (copy_from_user
- (buf, userbuf, count < sizeof(buf) ? count : sizeof(buf)))
+ if (strncpy_from_user(buf, userbuf, sizeof(buf) - 1) < 0)
return -EFAULT;
buf[count - 1] = '\0';
if (!strict_strtoul(buf, 10, &val))
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index bf1eeb7553ed..34ab3d453919 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -148,11 +148,13 @@
#include <linux/wait.h>
#include <linux/mmu_notifier.h>
#include "gru.h"
+#include "grulib.h"
#include "gruhandles.h"
extern struct gru_stats_s gru_stats;
extern struct gru_blade_state *gru_base[];
extern unsigned long gru_start_paddr, gru_end_paddr;
+extern void *gru_start_vaddr;
extern unsigned int gru_max_gids;
#define GRU_MAX_BLADES MAX_NUMNODES
@@ -174,9 +176,12 @@ struct gru_stats_s {
atomic_long_t assign_context;
atomic_long_t assign_context_failed;
atomic_long_t free_context;
- atomic_long_t load_context;
- atomic_long_t unload_context;
- atomic_long_t steal_context;
+ atomic_long_t load_user_context;
+ atomic_long_t load_kernel_context;
+ atomic_long_t lock_kernel_context;
+ atomic_long_t unlock_kernel_context;
+ atomic_long_t steal_user_context;
+ atomic_long_t steal_kernel_context;
atomic_long_t steal_context_failed;
atomic_long_t nopfn;
atomic_long_t break_cow;
@@ -193,7 +198,7 @@ struct gru_stats_s {
atomic_long_t user_flush_tlb;
atomic_long_t user_unload_context;
atomic_long_t user_exception;
- atomic_long_t set_task_slice;
+ atomic_long_t set_context_option;
atomic_long_t migrate_check;
atomic_long_t migrated_retarget;
atomic_long_t migrated_unload;
@@ -207,6 +212,9 @@ struct gru_stats_s {
atomic_long_t tlb_dropin_fail_range_active;
atomic_long_t tlb_dropin_fail_idle;
atomic_long_t tlb_dropin_fail_fmm;
+ atomic_long_t tlb_dropin_fail_no_exception;
+ atomic_long_t tlb_dropin_fail_no_exception_war;
+ atomic_long_t tfh_stale_on_fault;
atomic_long_t mmu_invalidate_range;
atomic_long_t mmu_invalidate_page;
atomic_long_t mmu_clear_flush_young;
@@ -253,7 +261,6 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define OPT_DPRINT 1
#define OPT_STATS 2
-#define GRU_QUICKLOOK 4
#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@@ -373,6 +380,7 @@ struct gru_thread_state {
required for context */
unsigned char ts_cbr_au_count;/* Number of CBR resources
required for context */
+ char ts_cch_req_slice;/* CCH packet slice */
char ts_blade; /* If >= 0, migrate context if
ref from different blade */
char ts_force_cch_reload;
@@ -380,6 +388,9 @@ struct gru_thread_state {
after migration */
char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
allocated CB */
+ int ts_data_valid; /* Indicates if ts_gdata has
+ valid data */
+ struct gts_statistics ustats; /* User statistics */
unsigned long ts_gdata[0]; /* save area for GRU data (CB,
DS, CBE) */
};
@@ -452,6 +463,14 @@ struct gru_blade_state {
reserved cb */
void *kernel_dsr; /* First kernel
reserved DSR */
+ struct rw_semaphore bs_kgts_sema; /* lock for kgts */
+ struct gru_thread_state *bs_kgts; /* GTS for kernel use */
+
+ /* ---- the following are used for managing kernel async GRU CBRs --- */
+ int bs_async_dsr_bytes; /* DSRs for async */
+ int bs_async_cbrs; /* CBRs AU for async */
+ struct completion *bs_async_wq;
+
/* ---- the following are protected by the bs_lock spinlock ---- */
spinlock_t bs_lock; /* lock used for
stealing contexts */
@@ -552,6 +571,12 @@ struct gru_blade_state {
/* Lock hierarchy checking enabled only in emulator */
+/* 0 = lock failed, 1 = locked */
+static inline int __trylock_handle(void *h)
+{
+ return !test_and_set_bit(1, h);
+}
+
static inline void __lock_handle(void *h)
{
while (test_and_set_bit(1, h))
@@ -563,6 +588,11 @@ static inline void __unlock_handle(void *h)
clear_bit(1, h);
}
+static inline int trylock_cch_handle(struct gru_context_configuration_handle *cch)
+{
+ return __trylock_handle(cch);
+}
+
static inline void lock_cch_handle(struct gru_context_configuration_handle *cch)
{
__lock_handle(cch);
@@ -584,6 +614,11 @@ static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
__unlock_handle(tgh);
}
+static inline int is_kernel_context(struct gru_thread_state *gts)
+{
+ return !gts->ts_mm;
+}
+
/*-----------------------------------------------------------------------------
* Function prototypes & externs
*/
@@ -598,24 +633,32 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
*vma, int tsid);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
+ int blade);
+extern void gru_load_context(struct gru_thread_state *gts);
+extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
-extern int gru_kservices_init(struct gru_state *gru);
-extern void gru_kservices_exit(struct gru_state *gru);
+extern int gru_kservices_init(void);
+extern void gru_kservices_exit(void);
+extern int gru_dump_chiplet_request(unsigned long arg);
+extern long gru_get_gseg_statistics(unsigned long arg);
extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
-extern int gru_set_task_slice(long address);
+extern int gru_set_context_option(unsigned long address);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
extern int gru_proc_init(void);
extern void gru_proc_exit(void);
+extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
+ int cbr_au_count, int dsr_au_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
@@ -624,6 +667,7 @@ extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
extern struct gru_mm_struct *gru_register_mmu_notifier(void);
extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
+extern int gru_ktest(unsigned long arg);
extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
unsigned long len);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3f063108e95f..b1cd7a1a2191 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -49,15 +49,16 @@ config MTD_UBI_BEB_RESERVE
reserved. Leave the default value if unsure.
config MTD_UBI_GLUEBI
- bool "Emulate MTD devices"
+ tristate "MTD devices emulation driver (gluebi)"
default n
depends on MTD_UBI
help
- This option enables MTD devices emulation on top of UBI volumes: for
- each UBI volumes an MTD device is created, and all I/O to this MTD
- device is redirected to the UBI volume. This is handy to make
- MTD-oriented software (like JFFS2) work on top of UBI. Do not enable
- this if no legacy software will be used.
+ This option enables gluebi - an additional driver which emulates MTD
+ devices on top of UBI volumes: for each UBI volume an MTD device is
+ created, and all I/O to this MTD device is redirected to the UBI
+ volume. This is handy to make MTD-oriented software (like JFFS2)
+ work on top of UBI. Do not enable this unless you use legacy
+ software.
source "drivers/mtd/ubi/Kconfig.debug"
endmenu
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index dd834e04151b..c9302a5452b0 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -4,4 +4,4 @@ ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
ubi-y += misc.o
ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
-ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 4048db83aef6..286ed594e5a0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -41,6 +41,7 @@
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
+#include <linux/reboot.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
@@ -122,6 +123,94 @@ static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
/**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
+{
+ struct ubi_notification nt;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+ ubi_do_get_volume_info(ubi, vol, &nt.vi);
+ return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
+}
+
+/**
+ * ubi_notify_all - send a notification to all volumes.
+ * @ubi: UBI device description object
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ * @nb: the notifier to call
+ *
+ * This function walks all volumes of UBI device @ubi and sends the @ntype
+ * notification for each volume. If @nb is %NULL, then all registered notifiers
+ * are called, otherwise only the @nb notifier is called. Returns the number of
+ * sent notifications.
+ */
+int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
+{
+ struct ubi_notification nt;
+ int i, count = 0;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+
+ mutex_lock(&ubi->device_mutex);
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ /*
+ * Since the @ubi->device is locked, and we are not going to
+ * change @ubi->volumes, we do not have to lock
+ * @ubi->volumes_lock.
+ */
+ if (!ubi->volumes[i])
+ continue;
+
+ ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
+ if (nb)
+ nb->notifier_call(nb, ntype, &nt);
+ else
+ blocking_notifier_call_chain(&ubi_notifiers, ntype,
+ &nt);
+ count += 1;
+ }
+ mutex_unlock(&ubi->device_mutex);
+
+ return count;
+}
+
+/**
+ * ubi_enumerate_volumes - send "add" notification for all existing volumes.
+ * @nb: the notifier to call
+ *
+ * This function walks all UBI devices and volumes and sends the
+ * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
+ * registered notifiers are called, otherwise only the @nb notifier is called.
+ * Returns the number of sent notifications.
+ */
+int ubi_enumerate_volumes(struct notifier_block *nb)
+{
+ int i, count = 0;
+
+ /*
+ * Since the @ubi_devices_mutex is locked, and we are not going to
+ * change @ubi_devices, we do not have to lock @ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (!ubi)
+ continue;
+ count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
+ }
+
+ return count;
+}
+
+/**
* ubi_get_device - get UBI device.
* @ubi_num: UBI device number
*
@@ -380,7 +469,7 @@ static void free_user_volumes(struct ubi_device *ubi)
* @ubi: UBI device description object
*
* This function returns zero in case of success and a negative error code in
- * case of failure. Note, this function destroys all volumes if it failes.
+ * case of failure. Note, this function destroys all volumes if it fails.
*/
static int uif_init(struct ubi_device *ubi)
{
@@ -633,6 +722,15 @@ static int io_init(struct ubi_device *ubi)
}
/*
+ * Set the maximum number of erroneous physical eraseblocks to 10%.
+ * Erroneous PEBs are those which have read errors.
+ */
+ ubi->max_erroneous = ubi->peb_count / 10;
+ if (ubi->max_erroneous < 16)
+ ubi->max_erroneous = 16;
+ dbg_msg("max_erroneous %d", ubi->max_erroneous);
+
+ /*
* It may happen that EC and VID headers are situated in one minimal
* I/O unit. In this case we can only accept this UBI image in
* read-only mode.
@@ -726,6 +824,34 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
}
/**
+ * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
+ * @n: reboot notifier object
+ * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
+ * @cmd: pointer to command string for RESTART2
+ *
+ * This function stops the UBI background thread so that the flash device
+ * remains quiescent when Linux restarts the system. Any queued work will be
+ * discarded, but this function will block until do_work() finishes if an
+ * operation is already in progress.
+ *
+ * This function solves a real-life problem observed on NOR flashes when an
+ * PEB erase operation starts, then the system is rebooted before the erase is
+ * finishes, and the boot loader gets confused and dies. So we prefer to finish
+ * the ongoing operation before rebooting.
+ */
+static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
+ void *cmd)
+{
+ struct ubi_device *ubi;
+
+ ubi = container_of(n, struct ubi_device, reboot_notifier);
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+ ubi_sync(ubi->ubi_num);
+ return NOTIFY_DONE;
+}
+
+/**
* ubi_attach_mtd_dev - attach an MTD device.
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
@@ -806,8 +932,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
- mutex_init(&ubi->mult_mutex);
- mutex_init(&ubi->volumes_mutex);
+ mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
@@ -825,7 +950,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf2)
goto out_free;
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
mutex_init(&ubi->dbg_buf_mutex);
ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
if (!ubi->dbg_peb_buf)
@@ -872,11 +997,23 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi->beb_rsvd_pebs);
ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
+ /*
+ * The below lock makes sure we do not race with 'ubi_thread()' which
+ * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
+ */
+ spin_lock(&ubi->wl_lock);
if (!DBG_DISABLE_BGT)
ubi->thread_enabled = 1;
wake_up_process(ubi->bgt_thread);
+ spin_unlock(&ubi->wl_lock);
+
+ /* Flash device priority is 0 - UBI needs to shut down first */
+ ubi->reboot_notifier.priority = 1;
+ ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
+ register_reboot_notifier(&ubi->reboot_notifier);
ubi_devices[ubi_num] = ubi;
+ ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
out_uif:
@@ -892,7 +1029,7 @@ out_detach:
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
vfree(ubi->dbg_peb_buf);
#endif
kfree(ubi);
@@ -919,13 +1056,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
- spin_lock(&ubi_devices_lock);
- ubi = ubi_devices[ubi_num];
- if (!ubi) {
- spin_unlock(&ubi_devices_lock);
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
return -EINVAL;
- }
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
+ ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
spin_unlock(&ubi_devices_lock);
@@ -939,12 +1076,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
spin_unlock(&ubi_devices_lock);
ubi_assert(ubi_num == ubi->ubi_num);
+ ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
*/
+ unregister_reboot_notifier(&ubi->reboot_notifier);
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
@@ -961,7 +1100,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
vfree(ubi->dbg_peb_buf);
#endif
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f8e0f68f2186..f237ddbb2713 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -113,7 +113,8 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
else
mode = UBI_READONLY;
- dbg_gen("open volume %d, mode %d", vol_id, mode);
+ dbg_gen("open device %d, volume %d, mode %d",
+ ubi_num, vol_id, mode);
desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
@@ -128,7 +129,8 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
- dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode);
+ dbg_gen("release device %d, volume %d, mode %d",
+ vol->ubi->ubi_num, vol->vol_id, desc->mode);
if (vol->updating) {
ubi_warn("update of volume %d not finished, volume is damaged",
@@ -393,7 +395,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
vol->corrupted = 1;
}
vol->checked = 1;
- ubi_gluebi_updated(vol);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
revoke_exclusive(desc, UBI_READWRITE);
}
@@ -558,7 +560,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- /* Set volume property command*/
+ /* Set volume property command */
case UBI_IOCSETPROP:
{
struct ubi_set_prop_req req;
@@ -571,9 +573,9 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
}
switch (req.property) {
case UBI_PROP_DIRECT_WRITE:
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
desc->vol->direct_writes = !!req.value;
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
break;
default:
err = -EINVAL;
@@ -810,9 +812,9 @@ static int rename_volumes(struct ubi_device *ubi,
re->desc->vol->vol_id, re->desc->vol->name);
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_rename_volumes(ubi, &rename_list);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
out_free:
list_for_each_entry_safe(re, re1, &rename_list, list) {
@@ -856,9 +858,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
if (err)
break;
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_create_volume(ubi, &req);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
if (err)
break;
@@ -887,9 +889,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_remove_volume(desc, 0);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
/*
* The volume is deleted (unless an error occurred), and the
@@ -926,9 +928,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
desc->vol->usable_leb_size);
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_resize_volume(desc, pebs);
- mutex_unlock(&ubi->volumes_mutex);
+ mutex_unlock(&ubi->device_mutex);
ubi_close_volume(desc);
break;
}
@@ -952,9 +954,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
- mutex_lock(&ubi->mult_mutex);
err = rename_volumes(ubi, req);
- mutex_unlock(&ubi->mult_mutex);
kfree(req);
break;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 25def348e5ba..0f2034c3ed2f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -419,8 +419,9 @@ retry:
* not implemented.
*/
if (err == UBI_IO_BAD_VID_HDR) {
- ubi_warn("bad VID header at PEB %d, LEB"
- "%d:%d", pnum, vol_id, lnum);
+ ubi_warn("corrupted VID header at PEB "
+ "%d, LEB %d:%d", pnum, vol_id,
+ lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -940,6 +941,33 @@ write_error:
}
/**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
+ * mode, simply because we do not know what happened at the MTD level, and we
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_VID_HDR ||
+ err == -ETIMEDOUT)
+ return 0;
+ return 1;
+}
+
+/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
@@ -950,12 +978,7 @@ write_error:
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
* o %0 in case of success;
- * o %1 if the operation was canceled because the volume is being deleted
- * or because the PEB was put meanwhile;
- * o %2 if the operation was canceled because there was a write error to the
- * target PEB;
- * o %-EAGAIN if the operation was canceled because a bit-flip was detected
- * in the target PEB;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -968,7 +991,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
- dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
if (vid_hdr->vol_type == UBI_VID_STATIC) {
data_size = be32_to_cpu(vid_hdr->data_size);
@@ -986,13 +1009,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
vol = ubi->volumes[idx];
+ spin_unlock(&ubi->volumes_lock);
if (!vol) {
/* No need to do further work, cancel */
- dbg_eba("volume %d is being removed, cancel", vol_id);
- spin_unlock(&ubi->volumes_lock);
- return 1;
+ dbg_wl("volume %d is being removed, cancel", vol_id);
+ return MOVE_CANCEL_RACE;
}
- spin_unlock(&ubi->volumes_lock);
/*
* We do not want anybody to write to this logical eraseblock while we
@@ -1004,12 +1026,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* (@from). This task locks the LEB and goes sleep in the
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
- * LEB is already locked, we just do not move it and return %1.
+ * LEB is already locked, we just do not move it and return
+ * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
- dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
- return err;
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return MOVE_CANCEL_RACE;
}
/*
@@ -1018,25 +1041,26 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
- err = 1;
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+ "PEB %d, cancel", vol_id, lnum, from,
+ vol->eba_tbl[lnum]);
+ err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
/*
* OK, now the LEB is locked and we can safely start moving it. Since
- * this function utilizes the @ubi->peb1_buf buffer which is shared
- * with some other functions, so lock the buffer by taking the
+ * this function utilizes the @ubi->peb_buf1 buffer which is shared
+ * with some other functions - we lock the buffer by taking the
* @ubi->buf_mutex.
*/
mutex_lock(&ubi->buf_mutex);
- dbg_eba("read %d bytes of data", aldata_size);
+ dbg_wl("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
+ err = MOVE_SOURCE_RD_ERR;
goto out_unlock_buf;
}
@@ -1059,7 +1083,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
/*
- * It may turn out to me that the whole @from physical eraseblock
+ * It may turn out to be that the whole @from physical eraseblock
* contains only 0xFF bytes. Then we have to only write the VID header
* and do not write any data. This also means we should not set
* @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
@@ -1074,7 +1098,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
if (err == -EIO)
- err = 2;
+ err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
}
@@ -1083,10 +1107,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
/* Read the VID header back and check if it was written correctly */
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
- if (err != UBI_IO_BITFLIPS)
- ubi_warn("cannot read VID header back from PEB %d", to);
- else
- err = -EAGAIN;
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading VID header back from "
+ "PEB %d", err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_CANCEL_BITFLIPS;
goto out_unlock_buf;
}
@@ -1094,7 +1121,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
if (err) {
if (err == -EIO)
- err = 2;
+ err = MOVE_TARGET_WR_ERR;
goto out_unlock_buf;
}
@@ -1107,11 +1134,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
if (err) {
- if (err != UBI_IO_BITFLIPS)
- ubi_warn("cannot read data back from PEB %d",
- to);
- else
- err = -EAGAIN;
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn("error %d while reading data back "
+ "from PEB %d", err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_CANCEL_BITFLIPS;
goto out_unlock_buf;
}
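
For reference, the new ubi_eba_copy_leb() return codes map onto the situations listed in the ubi.h hunk further down. The sketch below is purely illustrative - the real dispatch happens in the wear-leveling worker in wl.c, which is not part of this hunk:

#include "ubi.h"	/* UBI-internal header that defines the MOVE_* constants */

/*
 * Illustrative only - a human-readable summary of the ubi_eba_copy_leb()
 * return codes introduced by this patch.
 */
static const char *copy_leb_result(int err)
{
	switch (err) {
	case 0:
		return "LEB copied successfully";
	case MOVE_CANCEL_RACE:
		return "canceled: volume deleted, PEB put, or LEB busy - retry later";
	case MOVE_SOURCE_RD_ERR:
		return "canceled: read error on the source PEB";
	case MOVE_TARGET_RD_ERR:
		return "canceled: read error on the target PEB";
	case MOVE_TARGET_WR_ERR:
		return "canceled: write error on the target PEB";
	case MOVE_CANCEL_BITFLIPS:
		return "canceled: bit-flips detected in the target PEB";
	default:
		return err < 0 ? "fatal error" : "unknown code";
	}
}
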
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 49cd55ade9c8..95aaac03f938 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -19,17 +19,71 @@
*/
/*
- * This file includes implementation of fake MTD devices for each UBI volume.
- * This sounds strange, but it is in fact quite useful to make MTD-oriented
- * software (including all the legacy software) to work on top of UBI.
+ * This is a small driver which implements fake MTD devices on top of UBI
+ * volumes. This sounds strange, but it is in fact quite useful to make
+ * MTD-oriented software (including all the legacy software) work on top of
+ * UBI.
*
* Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
- * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The
+ * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The
* eraseblock size is equivalent to the logical eraseblock size of the volume.
*/
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/sched.h>
#include <linux/math64.h>
-#include "ubi.h"
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mtd/ubi.h>
+#include <linux/mtd/mtd.h>
+#include "ubi-media.h"
+
+#define err_msg(fmt, ...) \
+ printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__)
+
+/**
+ * struct gluebi_device - a gluebi device description data structure.
+ * @mtd: emulated MTD device description object
+ * @refcnt: gluebi device reference count
+ * @desc: UBI volume descriptor
+ * @ubi_num: UBI device number this gluebi device works on
+ * @vol_id: ID of UBI volume this gluebi device works on
+ * @list: link in a list of gluebi devices
+ */
+struct gluebi_device {
+ struct mtd_info mtd;
+ int refcnt;
+ struct ubi_volume_desc *desc;
+ int ubi_num;
+ int vol_id;
+ struct list_head list;
+};
+
+/* List of all gluebi devices */
+static LIST_HEAD(gluebi_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+/**
+ * find_gluebi_nolock - find a gluebi device.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ *
+ * This function searches for a gluebi device corresponding to UBI device
+ * @ubi_num and UBI volume @vol_id. Returns the gluebi device description
+ * object in case of success and %NULL in case of failure. The caller has to
+ * have the &devices_mutex locked.
+ */
+static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id)
+{
+ struct gluebi_device *gluebi;
+
+ list_for_each_entry(gluebi, &gluebi_devices, list)
+ if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id)
+ return gluebi;
+ return NULL;
+}
/**
* gluebi_get_device - get MTD device reference.
@@ -41,15 +95,18 @@
*/
static int gluebi_get_device(struct mtd_info *mtd)
{
- struct ubi_volume *vol;
+ struct gluebi_device *gluebi;
+ int ubi_mode = UBI_READONLY;
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
- /*
- * We do not introduce locks for gluebi reference count because the
- * get_device()/put_device() calls are already serialized at MTD.
- */
- if (vol->gluebi_refcount > 0) {
+ if (mtd->flags & MTD_WRITEABLE)
+ ubi_mode = UBI_READWRITE;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ if (gluebi->refcnt > 0) {
/*
* The MTD device is already referenced and this is just one
* more reference. MTD allows many users to open the same
@@ -58,7 +115,8 @@ static int gluebi_get_device(struct mtd_info *mtd)
* open the UBI volume again - just increase the reference
* counter and return.
*/
- vol->gluebi_refcount += 1;
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
return 0;
}
@@ -66,11 +124,15 @@ static int gluebi_get_device(struct mtd_info *mtd)
* This is the first reference to this UBI volume via the MTD device
* interface. Open the corresponding volume in read-write mode.
*/
- vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id,
- UBI_READWRITE);
- if (IS_ERR(vol->gluebi_desc))
- return PTR_ERR(vol->gluebi_desc);
- vol->gluebi_refcount += 1;
+ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id,
+ ubi_mode);
+ if (IS_ERR(gluebi->desc)) {
+ mutex_unlock(&devices_mutex);
+ module_put(THIS_MODULE);
+ return PTR_ERR(gluebi->desc);
+ }
+ gluebi->refcnt += 1;
+ mutex_unlock(&devices_mutex);
return 0;
}
@@ -83,13 +145,15 @@ static int gluebi_get_device(struct mtd_info *mtd)
*/
static void gluebi_put_device(struct mtd_info *mtd)
{
- struct ubi_volume *vol;
-
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- vol->gluebi_refcount -= 1;
- ubi_assert(vol->gluebi_refcount >= 0);
- if (vol->gluebi_refcount == 0)
- ubi_close_volume(vol->gluebi_desc);
+ struct gluebi_device *gluebi;
+
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
+ mutex_lock(&devices_mutex);
+ gluebi->refcnt -= 1;
+ if (gluebi->refcnt == 0)
+ ubi_close_volume(gluebi->desc);
+ module_put(THIS_MODULE);
+ mutex_unlock(&devices_mutex);
}
/**
@@ -107,16 +171,12 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, unsigned char *buf)
{
int err = 0, lnum, offs, total_read;
- struct ubi_volume *vol;
- struct ubi_device *ubi;
-
- dbg_gen("read %zd bytes from offset %lld", len, from);
+ struct gluebi_device *gluebi;
if (len < 0 || from < 0 || from + len > mtd->size)
return -EINVAL;
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- ubi = vol->ubi;
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
lnum = div_u64_rem(from, mtd->erasesize, &offs);
total_read = len;
@@ -126,7 +186,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
if (to_read > total_read)
to_read = total_read;
- err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
+ err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
if (err)
break;
@@ -152,21 +212,17 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
* case of failure.
*/
static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+ size_t *retlen, const u_char *buf)
{
int err = 0, lnum, offs, total_written;
- struct ubi_volume *vol;
- struct ubi_device *ubi;
-
- dbg_gen("write %zd bytes to offset %lld", len, to);
+ struct gluebi_device *gluebi;
if (len < 0 || to < 0 || len + to > mtd->size)
return -EINVAL;
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- ubi = vol->ubi;
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
- if (ubi->ro_mode)
+ if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
lnum = div_u64_rem(to, mtd->erasesize, &offs);
@@ -181,8 +237,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to_write > total_written)
to_write = total_written;
- err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
- UBI_UNKNOWN);
+ err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
if (err)
break;
@@ -207,41 +262,36 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
{
int err, i, lnum, count;
- struct ubi_volume *vol;
- struct ubi_device *ubi;
-
- dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
- (unsigned long long)instr->addr);
+ struct gluebi_device *gluebi;
if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
return -EINVAL;
-
if (instr->len < 0 || instr->addr + instr->len > mtd->size)
return -EINVAL;
-
if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
return -EINVAL;
lnum = mtd_div_by_eb(instr->addr, mtd);
count = mtd_div_by_eb(instr->len, mtd);
- vol = container_of(mtd, struct ubi_volume, gluebi_mtd);
- ubi = vol->ubi;
+ gluebi = container_of(mtd, struct gluebi_device, mtd);
- if (ubi->ro_mode)
+ if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- for (i = 0; i < count; i++) {
- err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
+ for (i = 0; i < count - 1; i++) {
+ err = ubi_leb_unmap(gluebi->desc, lnum + i);
if (err)
goto out_err;
}
-
/*
* MTD erase operations are synchronous, so we have to make sure the
* physical eraseblock is wiped out.
+ *
+ * Thus, perform a leb_erase instead of a leb_unmap operation - leb_erase
+ * will wait for the end of the operation.
*/
- err = ubi_wl_flush(ubi);
+ err = ubi_leb_erase(gluebi->desc, lnum + i);
if (err)
goto out_err;
@@ -256,28 +306,38 @@ out_err:
}
/**
- * ubi_create_gluebi - initialize gluebi for an UBI volume.
- * @ubi: UBI device description object
- * @vol: volume description object
+ * gluebi_create - create a gluebi device for an UBI volume.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
*
- * This function is called when an UBI volume is created in order to create
+ * This function is called when a new UBI volume is created in order to create
* corresponding fake MTD device. Returns zero in case of success and a
* negative error code in case of failure.
*/
-int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
+static int gluebi_create(struct ubi_device_info *di,
+ struct ubi_volume_info *vi)
{
- struct mtd_info *mtd = &vol->gluebi_mtd;
+ struct gluebi_device *gluebi, *g;
+ struct mtd_info *mtd;
- mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL);
- if (!mtd->name)
+ gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
+ if (!gluebi)
return -ENOMEM;
+ mtd = &gluebi->mtd;
+ mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
+ if (!mtd->name) {
+ kfree(gluebi);
+ return -ENOMEM;
+ }
+
+ gluebi->vol_id = vi->vol_id;
mtd->type = MTD_UBIVOLUME;
- if (!ubi->ro_mode)
+ if (!di->ro_mode)
mtd->flags = MTD_WRITEABLE;
- mtd->writesize = ubi->min_io_size;
mtd->owner = THIS_MODULE;
- mtd->erasesize = vol->usable_leb_size;
+ mtd->writesize = di->min_io_size;
+ mtd->erasesize = vi->usable_leb_size;
mtd->read = gluebi_read;
mtd->write = gluebi_write;
mtd->erase = gluebi_erase;
@@ -285,60 +345,196 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
mtd->put_device = gluebi_put_device;
/*
- * In case of dynamic volume, MTD device size is just volume size. In
+ * In case of a dynamic volume, the MTD device size is just the volume size. In
* case of a static volume the size is equivalent to the amount of data
* bytes.
*/
- if (vol->vol_type == UBI_DYNAMIC_VOLUME)
- mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs;
+ if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+ mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
else
- mtd->size = vol->used_bytes;
+ mtd->size = vi->used_bytes;
+
+ /* Just a sanity check - make sure this gluebi device does not exist */
+ mutex_lock(&devices_mutex);
+ g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (g)
+ err_msg("gluebi MTD device %d form UBI device %d volume %d "
+ "already exists", g->mtd.index, vi->ubi_num,
+ vi->vol_id);
+ mutex_unlock(&devices_mutex);
if (add_mtd_device(mtd)) {
- ubi_err("cannot not add MTD device");
+ err_msg("cannot add MTD device");
kfree(mtd->name);
+ kfree(gluebi);
return -ENFILE;
}
- dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u",
- mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize);
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
return 0;
}
/**
- * ubi_destroy_gluebi - close gluebi for an UBI volume.
- * @vol: volume description object
+ * gluebi_remove - remove a gluebi device.
+ * @vi: UBI volume description object
*
- * This function is called when an UBI volume is removed in order to remove
+ * This function is called when an UBI volume is removed and it removes
* corresponding fake MTD device. Returns zero in case of success and a
* negative error code in case of failure.
*/
-int ubi_destroy_gluebi(struct ubi_volume *vol)
+static int gluebi_remove(struct ubi_volume_info *vi)
{
- int err;
- struct mtd_info *mtd = &vol->gluebi_mtd;
+ int err = 0;
+ struct mtd_info *mtd;
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ err_msg("got remove notification for unknown UBI device %d "
+ "volume %d", vi->ubi_num, vi->vol_id);
+ err = -ENOENT;
+ } else if (gluebi->refcnt)
+ err = -EBUSY;
+ else
+ list_del(&gluebi->list);
+ mutex_unlock(&devices_mutex);
+ if (err)
+ return err;
- dbg_gen("remove mtd%d", mtd->index);
+ mtd = &gluebi->mtd;
err = del_mtd_device(mtd);
- if (err)
+ if (err) {
+ err_msg("cannot remove fake MTD device %d, UBI device %d, "
+ "volume %d, error %d", mtd->index, gluebi->ubi_num,
+ gluebi->vol_id, err);
+ mutex_lock(&devices_mutex);
+ list_add_tail(&gluebi->list, &gluebi_devices);
+ mutex_unlock(&devices_mutex);
return err;
+ }
+
kfree(mtd->name);
+ kfree(gluebi);
return 0;
}
/**
- * ubi_gluebi_updated - UBI volume was updated notifier.
- * @vol: volume description object
+ * gluebi_updated - UBI volume was updated notifier.
+ * @vi: volume info structure
*
- * This function is called every time an UBI volume is updated. This function
- * does nothing if volume @vol is dynamic, and changes MTD device size if the
+ * This function is called every time an UBI volume is updated. It does nothing
+ * if the volume @vi is dynamic, and changes the MTD device size if the
* volume is static. This is needed because static volumes cannot be read past
- * data they contain.
+ * data they contain. This function returns zero in case of success and a
+ * negative error code in case of error.
+ */
+static int gluebi_updated(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d "
+ "volume %d", vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+
+ if (vi->vol_type == UBI_STATIC_VOLUME)
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_resized - UBI volume was re-sized notifier.
+ * @vi: volume info structure
+ *
+ * This function is called every time an UBI volume is re-sized. It changes the
+ * corresponding fake MTD device size. This function returns zero in case of
+ * success and a negative error code in case of error.
+ */
+static int gluebi_resized(struct ubi_volume_info *vi)
+{
+ struct gluebi_device *gluebi;
+
+ mutex_lock(&devices_mutex);
+ gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
+ if (!gluebi) {
+ mutex_unlock(&devices_mutex);
+ err_msg("got update notification for unknown UBI device %d "
+ "volume %d", vi->ubi_num, vi->vol_id);
+ return -ENOENT;
+ }
+ gluebi->mtd.size = vi->used_bytes;
+ mutex_unlock(&devices_mutex);
+ return 0;
+}
+
+/**
+ * gluebi_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
*/
-void ubi_gluebi_updated(struct ubi_volume *vol)
+static int gluebi_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
{
- struct mtd_info *mtd = &vol->gluebi_mtd;
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_ADDED:
+ gluebi_create(&nt->di, &nt->vi);
+ break;
+ case UBI_VOLUME_REMOVED:
+ gluebi_remove(&nt->vi);
+ break;
+ case UBI_VOLUME_RESIZED:
+ gluebi_resized(&nt->vi);
+ break;
+ case UBI_VOLUME_UPDATED:
+ gluebi_updated(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- if (vol->vol_type == UBI_STATIC_VOLUME)
- mtd->size = vol->used_bytes;
+static struct notifier_block gluebi_notifier = {
+ .notifier_call = gluebi_notify,
+};
+
+static int __init ubi_gluebi_init(void)
+{
+ return ubi_register_volume_notifier(&gluebi_notifier, 0);
}
+
+static void __exit ubi_gluebi_exit(void)
+{
+ struct gluebi_device *gluebi, *g;
+
+ list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) {
+ int err;
+ struct mtd_info *mtd = &gluebi->mtd;
+
+ err = del_mtd_device(mtd);
+ if (err)
+ err_msg("error %d while removing gluebi MTD device %d, "
+ "UBI device %d, volume %d - ignoring", err,
+ mtd->index, gluebi->ubi_num, gluebi->vol_id);
+ kfree(mtd->name);
+ kfree(gluebi);
+ }
+ ubi_unregister_volume_notifier(&gluebi_notifier);
+}
+
+module_init(ubi_gluebi_init);
+module_exit(ubi_gluebi_exit);
+MODULE_DESCRIPTION("MTD emulation layer over UBI volumes");
+MODULE_AUTHOR("Artem Bityutskiy, Joern Engel");
+MODULE_LICENSE("GPL");
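
The read/write paths above rely on a simple byte-offset to (LEB, offset) mapping; the following stand-alone sketch (illustrative only, with an invented helper name) mirrors the div_u64_rem()-based chunking loop used by gluebi_read() and gluebi_write():

#include <linux/math64.h>
#include <linux/types.h>

/*
 * Illustrative sketch of how gluebi splits an MTD byte range into per-LEB
 * chunks. 'leb_size' plays the role of mtd->erasesize; the "consume chunk"
 * comment stands in for ubi_read()/ubi_write() on LEB 'lnum' at offset 'offs'.
 */
static void split_into_leb_chunks(u64 from, size_t len, u32 leb_size)
{
	u32 offs;
	int lnum = div_u64_rem(from, leb_size, &offs);

	while (len) {
		size_t chunk = leb_size - offs;

		if (chunk > len)
			chunk = len;
		/* consume 'chunk' bytes of LEB 'lnum' starting at 'offs' */
		len -= chunk;
		lnum += 1;
		offs = 0;
	}
}
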
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index fe81039f2a7c..effaff28bab1 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -100,6 +100,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr);
static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
int len);
+static int paranoid_check_empty(struct ubi_device *ubi, int pnum);
#else
#define paranoid_check_not_bad(ubi, pnum) 0
#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
@@ -107,6 +108,7 @@ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
+#define paranoid_check_empty(ubi, pnum) 0
#endif
/**
@@ -670,11 +672,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (read_err != -EBADMSG &&
check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
-
- /*
- * The below is just a paranoid check, it has to be
- * compiled out if paranoid checks are disabled.
- */
err = paranoid_check_all_ff(ubi, pnum, 0,
ubi->peb_size);
if (err)
@@ -902,7 +899,7 @@ bad:
* o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
* and corrected by the flash driver; this is harmless but may indicate that
* this eraseblock may become bad soon;
- * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC
+ * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC
* error detected);
* o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
* header there);
@@ -955,8 +952,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
* The below is just a paranoid check, it has to be
* compiled out if paranoid checks are disabled.
*/
- err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start,
- ubi->leb_size);
+ err = paranoid_check_empty(ubi, pnum);
if (err)
return err > 0 ? UBI_IO_BAD_VID_HDR : err;
@@ -1280,4 +1276,74 @@ error:
return err;
}
+/**
+ * paranoid_check_empty - whether a PEB is empty.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function makes sure PEB @pnum is empty, which means it contains only
+ * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a
+ * negative error code in case of failure.
+ *
+ * Empty PEBs have the EC header, and do not have the VID header. The caller of
+ * this function should have already made sure the PEB does not have the VID
+ * header. However, this function re-checks that, because it is possible that
+ * the header and data have already been written to the PEB.
+ *
+ * Let's consider a possible scenario. Suppose there are 2 tasks - A and B.
+ * Task A is in 'wear_leveling_worker()'. It is reading VID header of PEB X to
+ * find which LEB it corresponds to. PEB X is currently unmapped, and has no
+ * VID header. Task B is trying to write to PEB X.
+ *
+ * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The
+ * read data contain all 0xFF bytes;
+ * Task B: writes VID header and some data to PEB X;
+ * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. If we did
+ * not re-read the VID header here and cancel the check when it is present,
+ * the check would fail.
+ */
+static int paranoid_check_empty(struct ubi_device *ubi, int pnum)
+{
+ int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize;
+ size_t read;
+ uint32_t magic;
+ const struct ubi_vid_hdr *vid_hdr;
+
+ mutex_lock(&ubi->dbg_buf_mutex);
+ err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, "
+ "read %zd bytes", err, len, pnum, offs, read);
+ goto error;
+ }
+
+ vid_hdr = ubi->dbg_peb_buf;
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic == UBI_VID_HDR_MAGIC)
+ /* The PEB contains VID header, so it is not empty */
+ goto out;
+
+ err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
+ if (err == 0) {
+ ubi_err("flash region at PEB %d:%d, length %d does not "
+ "contain all 0xFF bytes", pnum, offs, len);
+ goto fail;
+ }
+
+out:
+ mutex_unlock(&ubi->dbg_buf_mutex);
+ return 0;
+
+fail:
+ ubi_err("paranoid check failed for PEB %d", pnum);
+ ubi_msg("hex dump of the %d-%d region", offs, offs + len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->dbg_peb_buf, len, 1);
+ err = 1;
+error:
+ ubi_dbg_dump_stack();
+ mutex_unlock(&ubi->dbg_buf_mutex);
+ return err;
+}
+
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
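
paranoid_check_empty() uses check_pattern(), an existing io.c helper that is not part of this hunk. As a rough, illustrative sketch of what such an all-bytes-equal check does (not the in-tree implementation):

#include <linux/types.h>

/* Rough sketch of an all-bytes-match helper like UBI's check_pattern(). */
static int check_pattern_sketch(const void *buf, uint8_t patt, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (((const uint8_t *)buf)[i] != patt)
			return 0;	/* mismatch found */
	return 1;			/* every byte equals 'patt' */
}
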
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 4abbe573fa40..88a72e9c8beb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -26,6 +26,24 @@
#include "ubi.h"
/**
+ * ubi_do_get_device_info - get information about UBI device.
+ * @ubi: UBI device description object
+ * @di: the information is stored here
+ *
+ * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
+ * device is locked and cannot disappear.
+ */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
+{
+ di->ubi_num = ubi->ubi_num;
+ di->leb_size = ubi->leb_size;
+ di->min_io_size = ubi->min_io_size;
+ di->ro_mode = ubi->ro_mode;
+ di->cdev = ubi->cdev.dev;
+}
+EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
+
+/**
* ubi_get_device_info - get information about UBI device.
* @ubi_num: UBI device number
* @di: the information is stored here
@@ -39,33 +57,24 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return -EINVAL;
-
ubi = ubi_get_device(ubi_num);
if (!ubi)
return -ENODEV;
-
- di->ubi_num = ubi->ubi_num;
- di->leb_size = ubi->leb_size;
- di->min_io_size = ubi->min_io_size;
- di->ro_mode = ubi->ro_mode;
- di->cdev = ubi->cdev.dev;
-
+ ubi_do_get_device_info(ubi, di);
ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
/**
- * ubi_get_volume_info - get information about UBI volume.
- * @desc: volume descriptor
+ * ubi_do_get_volume_info - get information about UBI volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
* @vi: the information is stored here
*/
-void ubi_get_volume_info(struct ubi_volume_desc *desc,
- struct ubi_volume_info *vi)
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi)
{
- const struct ubi_volume *vol = desc->vol;
- const struct ubi_device *ubi = vol->ubi;
-
vi->vol_id = vol->vol_id;
vi->ubi_num = ubi->ubi_num;
vi->size = vol->reserved_pebs;
@@ -79,6 +88,17 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
vi->name = vol->name;
vi->cdev = vol->cdev.dev;
}
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi)
+{
+ ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
+}
EXPORT_SYMBOL_GPL(ubi_get_volume_info);
/**
@@ -106,7 +126,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
struct ubi_device *ubi;
struct ubi_volume *vol;
- dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+ dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
return ERR_PTR(-EINVAL);
@@ -196,6 +216,8 @@ out_free:
kfree(desc);
out_put_ubi:
ubi_put_device(ubi);
+ dbg_err("cannot open device %d, volume %d, error %d",
+ ubi_num, vol_id, err);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -215,7 +237,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
struct ubi_device *ubi;
struct ubi_volume_desc *ret;
- dbg_gen("open volume %s, mode %d", name, mode);
+ dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
if (!name)
return ERR_PTR(-EINVAL);
@@ -266,7 +288,8 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode);
+ dbg_gen("close device %d, volume %d, mode %d",
+ ubi->ubi_num, vol->vol_id, desc->mode);
spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
@@ -558,7 +581,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
/**
- * ubi_leb_map - map logical erasblock to a physical eraseblock.
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
* @desc: volume descriptor
* @lnum: logical eraseblock number
* @dtype: expected data type
@@ -656,3 +679,59 @@ int ubi_sync(int ubi_num)
return 0;
}
EXPORT_SYMBOL_GPL(ubi_sync);
+
+BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
+
+/**
+ * ubi_register_volume_notifier - register a volume notifier.
+ * @nb: the notifier description object
+ * @ignore_existing: if non-zero, do not send "added" notification for all
+ * already existing volumes
+ *
+ * This function registers a volume notifier, which means that
+ * 'nb->notifier_call()' will be invoked when an UBI volume is created,
+ * removed, re-sized, re-named, or updated. The first argument of the function
+ * is the notification type. The second argument is a pointer to a
+ * &struct ubi_notification object which describes the notification event.
+ * Using UBI API from the volume notifier is prohibited.
+ *
+ * This function returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing)
+{
+ int err;
+
+ err = blocking_notifier_chain_register(&ubi_notifiers, nb);
+ if (err != 0)
+ return err;
+ if (ignore_existing)
+ return 0;
+
+ /*
+ * We are going to walk all UBI devices and all volumes, and
+ * notify the user about existing volumes by the %UBI_VOLUME_ADDED
+ * event. We have to lock the @ubi_devices_mutex to make sure UBI
+ * devices do not disappear.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ ubi_enumerate_volumes(nb);
+ mutex_unlock(&ubi_devices_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
+
+/**
+ * ubi_unregister_volume_notifier - unregister the volume notifier.
+ * @nb: the notifier description object
+ *
+ * This function unregisters volume notifier @nb and returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_unregister_volume_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
+}
+EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
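
The exported notifier API above is what the new gluebi module consumes. A minimal, hypothetical client - essentially gluebi stripped down to logging - could look like this (the example_* names are invented for illustration):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mtd/ubi.h>

/* Hypothetical client: log volume add/remove events. */
static int example_vol_notify(struct notifier_block *nb, unsigned long ntype,
			      void *ptr)
{
	struct ubi_notification *nt = ptr;

	switch (ntype) {
	case UBI_VOLUME_ADDED:
		pr_info("ubi%d volume %d (\"%s\") added\n",
			nt->vi.ubi_num, nt->vi.vol_id, nt->vi.name);
		break;
	case UBI_VOLUME_REMOVED:
		pr_info("ubi%d volume %d removed\n",
			nt->vi.ubi_num, nt->vi.vol_id);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_vol_notify,
};

static int __init example_init(void)
{
	/* 0 => also get UBI_VOLUME_ADDED for volumes that already exist */
	return ubi_register_volume_notifier(&example_nb, 0);
}

static void __exit example_exit(void)
{
	ubi_unregister_volume_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
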
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c055511bb1b2..28acd133c997 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -36,6 +36,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
@@ -100,6 +101,28 @@ enum {
UBI_IO_BITFLIPS
};
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ * PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ * PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ * PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ * PEB
+ * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
+ */
+enum {
+ MOVE_CANCEL_RACE = 1,
+ MOVE_SOURCE_RD_ERR,
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_CANCEL_BITFLIPS,
+};
+
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
@@ -208,10 +231,6 @@ struct ubi_volume_desc;
* @changing_leb: %1 if the atomic LEB change ioctl command is in progress
* @direct_writes: %1 if direct writes are enabled for this volume
*
- * @gluebi_desc: gluebi UBI volume descriptor
- * @gluebi_refcount: reference count of the gluebi MTD device
- * @gluebi_mtd: MTD device description object of the gluebi MTD device
- *
* The @corrupted field indicates that the volume's contents is corrupted.
* Since UBI protects only static volumes, this field is not relevant to
* dynamic volumes - it is user's responsibility to assure their data
@@ -255,17 +274,6 @@ struct ubi_volume {
unsigned int updating:1;
unsigned int changing_leb:1;
unsigned int direct_writes:1;
-
-#ifdef CONFIG_MTD_UBI_GLUEBI
- /*
- * Gluebi-related stuff may be compiled out.
- * Note: this should not be built into UBI but should be a separate
- * ubimtd driver which works on top of UBI and emulates MTD devices.
- */
- struct ubi_volume_desc *gluebi_desc;
- int gluebi_refcount;
- struct mtd_info gluebi_mtd;
-#endif
};
/**
@@ -305,9 +313,9 @@ struct ubi_wl_entry;
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
- * @volumes_mutex: protects on-flash volume table and serializes volume
- * changes, like creation, deletion, update, re-size,
- * re-name and set property
+ * @device_mutex: protects on-flash volume table and serializes volume
+ * creation, deletion, update, re-size, re-name and set
+ * property
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
@@ -318,14 +326,15 @@ struct ubi_wl_entry;
* @alc_mutex: serializes "atomic LEB change" operations
*
* @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
* @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
- * fields
+ * @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
+ * @erroneous, and @erroneous_peb_count fields
* @move_mutex: serializes eraseblock moves
* @work_sem: synchronizes the WL worker with use tasks
* @wl_scheduled: non-zero if the wear-leveling was scheduled
@@ -339,12 +348,15 @@ struct ubi_wl_entry;
* @bgt_thread: background thread description object
* @thread_enabled: if the background thread is enabled
* @bgt_name: background thread name
+ * @reboot_notifier: notifier to terminate background thread before rebooting
*
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
* @min_io_size: minimal input/output unit size of the underlying MTD device
* @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
* @ro_mode: if the UBI device is in read-only mode
@@ -366,7 +378,6 @@ struct ubi_wl_entry;
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
- * @mult_mutex: serializes operations on multiple volumes, like re-naming
* @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_buf_mutex: protects @dbg_peb_buf
*/
@@ -389,7 +400,7 @@ struct ubi_device {
int vtbl_slots;
int vtbl_size;
struct ubi_vtbl_record *vtbl;
- struct mutex volumes_mutex;
+ struct mutex device_mutex;
int max_ec;
/* Note, mean_ec is not updated run-time - should be fixed */
@@ -403,6 +414,7 @@ struct ubi_device {
/* Wear-leveling sub-system's stuff */
struct rb_root used;
+ struct rb_root erroneous;
struct rb_root free;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
@@ -420,6 +432,7 @@ struct ubi_device {
struct task_struct *bgt_thread;
int thread_enabled;
char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+ struct notifier_block reboot_notifier;
/* I/O sub-system's stuff */
long long flash_size;
@@ -427,6 +440,8 @@ struct ubi_device {
int peb_size;
int bad_peb_count;
int good_peb_count;
+ int erroneous_peb_count;
+ int max_erroneous;
int min_io_size;
int hdrs_min_io_size;
int ro_mode;
@@ -444,8 +459,7 @@ struct ubi_device {
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
- struct mutex mult_mutex;
-#ifdef CONFIG_MTD_UBI_DEBUG
+#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
void *dbg_peb_buf;
struct mutex dbg_buf_mutex;
#endif
@@ -457,6 +471,7 @@ extern const struct file_operations ubi_cdev_operations;
extern const struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -489,17 +504,6 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
void ubi_calculate_reserved(struct ubi_device *ubi);
-/* gluebi.c */
-#ifdef CONFIG_MTD_UBI_GLUEBI
-int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
-int ubi_destroy_gluebi(struct ubi_volume *vol);
-void ubi_gluebi_updated(struct ubi_volume *vol);
-#else
-#define ubi_create_gluebi(ubi, vol) 0
-#define ubi_destroy_gluebi(vol) 0
-#define ubi_gluebi_updated(vol)
-#endif
-
/* eba.c */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
int lnum);
@@ -549,6 +553,16 @@ struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
struct ubi_device *ubi_get_by_major(int major);
int ubi_major2num(int major);
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
+ int ntype);
+int ubi_notify_all(struct ubi_device *ubi, int ntype,
+ struct notifier_block *nb);
+int ubi_enumerate_volumes(struct notifier_block *nb);
+
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
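
The @max_erroneous limit documented above is computed in io_init() in the build.c hunk earlier; as a worked example of that arithmetic (illustrative helper, not part of the patch):

/* Same arithmetic as io_init(): 10% of the PEB count, but at least 16. */
static int example_max_erroneous(int peb_count)
{
	int max_erroneous = peb_count / 10;

	if (max_erroneous < 16)
		max_erroneous = 16;
	return max_erroneous;	/* e.g. 2048 PEBs -> 204, 64 PEBs -> 16 */
}
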
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 6b4d1ae891ae..74fdc40c8627 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -68,10 +68,10 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
sizeof(struct ubi_vtbl_record));
vtbl_rec.upd_marker = 1;
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
- mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 1;
+ mutex_unlock(&ubi->device_mutex);
return err;
}
@@ -109,10 +109,10 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
vol->last_eb_bytes = vol->usable_leb_size;
}
- mutex_lock(&ubi->volumes_mutex);
+ mutex_lock(&ubi->device_mutex);
err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
- mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 0;
+ mutex_unlock(&ubi->device_mutex);
return err;
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index df5483562b7a..ab64cb56df6e 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -198,7 +198,7 @@ static void volume_sysfs_close(struct ubi_volume *vol)
* %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
* error code in case of failure. Note, the caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
@@ -232,8 +232,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
req->vol_id = vol_id;
}
- dbg_gen("volume ID %d, %llu bytes, type %d, name %s",
- vol_id, (unsigned long long)req->bytes,
+ dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+ ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
(int)req->vol_type, req->name);
/* Ensure that this volume does not exist */
@@ -317,10 +317,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
goto out_mapping;
}
- err = ubi_create_gluebi(ubi, vol);
- if (err)
- goto out_cdev;
-
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
@@ -330,7 +326,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
err = device_register(&vol->dev);
if (err) {
ubi_err("cannot register device");
- goto out_gluebi;
+ goto out_cdev;
}
err = volume_sysfs_init(ubi, vol);
@@ -358,7 +354,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
ubi->vol_count += 1;
spin_unlock(&ubi->volumes_lock);
- err = paranoid_check_volumes(ubi);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ if (paranoid_check_volumes(ubi))
+ dbg_err("check failed while creating volume %d", vol_id);
return err;
out_sysfs:
@@ -373,10 +371,6 @@ out_sysfs:
do_free = 0;
get_device(&vol->dev);
volume_sysfs_close(vol);
-out_gluebi:
- if (ubi_destroy_gluebi(vol))
- dbg_err("cannot destroy gluebi for volume %d:%d",
- ubi->ubi_num, vol_id);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
@@ -403,7 +397,7 @@ out_unlock:
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * code in case of failure. The caller has to have the @ubi->device_mutex
* locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
@@ -412,7 +406,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
struct ubi_device *ubi = vol->ubi;
int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
- dbg_gen("remove UBI volume %d", vol_id);
+ dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
ubi_assert(desc->mode == UBI_EXCLUSIVE);
ubi_assert(vol == ubi->volumes[vol_id]);
@@ -431,10 +425,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
- err = ubi_destroy_gluebi(vol);
- if (err)
- goto out_err;
-
if (!no_vtbl) {
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
@@ -465,8 +455,10 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
- if (!no_vtbl)
- err = paranoid_check_volumes(ubi);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+ if (!no_vtbl && paranoid_check_volumes(ubi))
+ dbg_err("check failed while removing volume %d", vol_id);
+
return err;
out_err:
@@ -485,7 +477,7 @@ out_unlock:
*
* This function re-sizes the volume and returns zero in case of success, and a
* negative error code in case of failure. The caller has to have the
- * @ubi->volumes_mutex locked.
+ * @ubi->device_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
@@ -498,8 +490,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (ubi->ro_mode)
return -EROFS;
- dbg_gen("re-size volume %d to from %d to %d PEBs",
- vol_id, vol->reserved_pebs, reserved_pebs);
+ dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
+ ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
@@ -587,7 +579,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
- err = paranoid_check_volumes(ubi);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+ if (paranoid_check_volumes(ubi))
+ dbg_err("check failed while re-sizing volume %d", vol_id);
return err;
out_acc:
@@ -632,11 +626,12 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
vol->name_len = re->new_name_len;
memcpy(vol->name, re->new_name, re->new_name_len + 1);
spin_unlock(&ubi->volumes_lock);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
}
}
- if (!err)
- err = paranoid_check_volumes(ubi);
+ if (!err && paranoid_check_volumes(ubi))
+ ;
return err;
}
@@ -667,10 +662,6 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
return err;
}
- err = ubi_create_gluebi(ubi, vol);
- if (err)
- goto out_cdev;
-
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
vol->dev.devt = dev;
@@ -678,21 +669,19 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
if (err)
- goto out_gluebi;
+ goto out_cdev;
err = volume_sysfs_init(ubi, vol);
if (err) {
cdev_del(&vol->cdev);
- err = ubi_destroy_gluebi(vol);
volume_sysfs_close(vol);
return err;
}
- err = paranoid_check_volumes(ubi);
+ if (paranoid_check_volumes(ubi))
+ dbg_err("check failed while adding volume %d", vol_id);
return err;
-out_gluebi:
- err = ubi_destroy_gluebi(vol);
out_cdev:
cdev_del(&vol->cdev);
return err;
@@ -708,12 +697,9 @@ out_cdev:
*/
void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
- int err;
-
dbg_gen("free volume %d", vol->vol_id);
ubi->volumes[vol->vol_id] = NULL;
- err = ubi_destroy_gluebi(vol);
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
}
@@ -868,6 +854,7 @@ fail:
if (vol)
ubi_dbg_dump_vol_info(vol);
ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ dump_stack();
spin_unlock(&ubi->volumes_lock);
return -EINVAL;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 891534f8210d..2b2472300610 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -55,8 +55,8 @@
*
* As it was said, for the UBI sub-system all physical eraseblocks are either
* "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
- * (temporarily) in the @wl->pq queue.
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
*
* When the WL sub-system returns a physical eraseblock, the physical
* eraseblock is protected from being moved for some "time". For this reason,
@@ -83,6 +83,8 @@
* used. The former state corresponds to the @wl->free tree. The latter state
* is split up on several sub-states:
* o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ * erroneous - e.g., there was a read error;
* o the WL movement is temporarily prohibited (@wl->pq queue);
* o scrubbing is needed (@wl->scrub tree).
*
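The used/erroneous/pq/scrub split described above is easiest to see as a small state map. A minimal sketch, not part of this patch, mirroring the re-filing logic the wear-levelling worker below applies when a move is cancelled; the flag names follow wear_leveling_worker(), while the enum and helper are hypothetical:

enum peb_state { PEB_USED, PEB_ERRONEOUS, PEB_PROTECTED, PEB_SCRUB };

/* Hypothetical helper: which container a cancelled move re-files the
 * source PEB into, given the flags set by wear_leveling_worker(). */
static enum peb_state refile_state(int protect, int erroneous, int scrubbing)
{
	if (protect)
		return PEB_PROTECTED;	/* @wl->pq: movement temporarily prohibited */
	if (erroneous)
		return PEB_ERRONEOUS;	/* @wl->erroneous: e.g. a read error occurred */
	if (scrubbing)
		return PEB_SCRUB;	/* @wl->scrub: scrubbing is needed */
	return PEB_USED;		/* @wl->used: movement allowed */
}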
@@ -653,7 +655,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, scrubbing = 0, torture = 0;
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int vol_id = -1, uninitialized_var(lnum);
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -738,68 +741,78 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/*
* We are trying to move PEB without a VID header. UBI
* always writes VID headers shortly after the PEB was
- * given, so we have a situation when it did not have
- * chance to write it down because it was preempted.
- * Just re-schedule the work, so that next time it will
- * likely have the VID header in place.
+ * given, so we have a situation when it has not yet
+ * had a chance to write it, because it was preempted.
+ * So add this PEB to the protection queue so far,
+ * because presumably more data will be written there
+ * (including the missing VID header), and then we'll
+ * move it.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
+ protect = 1;
goto out_not_moved;
}
ubi_err("error %d while reading VID header from PEB %d",
err, e1->pnum);
- if (err > 0)
- err = -EIO;
goto out_error;
}
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
- if (err == -EAGAIN)
+ if (err == MOVE_CANCEL_RACE) {
+ /*
+ * The LEB has not been moved because the volume is
+ * being deleted or the PEB has been put meanwhile. We
+ * should prevent this PEB from being selected for
+ * wear-leveling movement again, so put it to the
+ * protection queue.
+ */
+ protect = 1;
goto out_not_moved;
- if (err < 0)
- goto out_error;
- if (err == 2) {
- /* Target PEB write error, torture it */
+ }
+
+ if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+ * Target PEB had bit-flips or write error - torture it.
+ */
torture = 1;
goto out_not_moved;
}
- /*
- * The LEB has not been moved because the volume is being
- * deleted or the PEB has been put meanwhile. We should prevent
- * this PEB from being selected for wear-leveling movement
- * again, so put it to the protection queue.
- */
-
- dbg_wl("canceled moving PEB %d", e1->pnum);
- ubi_assert(err == 1);
-
- ubi_free_vid_hdr(ubi, vid_hdr);
- vid_hdr = NULL;
-
- spin_lock(&ubi->wl_lock);
- prot_queue_add(ubi, e1);
- ubi_assert(!ubi->move_to_put);
- ubi->move_from = ubi->move_to = NULL;
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
+ if (err == MOVE_SOURCE_RD_ERR) {
+ /*
+ * An error happened while reading the source PEB. Do
+ * not switch to R/O mode in this case, and give the
+ * upper layers a possibility to recover from this,
+ * e.g. by unmapping corresponding LEB. Instead, just
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
+ */
+ if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+ ubi_err("too many erroneous eraseblocks (%d)",
+ ubi->erroneous_peb_count);
+ goto out_error;
+ }
+ erroneous = 1;
+ goto out_not_moved;
+ }
- e1 = NULL;
- err = schedule_erase(ubi, e2, 0);
- if (err)
+ if (err < 0)
goto out_error;
- mutex_unlock(&ubi->move_mutex);
- return 0;
+
+ ubi_assert(0);
}
/* The PEB has been successfully moved */
- ubi_free_vid_hdr(ubi, vid_hdr);
- vid_hdr = NULL;
if (scrubbing)
- ubi_msg("scrubbed PEB %d, data moved to PEB %d",
- e1->pnum, e2->pnum);
+ ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put) {
@@ -812,8 +825,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
err = schedule_erase(ubi, e1, 0);
if (err) {
- e1 = NULL;
- goto out_error;
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ if (e2)
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ goto out_ro;
}
if (e2) {
@@ -821,10 +836,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* Well, the target PEB was put meanwhile, schedule it for
* erasure.
*/
- dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
+ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ e2->pnum, vol_id, lnum);
err = schedule_erase(ubi, e2, 0);
- if (err)
- goto out_error;
+ if (err) {
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ goto out_ro;
+ }
}
dbg_wl("done");
@@ -837,11 +855,19 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* have been changed, schedule it for erasure.
*/
out_not_moved:
- dbg_wl("canceled moving PEB %d", e1->pnum);
- ubi_free_vid_hdr(ubi, vid_hdr);
- vid_hdr = NULL;
+ if (vol_id != -1)
+ dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+ e1->pnum, vol_id, lnum, e2->pnum, err);
+ else
+ dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+ e1->pnum, e2->pnum, err);
spin_lock(&ubi->wl_lock);
- if (scrubbing)
+ if (protect)
+ prot_queue_add(ubi, e1);
+ else if (erroneous) {
+ wl_tree_add(e1, &ubi->erroneous);
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
@@ -850,32 +876,36 @@ out_not_moved:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- e1 = NULL;
+ ubi_free_vid_hdr(ubi, vid_hdr);
err = schedule_erase(ubi, e2, torture);
- if (err)
- goto out_error;
-
+ if (err) {
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ goto out_ro;
+ }
mutex_unlock(&ubi->move_mutex);
return 0;
out_error:
- ubi_err("error %d while moving PEB %d to PEB %d",
- err, e1->pnum, e2->pnum);
-
- ubi_free_vid_hdr(ubi, vid_hdr);
+ if (vol_id != -1)
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+ else
+ ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ err, e1->pnum, vol_id, lnum, e2->pnum);
spin_lock(&ubi->wl_lock);
ubi->move_from = ubi->move_to = NULL;
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- if (e1)
- kmem_cache_free(ubi_wl_entry_slab, e1);
- if (e2)
- kmem_cache_free(ubi_wl_entry_slab, e2);
- ubi_ro_mode(ubi);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+out_ro:
+ ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
- return err;
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
out_cancel:
ubi->wl_scheduled = 0;
@@ -1015,7 +1045,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
- * errors again and again. Well, lets switch to RO mode.
+ * errors again and again. Well, let's switch to R/O mode.
*/
goto out_ro;
}
@@ -1043,10 +1073,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("no reserved physical eraseblocks");
goto out_ro;
}
-
spin_unlock(&ubi->volumes_lock);
- ubi_msg("mark PEB %d as bad", pnum);
+ ubi_msg("mark PEB %d as bad", pnum);
err = ubi_io_mark_bad(ubi, pnum);
if (err)
goto out_ro;
@@ -1056,7 +1085,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs == 0)
+ if (ubi->beb_rsvd_pebs)
+ ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
+ else
ubi_warn("last PEB from the reserved pool was used");
spin_unlock(&ubi->volumes_lock);
@@ -1125,6 +1156,13 @@ retry:
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
+ } else if (in_wl_tree(e, &ubi->erroneous)) {
+ paranoid_check_in_wl_tree(e, &ubi->erroneous);
+ rb_erase(&e->u.rb, &ubi->erroneous);
+ ubi->erroneous_peb_count -= 1;
+ ubi_assert(ubi->erroneous_peb_count >= 0);
+ /* Erroneous PEBs should be tortured */
+ torture = 1;
} else {
err = prot_queue_del(ubi, e->pnum);
if (err) {
@@ -1373,7 +1411,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
struct ubi_scan_leb *seb, *tmp;
struct ubi_wl_entry *e;
- ubi->used = ubi->free = ubi->scrub = RB_ROOT;
+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
mutex_init(&ubi->move_mutex);
init_rwsem(&ubi->work_sem);
@@ -1511,6 +1549,7 @@ void ubi_wl_close(struct ubi_device *ubi)
cancel_pending(ubi);
protection_queue_destroy(ubi);
tree_destroy(&ubi->used);
+ tree_destroy(&ubi->erroneous);
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3b6383168c69..892a9e4e275f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2272,8 +2272,9 @@ config BNX2
config CNIC
tristate "Broadcom CNIC support"
- depends on BNX2
- depends on UIO
+ depends on PCI
+ select BNX2
+ select UIO
help
This driver supports offload features of Broadcom NetXtremeII
gigabit Ethernet cards.
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 7e3738112c4e..38f1c3375d7f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3552,14 +3552,14 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
- if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
+ if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
BNX2_RPM_SORT_USER0_PROM_VLAN;
} else if (!(dev->flags & IFF_PROMISC)) {
/* Add all entries into to the match filter list */
i = 0;
- list_for_each_entry(ha, &dev->uc_list, list) {
+ list_for_each_entry(ha, &dev->uc.list, list) {
bnx2_set_mac_addr(bp, ha->addr,
i + BNX2_START_UNICAST_ADDRESS_INDEX);
sort_mode |= (1 <<
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 0e9b9f9632c1..2df8fb0af701 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2767,7 +2767,6 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
- clk_disable(emac_clk);
platform_set_drvdata(pdev, NULL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index f7929e89eb03..efa680f4b8dd 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2895,12 +2895,13 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
- if (wake) {
+ if (wake)
return pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- return pci_set_power_state(pdev, PCI_D3hot);
- }
+
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8d36743c8140..5e3356f8eb5a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2370,7 +2370,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_VFE;
}
- if (netdev->uc_count > rar_entries - 1) {
+ if (netdev->uc.count > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
@@ -2394,7 +2394,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
*/
i = 1;
if (use_uc)
- list_for_each_entry(ha, &netdev->uc_list, list) {
+ list_for_each_entry(ha, &netdev->uc.list, list) {
if (i == rar_entries)
break;
e1000_rar_set(hw, ha->addr, i++);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index b60a3041b64c..1094d292630f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -719,7 +719,8 @@ static const struct register_test nv_registers_test[] = {
struct nv_skb_map {
struct sk_buff *skb;
dma_addr_t dma;
- unsigned int dma_len;
+ unsigned int dma_len:31;
+ unsigned int dma_single:1;
struct ring_desc_ex *first_tx_desc;
struct nv_skb_map *next_tx_ctx;
};
@@ -1912,6 +1913,7 @@ static void nv_init_tx(struct net_device *dev)
np->tx_skb[i].skb = NULL;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
+ np->tx_skb[i].dma_single = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
@@ -1930,23 +1932,30 @@ static int nv_init_ring(struct net_device *dev)
return nv_alloc_rx_optimized(dev);
}
-static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
+static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
- struct fe_priv *np = netdev_priv(dev);
-
if (tx_skb->dma) {
- pci_unmap_page(np->pci_dev, tx_skb->dma,
- tx_skb->dma_len,
- PCI_DMA_TODEVICE);
+ if (tx_skb->dma_single)
+ pci_unmap_single(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_page(np->pci_dev, tx_skb->dma,
+ tx_skb->dma_len,
+ PCI_DMA_TODEVICE);
tx_skb->dma = 0;
}
+}
+
+static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
+{
+ nv_unmap_txskb(np, tx_skb);
if (tx_skb->skb) {
dev_kfree_skb_any(tx_skb->skb);
tx_skb->skb = NULL;
return 1;
- } else {
- return 0;
}
+ return 0;
}
static void nv_drain_tx(struct net_device *dev)
@@ -1964,10 +1973,11 @@ static void nv_drain_tx(struct net_device *dev)
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
- if (nv_release_txskb(dev, &np->tx_skb[i]))
+ if (nv_release_txskb(np, &np->tx_skb[i]))
dev->stats.tx_dropped++;
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
+ np->tx_skb[i].dma_single = 0;
np->tx_skb[i].first_tx_desc = NULL;
np->tx_skb[i].next_tx_ctx = NULL;
}
@@ -2171,6 +2181,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 1;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2196,6 +2207,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 0;
put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2291,6 +2303,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 1;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2317,6 +2330,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
PCI_DMA_TODEVICE);
np->put_tx_ctx->dma_len = bcnt;
+ np->put_tx_ctx->dma_single = 0;
put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
@@ -2434,10 +2448,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
dev->name, flags);
- pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
- np->get_tx_ctx->dma_len,
- PCI_DMA_TODEVICE);
- np->get_tx_ctx->dma = 0;
+ nv_unmap_txskb(np, np->get_tx_ctx);
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
@@ -2502,10 +2513,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
dev->name, flags);
- pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
- np->get_tx_ctx->dma_len,
- PCI_DMA_TODEVICE);
- np->get_tx_ctx->dma = 0;
+ nv_unmap_txskb(np, np->get_tx_ctx);
if (flags & NV_TX2_LASTPACKET) {
if (!(flags & NV_TX2_ERROR))
@@ -5091,7 +5099,7 @@ static int nv_loopback_test(struct net_device *dev)
dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
}
- pci_unmap_page(np->pci_dev, test_dma_addr,
+ pci_unmap_single(np->pci_dev, test_dma_addr,
(skb_end_pointer(tx_skb) - tx_skb->data),
PCI_DMA_TODEVICE);
dev_kfree_skb_any(tx_skb);
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 5105548ad50c..abcd19a8bff9 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -260,7 +260,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (!netif_running(dev)) {
kfree_skb(skb);
- return -ENODEV;
+ return NETDEV_TX_OK;
}
skb_pull(skb, 1);
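This hunk, like the hp100 and lapbether hunks later in the series, follows the convention that once ->hard_start_xmit is called the driver owns the skb: on a local, unrecoverable failure it should free the buffer itself and report NETDEV_TX_OK, since an error return may cause the stack to requeue an skb that has already been freed. A minimal sketch of the pattern with hypothetical names, not code from this patch:

/* Sketch only - hypothetical driver, not part of this patch. */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!netif_running(dev) || skb->len <= 0)
		goto drop;

	/* ... hand the frame to the hardware here ... */
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);		/* we own the skb: free it ourselves */
	return NETDEV_TX_OK;		/* tell the stack it was consumed */
}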
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8feda9fe8297..1d3429a415e6 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1495,13 +1495,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
hp100_outw(0x4210, TRACE);
printk("hp100: %s: start_xmit_bm\n", dev->name);
#endif
-
- if (skb == NULL) {
- return 0;
- }
-
if (skb->len <= 0)
- return 0;
+ goto drop;
if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
return 0;
@@ -1514,10 +1509,10 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
#endif
/* not waited long enough since last tx? */
if (time_before(jiffies, dev->trans_start + HZ))
- return -EAGAIN;
+ goto drop;
if (hp100_check_lan(dev))
- return -EIO;
+ goto drop;
if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
/* we have a 100Mb/s adapter but it isn't connected to hub */
@@ -1551,7 +1546,7 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
- return -EAGAIN;
+ goto drop;
}
/*
@@ -1591,6 +1586,10 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
return 0;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
@@ -1648,16 +1647,11 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
hp100_outw(0x4212, TRACE);
printk("hp100: %s: start_xmit\n", dev->name);
#endif
-
- if (skb == NULL) {
- return 0;
- }
-
if (skb->len <= 0)
- return 0;
+ goto drop;
if (hp100_check_lan(dev))
- return -EIO;
+ goto drop;
/* If there is not enough free memory on the card... */
i = hp100_inl(TX_MEM_FREE) & 0x7fffffff;
@@ -1671,7 +1665,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
printk("hp100: %s: trans_start timing problem\n",
dev->name);
#endif
- return -EAGAIN;
+ goto drop;
}
if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) {
/* we have a 100Mb/s adapter but it isn't connected to hub */
@@ -1705,7 +1699,7 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
dev->trans_start = jiffies;
- return -EAGAIN;
+ goto drop;
}
for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) {
@@ -1759,6 +1753,11 @@ static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
return 0;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a551a96ce676..e756e220db32 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2321,7 +2321,7 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, &netdev->uc_list);
+ hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
/* reprogram multicast list */
addr_count = netdev->mc_count;
@@ -5261,7 +5261,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
/**
* ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
- * netdev->dev_addr_list
+ * netdev->dev_addrs
* @netdev: network interface device structure
*
* Returns non-zero on failure
@@ -5282,7 +5282,7 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
/**
* ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
- * netdev->dev_addr_list
+ * netdev->dev_addrs
* @netdev: network interface device structure
*
* Returns non-zero on failure
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b4e18a58cb1b..745ae8b4a2e8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1729,7 +1729,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- list_for_each_entry(ha, &dev->uc_list, list) {
+ list_for_each_entry(ha, &dev->uc.list, list) {
if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index ab11c2b3f0fe..970cedeb5f37 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -169,6 +169,7 @@
#define MAX_NUM_CARDS 4
#define MAX_BUFFERS_PER_CMD 32
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
/*
* Following are the states of the Phantom. Phantom will set them and
@@ -1436,7 +1437,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p);
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
- struct nx_host_tx_ring *tx_ring, uint32_t crb_producer);
+ struct nx_host_tx_ring *tx_ring);
/*
* NetXen Board information
@@ -1538,6 +1539,14 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter)
}
+static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ return find_diff_among(tx_ring->producer,
+ tx_ring->sw_consumer, tx_ring->num_desc);
+
+}
+
int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
extern void netxen_change_ringparam(struct netxen_adapter *adapter);
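The new netxen_tx_avail() helper and the TX_STOP_THRESH watermark are standard producer/consumer ring accounting. A stand-alone sketch, assuming find_diff_among() returns the number of free descriptors from the producer up to the software consumer with wrap-around; ring_space() is a hypothetical stand-in, not the driver's function:

#include <stdio.h>

/* Hypothetical stand-in for find_diff_among(): free descriptors from
 * 'producer' up to 'consumer', wrapping around a ring of 'size' slots. */
static unsigned int ring_space(unsigned int producer, unsigned int consumer,
			       unsigned int size)
{
	return producer < consumer ? consumer - producer
				   : size + consumer - producer;
}

int main(void)
{
	/* 256-slot ring, producer at 250, consumer at 10: 16 slots free,
	 * so a queue stopped at TX_STOP_THRESH would not yet be woken. */
	printf("%u free descriptors\n", ring_space(250, 10, 256));
	return 0;
}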
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7f0ddbfa7b28..3cc047844af3 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -355,6 +355,7 @@ enum {
#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \
((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
+#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c)
#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034)
#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014)
#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 42ffb825ebf1..ce3b89d2cbb6 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -488,7 +488,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
tx_ring->producer = producer;
- netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
netif_tx_unlock_bh(adapter->netdev);
@@ -2041,8 +2041,8 @@ void netxen_nic_get_firmware_info(struct netxen_adapter *adapter)
fw_major, fw_minor, fw_build);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- i = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
- adapter->ahw.cut_through = (i & 0x4) ? 1 : 0;
+ i = NXRD32(adapter, NETXEN_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
dev_info(&pdev->dev, "firmware running in %s mode\n",
adapter->ahw.cut_through ? "cut-through" : "legacy");
}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 6f77ad58e3b3..bdb143d2b5c7 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1292,7 +1292,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
return 1;
sw_consumer = tx_ring->sw_consumer;
- barrier(); /* hw_consumer can change underneath */
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
while (sw_consumer != hw_consumer) {
@@ -1319,14 +1318,15 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
break;
}
- tx_ring->sw_consumer = sw_consumer;
-
if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
smp_mb();
+
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
netif_tx_lock(netdev);
- netif_wake_queue(netdev);
- smp_mb();
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_wake_queue(netdev);
netif_tx_unlock(netdev);
}
}
@@ -1343,7 +1343,6 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
* There is still a possible race condition and the host could miss an
* interrupt. The card has to take care of this.
*/
- barrier(); /* hw_consumer can change underneath */
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
spin_unlock(&adapter->tx_clean_lock);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 98737ef72936..71daa3d5f114 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -107,9 +107,14 @@ static uint32_t crb_cmd_producer[4] = {
void
netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
- struct nx_host_tx_ring *tx_ring, u32 producer)
+ struct nx_host_tx_ring *tx_ring)
{
- NXWR32(adapter, tx_ring->crb_cmd_producer, producer);
+ NXWR32(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
+
+ if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
+ netif_stop_queue(adapter->netdev);
+ smp_mb();
+ }
}
static uint32_t crb_cmd_consumer[4] = {
@@ -119,9 +124,9 @@ static uint32_t crb_cmd_consumer[4] = {
static inline void
netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
- struct nx_host_tx_ring *tx_ring, u32 consumer)
+ struct nx_host_tx_ring *tx_ring)
{
- NXWR32(adapter, tx_ring->crb_cmd_consumer, consumer);
+ NXWR32(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
}
static uint32_t msi_tgt_status[8] = {
@@ -900,8 +905,11 @@ netxen_nic_attach(struct netxen_adapter *adapter)
tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
- netxen_nic_update_cmd_producer(adapter, tx_ring, 0);
- netxen_nic_update_cmd_consumer(adapter, tx_ring, 0);
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+ netxen_nic_update_cmd_consumer(adapter, tx_ring);
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -1362,7 +1370,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t temp_dma;
int i, k;
- u32 producer, consumer;
+ u32 producer;
int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc;
bool is_tso = false;
@@ -1372,15 +1380,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
- producer = tx_ring->producer;
- smp_mb();
- consumer = tx_ring->sw_consumer;
- if ((no_of_desc+2) >= find_diff_among(producer, consumer, num_txd)) {
+ if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
netif_stop_queue(netdev);
- smp_mb();
return NETDEV_TX_BUSY;
}
+ producer = tx_ring->producer;
+
hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc);
pbuf = &tx_ring->cmd_buf_arr[producer];
@@ -1493,7 +1499,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = producer;
adapter->stats.txbytes += skb->len;
- netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
adapter->stats.xmitcalled++;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index fa61a12c5e15..d2146d4a10f3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6376,7 +6376,7 @@ static void niu_set_rx_mode(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
np->flags |= NIU_FLAGS_MCAST;
- alt_cnt = dev->uc_count;
+ alt_cnt = dev->uc.count;
if (alt_cnt > niu_num_alt_addr(np)) {
alt_cnt = 0;
np->flags |= NIU_FLAGS_PROMISC;
@@ -6385,7 +6385,7 @@ static void niu_set_rx_mode(struct net_device *dev)
if (alt_cnt) {
int index = 0;
- list_for_each_entry(ha, &dev->uc_list, list) {
+ list_for_each_entry(ha, &dev->uc.list, list) {
err = niu_set_alt_mac(np, index, ha->addr);
if (err)
printk(KERN_WARNING PFX "%s: Error %d "
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index a2ece89622d6..eba937c46376 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(get_phy_device);
/**
* phy_device_register - Register the phy device on the MDIO bus
- * @phy_device: phy_device structure to be added to the MDIO bus
+ * @phydev: phy_device structure to be added to the MDIO bus
*/
int phy_device_register(struct phy_device *phydev)
{
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 35196faa084e..4e22462684c9 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3811,22 +3811,11 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
static void rtl8169_net_suspend(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
-
if (!netif_running(dev))
return;
netif_device_detach(dev);
netif_stop_queue(dev);
-
- spin_lock_irq(&tp->lock);
-
- rtl8169_asic_down(ioaddr);
-
- rtl8169_rx_missed(dev, ioaddr);
-
- spin_unlock_irq(&tp->lock);
}
#ifdef CONFIG_PM
@@ -3876,9 +3865,17 @@ static struct dev_pm_ops rtl8169_pm_ops = {
static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
rtl8169_net_suspend(dev);
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_asic_down(ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index e2247669a495..1f040e8a000b 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1281,7 +1281,7 @@ static u16 sis190_default_phy(struct net_device *dev)
else if (phy_lan)
phy_default = phy_lan;
else
- phy_default = list_entry(&tp->first_phy,
+ phy_default = list_first_entry(&tp->first_phy,
struct sis190_phy, list);
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 6b5946fe8ae2..7681d28c53d7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.22"
+#define DRV_VERSION "1.23"
#define PFX DRV_NAME " "
/*
@@ -65,9 +65,9 @@
#define RX_DEF_PENDING RX_MAX_PENDING
#define TX_RING_SIZE 512
-#define TX_DEF_PENDING (TX_RING_SIZE - 1)
-#define TX_MIN_PENDING 64
+#define TX_DEF_PENDING 128
#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
+#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
@@ -1151,7 +1151,14 @@ stopped:
/* reset the Rx prefetch unit */
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
- mmiowb();
+
+ /* Reset the RAM Buffer receive queue */
+ sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
+
+ /* Reset Rx MAC FIFO */
+ sky2_write8(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), GMF_RST_SET);
+
+ sky2_read8(hw, B0_CTST);
}
/* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1169,6 +1176,7 @@ static void sky2_rx_clean(struct sky2_port *sky2)
re->skb = NULL;
}
}
+ skb_queue_purge(&sky2->rx_recycle);
}
/* Basic MII support */
@@ -1245,6 +1253,12 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
}
#endif
+/* Amount of required worst case padding in rx buffer */
+static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
+{
+ return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
+}
+
/*
* Allocate an skb for receiving. If the MTU is large enough
* make the skb non-linear with a fragment list of pages.
@@ -1254,6 +1268,13 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
struct sk_buff *skb;
int i;
+ skb = __skb_dequeue(&sky2->rx_recycle);
+ if (!skb)
+ skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size
+ + sky2_rx_pad(sky2->hw));
+ if (!skb)
+ goto nomem;
+
if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
unsigned char *start;
/*
@@ -1262,18 +1283,10 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
* The buffer returned from netdev_alloc_skb is
* aligned except if slab debugging is enabled.
*/
- skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + 8);
- if (!skb)
- goto nomem;
start = PTR_ALIGN(skb->data, 8);
skb_reserve(skb, start - skb->data);
- } else {
- skb = netdev_alloc_skb(sky2->netdev,
- sky2->rx_data_size + NET_IP_ALIGN);
- if (!skb)
- goto nomem;
+ } else
skb_reserve(skb, NET_IP_ALIGN);
- }
for (i = 0; i < sky2->rx_nfrags; i++) {
struct page *page = alloc_page(GFP_ATOMIC);
@@ -1350,6 +1363,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2->rx_data_size = size;
+ skb_queue_head_init(&sky2->rx_recycle);
+
/* Fill Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
@@ -1488,6 +1503,7 @@ static int sky2_up(struct net_device *dev)
imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
sky2_set_multicast(dev);
@@ -1756,14 +1772,22 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
}
if (le->ctrl & EOP) {
+ struct sk_buff *skb = re->skb;
+
if (unlikely(netif_msg_tx_done(sky2)))
printk(KERN_DEBUG "%s: tx done %u\n",
dev->name, idx);
dev->stats.tx_packets++;
- dev->stats.tx_bytes += re->skb->len;
+ dev->stats.tx_bytes += skb->len;
+
+ if (skb_queue_len(&sky2->rx_recycle) < sky2->rx_pending
+ && skb_recycle_check(skb, sky2->rx_data_size
+ + sky2_rx_pad(sky2->hw)))
+ __skb_queue_head(&sky2->rx_recycle, skb);
+ else
+ dev_kfree_skb_any(skb);
- dev_kfree_skb_any(re->skb);
sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
}
}
@@ -1805,10 +1829,10 @@ static int sky2_down(struct net_device *dev)
imask = sky2_read32(hw, B0_IMSK);
imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
- synchronize_irq(hw->pdev->irq);
-
- sky2_gmac_reset(hw, port);
+ /* Force flow control off */
+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
/* Stop transmitter */
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
@@ -1821,9 +1845,6 @@ static int sky2_down(struct net_device *dev)
ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
gma_write16(hw, port, GM_GP_CTRL, ctrl);
- /* Make sure no packets are pending */
- napi_synchronize(&hw->napi);
-
sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
/* Workaround shared GMAC reset */
@@ -1854,6 +1875,15 @@ static int sky2_down(struct net_device *dev)
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+ /* Force any delayed status interrupt and NAPI */
+ sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
+ sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
+ sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
+ sky2_read8(hw, STAT_ISR_TIMER_CTRL);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+
sky2_phy_power_down(hw, port);
/* turn off LED's */
@@ -2343,11 +2373,45 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
}
}
+static inline void sky2_skb_rx(const struct sky2_port *sky2,
+ u32 status, struct sk_buff *skb)
+{
+#ifdef SKY2_VLAN_TAG_USED
+ u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
+ if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
+ if (skb->ip_summed == CHECKSUM_NONE)
+ vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
+ else
+ vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
+ vlan_tag, skb);
+ return;
+ }
+#endif
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&sky2->hw->napi, skb);
+}
+
+static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
+ unsigned packets, unsigned bytes)
+{
+ if (packets) {
+ struct net_device *dev = hw->dev[port];
+
+ dev->stats.rx_packets += packets;
+ dev->stats.rx_bytes += bytes;
+ dev->last_rx = jiffies;
+ sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
+ }
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
int work_done = 0;
- unsigned rx[2] = { 0, 0 };
+ unsigned int total_bytes[2] = { 0 };
+ unsigned int total_packets[2] = { 0 };
rmb();
do {
@@ -2374,7 +2438,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
le->opcode = 0;
switch (opcode & ~HW_OWNER) {
case OP_RXSTAT:
- ++rx[port];
+ total_packets[port]++;
+ total_bytes[port] += length;
skb = sky2_receive(dev, length, status);
if (unlikely(!skb)) {
dev->stats.rx_dropped++;
@@ -2392,18 +2457,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
}
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- dev->last_rx = jiffies;
-#ifdef SKY2_VLAN_TAG_USED
- if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
- vlan_hwaccel_receive_skb(skb,
- sky2->vlgrp,
- be16_to_cpu(sky2->rx_tag));
- } else
-#endif
- netif_receive_skb(skb);
+ sky2_skb_rx(sky2, status, skb);
/* Stop after net poll weight */
if (++work_done >= to_do)
@@ -2473,11 +2528,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
exit_loop:
- if (rx[0])
- sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1);
-
- if (rx[1])
- sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);
+ sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]);
+ sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]);
return work_done;
}
@@ -4364,6 +4416,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out;
}
+ /* Get configuration information
+ * Note: only regular PCI config access once to test for HW issues
+ * other PCI access through shared memory for speed and to
+ * avoid MMCONFIG problems.
+ */
+ err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+ if (err) {
+ dev_err(&pdev->dev, "PCI read config failed\n");
+ goto err_out;
+ }
+
+ if (~reg == 0) {
+ dev_err(&pdev->dev, "PCI configuration read error\n");
+ goto err_out;
+ }
+
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "cannot obtain PCI resources\n");
@@ -4389,21 +4457,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
- /* Get configuration information
- * Note: only regular PCI config access once to test for HW issues
- * other PCI access through shared memory for speed and to
- * avoid MMCONFIG problems.
- */
- err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
- if (err) {
- dev_err(&pdev->dev, "PCI read config failed\n");
- goto err_out_free_regions;
- }
-
- /* size of available VPD, only impact sysfs */
- err = pci_vpd_truncate(pdev, 1ul << (((reg & PCI_VPD_ROM_SZ) >> 14) + 8));
- if (err)
- dev_warn(&pdev->dev, "Can't set VPD size\n");
#ifdef __BIG_ENDIAN
/* The sk98lin vendor driver uses hardware byte swapping but
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 92fb24b27d45..b5549c9e5107 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2028,6 +2028,7 @@ struct sky2_port {
u16 rx_pending;
u16 rx_data_size;
u16 rx_nfrags;
+ struct sk_buff_head rx_recycle;
#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag;
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index e4255d829380..753a1fba4609 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -223,7 +223,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
if (!laddr) {
printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb(skb);
- return NETDEV_TX_BUSY
+ return NETDEV_TX_BUSY;
}
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index e2f2e91cfdd2..40c6eba775ce 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -65,8 +65,6 @@
static DEFINE_SPINLOCK(ugeth_lock);
-static void uec_configure_serdes(struct net_device *dev);
-
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -1536,6 +1534,49 @@ static void adjust_link(struct net_device *dev)
spin_unlock_irqrestore(&ugeth->lock, flags);
}
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the UTBIPA register. We assume
+ * that the UTBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void uec_configure_serdes(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node) {
+ dev_warn(&dev->dev, "SGMII mode requires that the device "
+ "tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /*
+ * If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+ return;
+
+ /* Single clk mode, mii mode off(for serdes communication) */
+ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+
+ phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+}
+
/* Configure the PHY for dev.
* returns 0 if success. -1 if failure
*/
@@ -1577,41 +1618,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-/* Initialize TBI PHY interface for communicating with the
- * SERDES lynx PHY on the chip. We communicate with this PHY
- * through the MDIO bus on each controller, treating it as a
- * "normal" PHY at the address found in the UTBIPA register. We assume
- * that the UTBIPA register is valid. Either the MDIO bus code will set
- * it to a value that doesn't conflict with other PHYs on the bus, or the
- * value doesn't matter, as there are no other PHYs on the bus.
- */
-static void uec_configure_serdes(struct net_device *dev)
-{
- struct ucc_geth_private *ugeth = netdev_priv(dev);
-
- if (!ugeth->tbiphy) {
- printk(KERN_WARNING "SGMII mode requires that the device "
- "tree specify a tbi-handle\n");
- return;
- }
- /*
- * If the link is already up, we must already be ok, and don't need to
- * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
- * everything for us? Resetting it takes the link down and requires
- * several seconds for it to come back.
- */
- if (phy_read(ugeth->tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
- return;
-
- /* Single clk mode, mii mode off(for serdes communication) */
- phy_write(ugeth->tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
-
- phy_write(ugeth->tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
-
- phy_write(ugeth->tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
-
-}
static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
{
@@ -3711,6 +3718,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
}
ug_info->phy_node = phy;
+ /* Find the TBI PHY node. If it's not there, we don't support SGMII */
+ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
/* get the phy interface type, or default to MII */
prop = of_get_property(np, "phy-connection-type", NULL);
if (!prop) {
@@ -3818,37 +3828,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth->ndev = dev;
ugeth->node = np;
- /* Find the TBI PHY. If it's not there, we don't support SGMII */
- ph = of_get_property(np, "tbi-handle", NULL);
- if (ph) {
- struct device_node *tbi = of_find_node_by_phandle(*ph);
- struct of_device *ofdev;
- struct mii_bus *bus;
- const unsigned int *id;
-
- if (!tbi)
- return 0;
-
- mdio = of_get_parent(tbi);
- if (!mdio)
- return 0;
-
- ofdev = of_find_device_by_node(mdio);
-
- of_node_put(mdio);
-
- id = of_get_property(tbi, "reg", NULL);
- if (!id)
- return 0;
- of_node_put(tbi);
-
- bus = dev_get_drvdata(&ofdev->dev);
- if (!bus)
- return 0;
-
- ugeth->tbiphy = bus->phy_map[*id];
- }
-
return 0;
}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 5beba4c14532..195ab267ead7 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1125,6 +1125,7 @@ struct ucc_geth_info {
u16 pausePeriod;
u16 extensionField;
struct device_node *phy_node;
+ struct device_node *tbi_node;
u8 weightfactor[NUM_TX_QUEUES];
u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
@@ -1213,7 +1214,6 @@ struct ucc_geth_private {
struct ugeth_mii_info *mii_info;
struct phy_device *phydev;
- struct phy_device *tbiphy;
phy_interface_t phy_interface;
int max_speed;
uint32_t msg_enable;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e2a7725e567e..b02f7adff5dc 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -989,8 +989,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
if (ret < 0)
goto err_iounmap;
- if (velocity_get_link(dev))
+ if (!velocity_get_link(dev)) {
netif_carrier_off(dev);
+ vptr->mii_status |= VELOCITY_LINK_FAIL;
+ }
velocity_print_info(vptr);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 52198f6797a4..2a6e81d5b579 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -709,7 +709,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
allmulti ? "en" : "dis");
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((dev->uc_count + dev->mc_count) * ETH_ALEN) +
+ mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
@@ -719,16 +719,16 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_table(sg, 2);
/* Store the unicast list and count in the front of the buffer */
- mac_data->entries = dev->uc_count;
+ mac_data->entries = dev->uc.count;
i = 0;
- list_for_each_entry(ha, &dev->uc_list, list)
+ list_for_each_entry(ha, &dev->uc.list, list)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
- sizeof(mac_data->entries) + (dev->uc_count * ETH_ALEN));
+ sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
/* multicast list and count fill the end */
- mac_data = (void *)&mac_data->macs[dev->uc_count][0];
+ mac_data = (void *)&mac_data->macs[dev->uc.count][0];
mac_data->entries = dev->mc_count;
addr = dev->mc_list;
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 26cde573af43..58d2551c78ed 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -454,7 +454,7 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
return VXGE_HW_OK;
}
-static enum vxge_hw_status
+enum vxge_hw_status
__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
{
if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
@@ -676,10 +676,12 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
enum vxge_hw_status status = VXGE_HW_OK;
- /* Validate the pci-e link width and speed */
- status = __vxge_hw_verify_pci_e_info(hldev);
- if (status != VXGE_HW_OK)
- goto exit;
+ if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) {
+ /* Validate the pci-e link width and speed */
+ status = __vxge_hw_verify_pci_e_info(hldev);
+ if (status != VXGE_HW_OK)
+ goto exit;
+ }
vxge_hw_wrr_rebalance(hldev);
exit:
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 6c838b3e063a..6034497536a4 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4203,6 +4203,16 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
max_vpath_supported++;
}
+ /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
+ if ((VXGE_HW_FUNCTION_MODE_SRIOV ==
+ ll_config.device_hw_info.function_mode) &&
+ (max_config_dev > 1) && (pdev->is_physfn)) {
+ ret = pci_enable_sriov(pdev, max_config_dev - 1);
+ if (ret)
+ vxge_debug_ll_config(VXGE_ERR,
+ "Failed to enable SRIOV: %d \n", ret);
+ }
+
/*
* Configure vpaths and get driver configured number of vpaths
* which is less than or equal to the maximum vpaths per function.
@@ -4366,6 +4376,7 @@ _exit6:
vxge_device_unregister(hldev);
_exit5:
+ pci_disable_sriov(pdev);
vxge_hw_device_terminate(hldev);
_exit4:
iounmap(attr.bar1);
@@ -4429,6 +4440,8 @@ vxge_remove(struct pci_dev *pdev)
iounmap(vdev->bar0);
iounmap(vdev->bar1);
+ pci_disable_sriov(pdev);
+
/* we are safe to free it now */
free_netdev(dev);
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 7da02c545ed5..82786ffb7dd9 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "0"
-#define VXGE_VERSION_FIX "1"
-#define VXGE_VERSION_BUILD "17129"
+#define VXGE_VERSION_FIX "4"
+#define VXGE_VERSION_BUILD "17795"
#define VXGE_VERSION_FOR "k"
#endif
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 2dd78d20eb05..aff4f6bdf3d5 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -149,46 +149,40 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
*/
static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int err = -ENODEV;
+ int err;
/*
* Just to be *really* sure not to send anything if the interface
* is down, the ethernet device may have gone.
*/
- if (!netif_running(dev)) {
+ if (!netif_running(dev))
goto drop;
- }
switch (skb->data[0]) {
case 0x00:
- err = 0;
break;
case 0x01:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_connect_request "
"error: %d\n", err);
- goto drop_ok;
+ goto drop;
case 0x02:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
printk(KERN_ERR "lapbeth: lapb_disconnect_request "
"err: %d\n", err);
/* Fall thru */
default:
- goto drop_ok;
+ goto drop;
}
skb_pull(skb, 1);
if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
- err = -ENOMEM;
goto drop;
}
- err = 0;
out:
- return err;
-drop_ok:
- err = 0;
+ return NETDEV_TX_OK;
drop:
kfree_skb(skb);
goto out;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index ec35503f6a40..2942f13c9c4a 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -733,8 +733,9 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/*
* Set the beacon register and enable all timers.
*/
- /* When in AP mode zero timer0 to start TSF */
- if (ah->ah_op_mode == NL80211_IFTYPE_AP)
+ /* When in AP or Mesh Point mode zero timer0 to start TSF */
+ if (ah->ah_op_mode == NL80211_IFTYPE_AP ||
+ ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 0ed1ac312aa6..2d79610bce12 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -1,7 +1,6 @@
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on PCI && MAC80211 && WLAN_80211
- depends on RFKILL || RFKILL=n
select ATH_COMMON
select MAC80211_LEDS
select LEDS_CLASS
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 515880aa2116..5efc9345ca0d 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <net/mac80211.h>
#include <linux/leds.h>
-#include <linux/rfkill.h>
#include "hw.h"
#include "rc.h"
@@ -460,12 +459,6 @@ struct ath_led {
bool registered;
};
-struct ath_rfkill {
- struct rfkill *rfkill;
- struct rfkill_ops ops;
- char rfkill_name[32];
-};
-
/********************/
/* Main driver core */
/********************/
@@ -505,7 +498,6 @@ struct ath_rfkill {
#define SC_OP_PROTECT_ENABLE BIT(6)
#define SC_OP_RXFLUSH BIT(7)
#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_RFKILL_REGISTERED BIT(9)
#define SC_OP_WAIT_FOR_BEACON BIT(12)
#define SC_OP_LED_ON BIT(13)
#define SC_OP_SCANNING BIT(14)
@@ -591,7 +583,6 @@ struct ath_softc {
int beacon_interval;
- struct ath_rfkill rf_kill;
struct ath_ani ani;
struct ath9k_node_stats nodestats;
#ifdef CONFIG_ATH9K_DEBUG
@@ -677,6 +668,7 @@ static inline void ath9k_ps_restore(struct ath_softc *sc)
if (atomic_dec_and_test(&sc->ps_usecount))
if ((sc->hw->conf.flags & IEEE80211_CONF_PS) &&
!(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_CAB |
SC_OP_WAIT_FOR_PSPOLL_DATA |
SC_OP_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1579c9407ed5..34935a8ee59d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2186,6 +2186,18 @@ static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan
REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
+static void ath9k_enable_rfkill(struct ath_hw *ah)
+{
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
+
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
+ AR_GPIO_INPUT_MUX2_RFSILENT);
+
+ ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
+}
+
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange)
{
@@ -2313,10 +2325,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
ath9k_hw_init_qos(ah);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
ath9k_enable_rfkill(ah);
-#endif
+
ath9k_hw_init_user_settings(ah);
REG_WRITE(ah, AR_STA_ID1,
@@ -3613,20 +3624,6 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
AR_GPIO_BIT(gpio));
}
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
-void ath9k_enable_rfkill(struct ath_hw *ah)
-{
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
- AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
-
- REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
- AR_GPIO_INPUT_MUX2_RFSILENT);
-
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
- REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
-}
-#endif
-
u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
{
return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd8508ef6e05..9d0b31ad4603 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -565,9 +565,6 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
u32 ah_signal_type);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
-void ath9k_enable_rfkill(struct ath_hw *ah);
-#endif
u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f7baa406918b..9f49a3251d4d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -231,6 +231,19 @@ static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
}
}
+static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
+ struct ieee80211_hw *hw)
+{
+ struct ieee80211_channel *curchan = hw->conf.channel;
+ struct ath9k_channel *channel;
+ u8 chan_idx;
+
+ chan_idx = curchan->hw_value;
+ channel = &sc->sc_ah->channels[chan_idx];
+ ath9k_update_ichannel(sc, hw, channel);
+ return channel;
+}
+
/*
* Set/change channels. If the channel is really being changed, it's done
* by reseting the chip. To accomplish this we must first cleanup any pending
@@ -283,7 +296,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
- return r;
+ goto ps_restore;
}
spin_unlock_bh(&sc->sc_resetlock);
@@ -292,14 +305,17 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (ath_startrecv(sc) != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
"Unable to restart recv logic\n");
- return -EIO;
+ r = -EIO;
+ goto ps_restore;
}
ath_cache_conf_rate(sc, &hw->conf);
ath_update_txpow(sc);
ath9k_hw_set_interrupts(ah, sc->imask);
+
+ ps_restore:
ath9k_ps_restore(sc);
- return 0;
+ return r;
}
/*
@@ -1110,6 +1126,9 @@ void ath_radio_enable(struct ath_softc *sc)
ath9k_ps_wakeup(sc);
ath9k_hw_configpcipowersave(ah, 0);
+ if (!ah->curchan)
+ ah->curchan = ath_get_curchannel(sc, sc->hw);
+
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
@@ -1162,6 +1181,9 @@ void ath_radio_disable(struct ath_softc *sc)
ath_stoprecv(sc); /* turn off frame recv */
ath_flushrecv(sc); /* flush recv queue */
+ if (!ah->curchan)
+ ah->curchan = ath_get_curchannel(sc, sc->hw);
+
spin_lock_bh(&sc->sc_resetlock);
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
@@ -1178,8 +1200,6 @@ void ath_radio_disable(struct ath_softc *sc)
ath9k_ps_restore(sc);
}
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
-
/*******************/
/* Rfkill */
/*******************/
@@ -1192,81 +1212,27 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
ah->rfkill_polarity;
}
-/* s/w rfkill handlers */
-static int ath_rfkill_set_block(void *data, bool blocked)
+static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
{
- struct ath_softc *sc = data;
-
- if (blocked)
- ath_radio_disable(sc);
- else
- ath_radio_enable(sc);
-
- return 0;
-}
-
-static void ath_rfkill_poll_state(struct rfkill *rfkill, void *data)
-{
- struct ath_softc *sc = data;
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
bool blocked = !!ath_is_rfkill_set(sc);
- if (rfkill_set_hw_state(rfkill, blocked))
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+
+ if (blocked)
ath_radio_disable(sc);
else
ath_radio_enable(sc);
}
-/* Init s/w rfkill */
-static int ath_init_sw_rfkill(struct ath_softc *sc)
-{
- sc->rf_kill.ops.set_block = ath_rfkill_set_block;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- sc->rf_kill.ops.poll = ath_rfkill_poll_state;
-
- snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
- "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
-
- sc->rf_kill.rfkill = rfkill_alloc(sc->rf_kill.rfkill_name,
- wiphy_dev(sc->hw->wiphy),
- RFKILL_TYPE_WLAN,
- &sc->rf_kill.ops, sc);
- if (!sc->rf_kill.rfkill) {
- DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/* Deinitialize rfkill */
-static void ath_deinit_rfkill(struct ath_softc *sc)
-{
- if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
- rfkill_unregister(sc->rf_kill.rfkill);
- rfkill_destroy(sc->rf_kill.rfkill);
- sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
- }
-}
-
-static int ath_start_rfkill_poll(struct ath_softc *sc)
+static void ath_start_rfkill_poll(struct ath_softc *sc)
{
- if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
- if (rfkill_register(sc->rf_kill.rfkill)) {
- DPRINTF(sc, ATH_DBG_FATAL,
- "Unable to register rfkill\n");
- rfkill_destroy(sc->rf_kill.rfkill);
-
- /* Deinitialize the device */
- ath_cleanup(sc);
- return -EIO;
- } else {
- sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
- }
- }
+ struct ath_hw *ah = sc->sc_ah;
- return 0;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
}
-#endif /* CONFIG_RFKILL */
void ath_cleanup(struct ath_softc *sc)
{
@@ -1286,9 +1252,6 @@ void ath_detach(struct ath_softc *sc)
DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- ath_deinit_rfkill(sc);
-#endif
ath_deinit_leds(sc);
cancel_work_sync(&sc->chan_work);
cancel_delayed_work_sync(&sc->wiphy_work);
@@ -1626,13 +1589,6 @@ int ath_attach(u16 devid, struct ath_softc *sc)
if (error != 0)
goto error_attach;
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- /* Initialize s/w rfkill */
- error = ath_init_sw_rfkill(sc);
- if (error)
- goto error_attach;
-#endif
-
INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
sc->wiphy_scheduler_int = msecs_to_jiffies(500);
@@ -1648,6 +1604,7 @@ int ath_attach(u16 devid, struct ath_softc *sc)
/* Initialize LED control */
ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
return 0;
@@ -1920,7 +1877,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
struct ath_softc *sc = aphy->sc;
struct ieee80211_channel *curchan = hw->conf.channel;
struct ath9k_channel *init_channel;
- int r, pos;
+ int r;
DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
"initial channel: %d MHz\n", curchan->center_freq);
@@ -1950,11 +1907,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
/* setup initial channel */
- pos = curchan->hw_value;
+ sc->chan_idx = curchan->hw_value;
- sc->chan_idx = pos;
- init_channel = &sc->sc_ah->channels[pos];
- ath9k_update_ichannel(sc, hw, init_channel);
+ init_channel = ath_get_curchannel(sc, hw);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(sc->sc_ah, 0);
@@ -2018,10 +1973,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
ieee80211_wake_queues(hw);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- r = ath_start_rfkill_poll(sc);
-#endif
-
mutex_unlock:
mutex_unlock(&sc->mutex);
@@ -2159,7 +2110,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
} else
sc->rx.rxlink = NULL;
- rfkill_pause_polling(sc->rf_kill.rfkill);
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(sc->sc_ah);
@@ -2765,6 +2716,7 @@ struct ieee80211_ops ath9k_ops = {
.ampdu_action = ath9k_ampdu_action,
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
+ .rfkill_poll = ath9k_rfkill_poll_state,
};
static struct {
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 5014a19b0f75..f99f3a76df3f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -817,6 +817,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
}
if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
+ SC_OP_WAIT_FOR_CAB |
SC_OP_WAIT_FOR_PSPOLL_DATA)))
ath_rx_ps(sc, skb);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index a5637c4aa85d..6d1519e1f011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2152,7 +2152,6 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
/* we should be verifying the device is ready to be opened */
mutex_lock(&priv->mutex);
- memset(&priv->staging_rxon, 0, sizeof(struct iwl_rxon_cmd));
/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
* ucode filename and max sizes are card-specific. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index f9d16ca5b3d9..6ab07165ea28 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -629,13 +629,9 @@ u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
if (!sta_ht_inf->ht_supported)
return 0;
}
-
- if (iwl_ht_conf->ht_protection & IEEE80211_HT_OP_MODE_PROTECTION_20MHZ)
- return 1;
- else
- return iwl_is_channel_extension(priv, priv->band,
- le16_to_cpu(priv->staging_rxon.channel),
- iwl_ht_conf->extension_chan_offset);
+ return iwl_is_channel_extension(priv, priv->band,
+ le16_to_cpu(priv->staging_rxon.channel),
+ iwl_ht_conf->extension_chan_offset);
}
EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
@@ -826,9 +822,18 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
if (iwl_is_fat_tx_allowed(priv, NULL)) {
/* pure 40 fat */
- if (rxon->flags & RXON_FLG_FAT_PROT_MSK)
+ if (ht_info->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
- else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ht_info->extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
/* Note: control channel is opposite of extension channel */
switch (ht_info->extension_chan_offset) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
@@ -2390,39 +2395,46 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
}
- if ((changes & BSS_CHANGED_BSSID) && !iwl_is_rfkill(priv)) {
- /* If there is currently a HW scan going on in the background
- * then we need to cancel it else the RXON below will fail. */
+ if (changes & BSS_CHANGED_BEACON_INT) {
+ priv->beacon_int = bss_conf->beacon_int;
+ /* TODO: in AP mode, do something to make this take effect */
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
if (iwl_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv, "Aborted scan still in progress "
- "after 100ms\n");
+ IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
mutex_unlock(&priv->mutex);
return;
}
- memcpy(priv->staging_rxon.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* TODO: Audit driver for usage of these members and see
- * if mac80211 deprecates them (priv->bssid looks like it
- * shouldn't be there, but I haven't scanned the IBSS code
- * to verify) - jpk */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-
- if (priv->iw_mode == NL80211_IFTYPE_AP)
- iwlcore_config_ap(priv);
- else {
- int rc = iwlcore_commit_rxon(priv);
- if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
- iwl_rxon_add_station(
- priv, priv->active_rxon.bssid_addr, 1);
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
+ bss_conf->assoc) {
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ priv->staging_rxon.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
}
- } else if (!iwl_is_rfkill(priv)) {
- iwl_scan_cancel_timeout(priv, 100);
- priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwlcore_commit_rxon(priv);
+
}
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -2431,8 +2443,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
iwl_mac_beacon_update(hw, beacon);
}
- mutex_unlock(&priv->mutex);
-
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
bss_conf->use_short_preamble);
@@ -2450,6 +2460,23 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
}
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from iwl_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ priv->staging_rxon.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ priv->staging_rxon.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ priv->staging_rxon.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
if (changes & BSS_CHANGED_HT) {
iwl_ht_conf(priv, bss_conf);
@@ -2459,10 +2486,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- /* This should never happen as this function should
- * never be called from interrupt context. */
- if (WARN_ON_ONCE(in_interrupt()))
- return;
if (bss_conf->assoc) {
priv->assoc_id = bss_conf->aid;
priv->beacon_int = bss_conf->beacon_int;
@@ -2470,27 +2493,35 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
priv->timestamp = bss_conf->timestamp;
priv->assoc_capability = bss_conf->assoc_capability;
- /* we have just associated, don't start scan too early
- * leave time for EAPOL exchange to complete
+ /*
+ * We have just associated, don't start scan too early
+ * leave time for EAPOL exchange to complete.
+ *
+ * XXX: do this in mac80211
*/
priv->next_scan_jiffies = jiffies +
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
- mutex_lock(&priv->mutex);
- priv->cfg->ops->lib->post_associate(priv);
- mutex_unlock(&priv->mutex);
- } else {
+ if (!iwl_is_rfkill(priv))
+ priv->cfg->ops->lib->post_associate(priv);
+ } else
priv->assoc_id = 0;
- IWL_DEBUG_MAC80211(priv, "DISASSOC %d\n", bss_conf->assoc);
+
+ }
+
+ if (changes && iwl_is_associated(priv) && priv->assoc_id) {
+ IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
+ changes);
+ ret = iwl_send_rxon_assoc(priv);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&priv->active_rxon,
+ &priv->staging_rxon,
+ sizeof(struct iwl_rxon_cmd));
}
- } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
- IWL_DEBUG_MAC80211(priv, "Associated Changes %d\n", changes);
- ret = iwl_send_rxon_assoc(priv);
- if (!ret)
- /* Sync active_rxon with latest change. */
- memcpy((void *)&priv->active_rxon,
- &priv->staging_rxon,
- sizeof(struct iwl_rxon_cmd));
}
+
+ mutex_unlock(&priv->mutex);
+
IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_bss_info_changed);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 83d31606dd00..cb9bd4c8f25e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -2498,8 +2498,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
struct iwl3945_rxon_cmd *active_rxon =
(struct iwl3945_rxon_cmd *)(&priv->active_rxon);
- memcpy(&priv->staging_rxon, &priv->active_rxon,
- sizeof(priv->staging_rxon));
+ priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
@@ -3147,7 +3146,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
/* we should be verifying the device is ready to be opened */
mutex_lock(&priv->mutex);
- memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
* ucode filename and max sizes are card-specific. */
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 06a46d7b3d6c..6564282ce476 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -812,7 +812,6 @@ out:
static void if_spi_e2h(struct if_spi_card *card)
{
int err = 0;
- unsigned long flags;
u32 cause;
struct lbs_private *priv = card->priv;
@@ -827,10 +826,7 @@ static void if_spi_e2h(struct if_spi_card *card)
/* generate a card interrupt */
spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT);
- spin_lock_irqsave(&priv->driver_lock, flags);
lbs_queue_event(priv, cause & 0xff);
- spin_unlock_irqrestore(&priv->driver_lock, flags);
-
out:
if (err)
lbs_pr_err("%s: error %d\n", __func__, err);
@@ -875,7 +871,12 @@ static int lbs_spi_thread(void *data)
err = if_spi_c2h_data(card);
if (err)
goto err;
- if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY) {
+
+ /* workaround: in PS mode, the card does not set the Command
+ * Download Ready bit, but it sets TX Download Ready. */
+ if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
+ (card->priv->psstate != PS_STATE_FULL_POWER &&
+ (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
/* This means two things. First of all,
* if there was a previous command sent, the card has
* successfully received it.
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 3039fcb86afc..12403516776a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -99,11 +99,11 @@ void pci_update_resource(struct pci_dev *dev, int resno)
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
- struct resource *root = NULL;
+ struct resource *root;
char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
int err;
- root = pcibios_select_root(dev, res);
+ root = pci_find_parent_resource(dev, res);
err = -EINVAL;
if (root != NULL)
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2faf0e14f05a..74909c4aaeea 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -177,7 +177,7 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
static int dell_rfkill_set(void *data, bool blocked)
{
struct calling_interface_buffer buffer;
- int disable = blocked ? 0 : 1;
+ int disable = blocked ? 1 : 0;
unsigned long radio = (unsigned long)data;
memset(&buffer, 0, sizeof(struct calling_interface_buffer));
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e48d9a4506ff..dafaa4a92df5 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1133,8 +1133,9 @@ static void sony_nc_rfkill_update()
continue;
if (hwblock) {
- if (rfkill_set_hw_state(sony_rfkill_devices[i], true))
- sony_nc_rfkill_set((void *)i, true);
+ if (rfkill_set_hw_state(sony_rfkill_devices[i], true)) {
+ /* we already know we're blocked */
+ }
continue;
}
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
new file mode 100644
index 000000000000..cc2eb8edb514
--- /dev/null
+++ b/drivers/pps/Kconfig
@@ -0,0 +1,33 @@
+#
+# PPS support configuration
+#
+
+menu "PPS support"
+
+config PPS
+ tristate "PPS support"
+ depends on EXPERIMENTAL
+ ---help---
+ PPS (Pulse Per Second) is a special pulse provided by some GPS
+ antennae. Userland can use it to get a high-precision time
+ reference.
+
+	  Some antennae's PPS signals are connected to the CD (Carrier
+ Detect) pin of the serial line they use to communicate with the
+ host. In this case use the SERIAL_LINE client support.
+
+	  Some antennae's PPS signals are connected to dedicated host
+	  inputs, so you have to enable the corresponding client support.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pps_core.ko.
+
+config PPS_DEBUG
+ bool "PPS debugging messages"
+ depends on PPS
+ help
+ Say Y here if you want the PPS support to produce a bunch of debug
+ messages to the system log. Select this if you are having a
+ problem with PPS support and want to see more of what is going on.
+
+endmenu
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
new file mode 100644
index 000000000000..19ea582f431d
--- /dev/null
+++ b/drivers/pps/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the PPS core.
+#
+
+pps_core-y := pps.o kapi.o sysfs.o
+obj-$(CONFIG_PPS) := pps_core.o
+
+ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
new file mode 100644
index 000000000000..35a0b192d768
--- /dev/null
+++ b/drivers/pps/kapi.c
@@ -0,0 +1,329 @@
+/*
+ * kernel API
+ *
+ *
+ * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/pps_kernel.h>
+
+/*
+ * Global variables
+ */
+
+DEFINE_SPINLOCK(pps_idr_lock);
+DEFINE_IDR(pps_idr);
+
+/*
+ * Local functions
+ */
+
+static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset)
+{
+ ts->nsec += offset->nsec;
+ while (ts->nsec >= NSEC_PER_SEC) {
+ ts->nsec -= NSEC_PER_SEC;
+ ts->sec++;
+ }
+ while (ts->nsec < 0) {
+ ts->nsec += NSEC_PER_SEC;
+ ts->sec--;
+ }
+ ts->sec += offset->sec;
+}
+
+/*
+ * Exported functions
+ */
+
+/* pps_get_source - find a PPS source
+ * @source: the PPS source ID.
+ *
+ * This function is used to find an already registered PPS source in the
+ * system.
+ *
+ * The function returns NULL if it finds nothing, otherwise it returns a
+ * pointer to the PPS source data struct (with its refcount incremented by 1).
+ */
+
+struct pps_device *pps_get_source(int source)
+{
+ struct pps_device *pps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pps_idr_lock, flags);
+
+ pps = idr_find(&pps_idr, source);
+ if (pps != NULL)
+ atomic_inc(&pps->usage);
+
+ spin_unlock_irqrestore(&pps_idr_lock, flags);
+
+ return pps;
+}
+
+/* pps_put_source - free the PPS source data
+ * @pps: a pointer to the PPS source.
+ *
+ * This function is used to free a PPS data struct if its refcount is 0.
+ */
+
+void pps_put_source(struct pps_device *pps)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pps_idr_lock, flags);
+ BUG_ON(atomic_read(&pps->usage) == 0);
+
+ if (!atomic_dec_and_test(&pps->usage)) {
+ pps = NULL;
+ goto exit;
+ }
+
+ /* No more reference to the PPS source. We can safely remove the
+ * PPS data struct.
+ */
+ idr_remove(&pps_idr, pps->id);
+
+exit:
+ spin_unlock_irqrestore(&pps_idr_lock, flags);
+ kfree(pps);
+}
+
+/* pps_register_source - add a PPS source in the system
+ * @info: the PPS info struct
+ * @default_params: the default PPS parameters of the new source
+ *
+ * This function is used to add a new PPS source in the system. The new
+ * source is described by the info fields and will use, as its default PPS
+ * parameters, the ones specified in default_params.
+ *
+ * The function returns, in case of success, the PPS source ID.
+ */
+
+int pps_register_source(struct pps_source_info *info, int default_params)
+{
+ struct pps_device *pps;
+ int id;
+ int err;
+
+ /* Sanity checks */
+ if ((info->mode & default_params) != default_params) {
+ printk(KERN_ERR "pps: %s: unsupported default parameters\n",
+ info->name);
+ err = -EINVAL;
+ goto pps_register_source_exit;
+ }
+ if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
+ info->echo == NULL) {
+ printk(KERN_ERR "pps: %s: echo function is not defined\n",
+ info->name);
+ err = -EINVAL;
+ goto pps_register_source_exit;
+ }
+ if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
+ printk(KERN_ERR "pps: %s: unspecified time format\n",
+ info->name);
+ err = -EINVAL;
+ goto pps_register_source_exit;
+ }
+
+ /* Allocate memory for the new PPS source struct */
+ pps = kzalloc(sizeof(struct pps_device), GFP_KERNEL);
+ if (pps == NULL) {
+ err = -ENOMEM;
+ goto pps_register_source_exit;
+ }
+
+ /* These initializations must be done before calling idr_get_new()
+	 * in order to avoid races in pps_event().
+ */
+ pps->params.api_version = PPS_API_VERS;
+ pps->params.mode = default_params;
+ pps->info = *info;
+
+ init_waitqueue_head(&pps->queue);
+ spin_lock_init(&pps->lock);
+ atomic_set(&pps->usage, 1);
+
+ /* Get new ID for the new PPS source */
+ if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
+ err = -ENOMEM;
+ goto kfree_pps;
+ }
+
+ spin_lock_irq(&pps_idr_lock);
+
+ /* Now really allocate the PPS source.
+	 * After the idr_get_new() call the new source will be freely available
+	 * within the kernel.
+ */
+ err = idr_get_new(&pps_idr, pps, &id);
+ if (err < 0) {
+ spin_unlock_irq(&pps_idr_lock);
+ goto kfree_pps;
+ }
+
+ id = id & MAX_ID_MASK;
+ if (id >= PPS_MAX_SOURCES) {
+ spin_unlock_irq(&pps_idr_lock);
+
+ printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
+ info->name);
+ err = -EBUSY;
+ goto free_idr;
+ }
+ pps->id = id;
+
+ spin_unlock_irq(&pps_idr_lock);
+
+ /* Create the char device */
+ err = pps_register_cdev(pps);
+ if (err < 0) {
+ printk(KERN_ERR "pps: %s: unable to create char device\n",
+ info->name);
+ goto free_idr;
+ }
+
+ pr_info("new PPS source %s at ID %d\n", info->name, id);
+
+ return id;
+
+free_idr:
+ spin_lock_irq(&pps_idr_lock);
+ idr_remove(&pps_idr, id);
+ spin_unlock_irq(&pps_idr_lock);
+
+kfree_pps:
+ kfree(pps);
+
+pps_register_source_exit:
+ printk(KERN_ERR "pps: %s: unable to register source\n", info->name);
+
+ return err;
+}
+EXPORT_SYMBOL(pps_register_source);
+
+/* pps_unregister_source - remove a PPS source from the system
+ * @source: the PPS source ID
+ *
+ * This function is used to remove a previously registered PPS source from
+ * the system.
+ */
+
+void pps_unregister_source(int source)
+{
+ struct pps_device *pps;
+
+ spin_lock_irq(&pps_idr_lock);
+ pps = idr_find(&pps_idr, source);
+
+ if (!pps) {
+ BUG();
+ spin_unlock_irq(&pps_idr_lock);
+ return;
+ }
+ spin_unlock_irq(&pps_idr_lock);
+
+ pps_unregister_cdev(pps);
+ pps_put_source(pps);
+}
+EXPORT_SYMBOL(pps_unregister_source);
+
+/* pps_event - register a PPS event into the system
+ * @source: the PPS source ID
+ * @ts: the event timestamp
+ * @event: the event type
+ * @data: userdef pointer
+ *
+ * This function is used by each PPS client in order to register a new
+ * PPS event with the system (it's usually called inside an IRQ handler).
+ *
+ * If an echo function is associated with the PPS source it will be called
+ * as:
+ * pps->info.echo(source, event, data);
+ */
+
+void pps_event(int source, struct pps_ktime *ts, int event, void *data)
+{
+ struct pps_device *pps;
+ unsigned long flags;
+
+ if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
+ printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
+ event, source);
+ return;
+ }
+
+ pps = pps_get_source(source);
+ if (!pps)
+ return;
+
+ pr_debug("PPS event on source %d at %llu.%06u\n",
+ pps->id, (unsigned long long) ts->sec, ts->nsec);
+
+ spin_lock_irqsave(&pps->lock, flags);
+
+	/* Do we need to call the echo function? */
+ if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
+ pps->info.echo(source, event, data);
+
+ /* Check the event */
+ pps->current_mode = pps->params.mode;
+ if (event & PPS_CAPTUREASSERT) {
+		/* Do we need to add an offset? */
+ if (pps->params.mode & PPS_OFFSETASSERT)
+ pps_add_offset(ts, &pps->params.assert_off_tu);
+
+ /* Save the time stamp */
+ pps->assert_tu = *ts;
+ pps->assert_sequence++;
+ pr_debug("capture assert seq #%u for source %d\n",
+ pps->assert_sequence, source);
+ }
+ if (event & PPS_CAPTURECLEAR) {
+		/* Do we need to add an offset? */
+ if (pps->params.mode & PPS_OFFSETCLEAR)
+ pps_add_offset(ts, &pps->params.clear_off_tu);
+
+ /* Save the time stamp */
+ pps->clear_tu = *ts;
+ pps->clear_sequence++;
+ pr_debug("capture clear seq #%u for source %d\n",
+ pps->clear_sequence, source);
+ }
+
+ pps->go = ~0;
+ wake_up_interruptible(&pps->queue);
+
+ kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
+
+ spin_unlock_irqrestore(&pps->lock, flags);
+
+ /* Now we can release the PPS source for (possible) deregistration */
+ pps_put_source(pps);
+}
+EXPORT_SYMBOL(pps_event);
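
The kernel-doc comments above (pps_register_source, pps_event) describe the client-side contract: register a source whose info->mode is a superset of the default parameters, then report each pulse from interrupt context. Below is a minimal, hypothetical client sketch; the field names (name, path, mode, owner) and the PPS_* mode bits are the ones referenced in kapi.c above, while the my_pps_* identifiers, the use of getnstimeofday() for the timestamp, and the missing request_irq() wiring are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/pps_kernel.h>

/* Hypothetical client of the API added above. */
static struct pps_source_info my_pps_info = {
	.name	= "my-pps",			/* illustrative name */
	.path	= "",				/* no device path to report */
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static int my_pps_source = -1;

static irqreturn_t my_pps_irq(int irq, void *dev_id)
{
	struct timespec now;
	struct pps_ktime ts;

	/* grab the timestamp as early as possible */
	getnstimeofday(&now);
	ts.sec = now.tv_sec;
	ts.nsec = now.tv_nsec;
	ts.flags = 0;

	/* "usually called inside an IRQ handler", as the kernel-doc says */
	pps_event(my_pps_source, &ts, PPS_CAPTUREASSERT, NULL);

	return IRQ_HANDLED;
}

static int __init my_pps_init(void)
{
	/* request_irq() for my_pps_irq omitted in this sketch */
	my_pps_source = pps_register_source(&my_pps_info,
					PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	return my_pps_source < 0 ? my_pps_source : 0;
}

static void __exit my_pps_exit(void)
{
	pps_unregister_source(my_pps_source);
}

module_init(my_pps_init);
module_exit(my_pps_exit);
MODULE_LICENSE("GPL");
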
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
new file mode 100644
index 000000000000..ac8cc8cea1e3
--- /dev/null
+++ b/drivers/pps/pps.c
@@ -0,0 +1,312 @@
+/*
+ * PPS core file
+ *
+ *
+ * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/pps_kernel.h>
+
+/*
+ * Local variables
+ */
+
+static dev_t pps_devt;
+static struct class *pps_class;
+
+/*
+ * Char device methods
+ */
+
+static unsigned int pps_cdev_poll(struct file *file, poll_table *wait)
+{
+ struct pps_device *pps = file->private_data;
+
+ poll_wait(file, &pps->queue, wait);
+
+ return POLLIN | POLLRDNORM;
+}
+
+static int pps_cdev_fasync(int fd, struct file *file, int on)
+{
+ struct pps_device *pps = file->private_data;
+ return fasync_helper(fd, file, on, &pps->async_queue);
+}
+
+static long pps_cdev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct pps_device *pps = file->private_data;
+ struct pps_kparams params;
+ struct pps_fdata fdata;
+ unsigned long ticks;
+ void __user *uarg = (void __user *) arg;
+ int __user *iuarg = (int __user *) arg;
+ int err;
+
+ switch (cmd) {
+ case PPS_GETPARAMS:
+ pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
+
+ /* Return current parameters */
+ err = copy_to_user(uarg, &pps->params,
+ sizeof(struct pps_kparams));
+ if (err)
+ return -EFAULT;
+
+ break;
+
+ case PPS_SETPARAMS:
+ pr_debug("PPS_SETPARAMS: source %d\n", pps->id);
+
+ /* Check the capabilities */
+ if (!capable(CAP_SYS_TIME))
+ return -EPERM;
+
+ err = copy_from_user(&params, uarg, sizeof(struct pps_kparams));
+ if (err)
+ return -EFAULT;
+ if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
+ pr_debug("capture mode unspecified (%x)\n",
+ params.mode);
+ return -EINVAL;
+ }
+
+ /* Check for supported capabilities */
+ if ((params.mode & ~pps->info.mode) != 0) {
+ pr_debug("unsupported capabilities (%x)\n",
+ params.mode);
+ return -EINVAL;
+ }
+
+ spin_lock_irq(&pps->lock);
+
+ /* Save the new parameters */
+ pps->params = params;
+
+ /* Restore the read only parameters */
+ if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
+ /* section 3.3 of RFC 2783 interpreted */
+ pr_debug("time format unspecified (%x)\n",
+ params.mode);
+ pps->params.mode |= PPS_TSFMT_TSPEC;
+ }
+ if (pps->info.mode & PPS_CANWAIT)
+ pps->params.mode |= PPS_CANWAIT;
+ pps->params.api_version = PPS_API_VERS;
+
+ spin_unlock_irq(&pps->lock);
+
+ break;
+
+ case PPS_GETCAP:
+ pr_debug("PPS_GETCAP: source %d\n", pps->id);
+
+ err = put_user(pps->info.mode, iuarg);
+ if (err)
+ return -EFAULT;
+
+ break;
+
+ case PPS_FETCH:
+ pr_debug("PPS_FETCH: source %d\n", pps->id);
+
+ err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
+ if (err)
+ return -EFAULT;
+
+ pps->go = 0;
+
+ /* Manage the timeout */
+ if (fdata.timeout.flags & PPS_TIME_INVALID)
+ err = wait_event_interruptible(pps->queue, pps->go);
+ else {
+ pr_debug("timeout %lld.%09d\n",
+ (long long) fdata.timeout.sec,
+ fdata.timeout.nsec);
+ ticks = fdata.timeout.sec * HZ;
+ ticks += fdata.timeout.nsec / (NSEC_PER_SEC / HZ);
+
+ if (ticks != 0) {
+ err = wait_event_interruptible_timeout(
+ pps->queue, pps->go, ticks);
+ if (err == 0)
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Check for pending signals */
+ if (err == -ERESTARTSYS) {
+ pr_debug("pending signal caught\n");
+ return -EINTR;
+ }
+
+ /* Return the fetched timestamp */
+ spin_lock_irq(&pps->lock);
+
+ fdata.info.assert_sequence = pps->assert_sequence;
+ fdata.info.clear_sequence = pps->clear_sequence;
+ fdata.info.assert_tu = pps->assert_tu;
+ fdata.info.clear_tu = pps->clear_tu;
+ fdata.info.current_mode = pps->current_mode;
+
+ spin_unlock_irq(&pps->lock);
+
+ err = copy_to_user(uarg, &fdata, sizeof(struct pps_fdata));
+ if (err)
+ return -EFAULT;
+
+ break;
+
+ default:
+ return -ENOTTY;
+ break;
+ }
+
+ return 0;
+}
+
+static int pps_cdev_open(struct inode *inode, struct file *file)
+{
+ struct pps_device *pps = container_of(inode->i_cdev,
+ struct pps_device, cdev);
+ int found;
+
+ found = pps_get_source(pps->id) != 0;
+ if (!found)
+ return -ENODEV;
+
+ file->private_data = pps;
+
+ return 0;
+}
+
+static int pps_cdev_release(struct inode *inode, struct file *file)
+{
+ struct pps_device *pps = file->private_data;
+
+ /* Free the PPS source and wake up (possible) deregistration */
+ pps_put_source(pps);
+
+ return 0;
+}
+
+/*
+ * Char device stuff
+ */
+
+static const struct file_operations pps_cdev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .poll = pps_cdev_poll,
+ .fasync = pps_cdev_fasync,
+ .unlocked_ioctl = pps_cdev_ioctl,
+ .open = pps_cdev_open,
+ .release = pps_cdev_release,
+};
+
+int pps_register_cdev(struct pps_device *pps)
+{
+ int err;
+
+ pps->devno = MKDEV(MAJOR(pps_devt), pps->id);
+ cdev_init(&pps->cdev, &pps_cdev_fops);
+ pps->cdev.owner = pps->info.owner;
+
+ err = cdev_add(&pps->cdev, pps->devno, 1);
+ if (err) {
+ printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n",
+ pps->info.name, MAJOR(pps_devt), pps->id);
+ return err;
+ }
+	pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
+			"pps%d", pps->id);
+	if (IS_ERR(pps->dev)) {
+		err = PTR_ERR(pps->dev);
+		goto del_cdev;
+	}
+ dev_set_drvdata(pps->dev, pps);
+
+ pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
+ MAJOR(pps_devt), pps->id);
+
+ return 0;
+
+del_cdev:
+ cdev_del(&pps->cdev);
+
+ return err;
+}
+
+void pps_unregister_cdev(struct pps_device *pps)
+{
+ device_destroy(pps_class, pps->devno);
+ cdev_del(&pps->cdev);
+}
+
+/*
+ * Module stuff
+ */
+
+static void __exit pps_exit(void)
+{
+ class_destroy(pps_class);
+ unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES);
+}
+
+static int __init pps_init(void)
+{
+ int err;
+
+ pps_class = class_create(THIS_MODULE, "pps");
+	if (IS_ERR(pps_class)) {
+		printk(KERN_ERR "pps: failed to allocate class\n");
+		return PTR_ERR(pps_class);
+	}
+ }
+ pps_class->dev_attrs = pps_attrs;
+
+ err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
+ if (err < 0) {
+ printk(KERN_ERR "pps: failed to allocate char device region\n");
+ goto remove_class;
+ }
+
+ pr_info("LinuxPPS API ver. %d registered\n", PPS_API_VERS);
+ pr_info("Software ver. %s - Copyright 2005-2007 Rodolfo Giometti "
+ "<giometti@linux.it>\n", PPS_VERSION);
+
+ return 0;
+
+remove_class:
+ class_destroy(pps_class);
+
+ return err;
+}
+
+subsys_initcall(pps_init);
+module_exit(pps_exit);
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("LinuxPPS support (RFC 2783) - ver. " PPS_VERSION);
+MODULE_LICENSE("GPL");
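
pps_cdev_ioctl() above is what a user-space reader talks to through /dev/ppsN. A hedged user-space sketch follows, assuming the PPS_FETCH ioctl and PPS_TIME_INVALID flag exported by <linux/pps.h> and a /dev/pps0 node created by the class/device registration above; the struct pps_fdata fields (timeout.flags, info.assert_tu, info.assert_sequence) are the ones handled by the ioctl code.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_fdata fdata;
	int fd = open("/dev/pps0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&fdata, 0, sizeof(fdata));
	/* PPS_TIME_INVALID in the timeout means "wait for the next event",
	 * matching the wait_event_interruptible() branch in the ioctl above */
	fdata.timeout.flags = PPS_TIME_INVALID;

	if (ioctl(fd, PPS_FETCH, &fdata) == 0)
		printf("assert %lld.%09d #%u\n",
		       (long long)fdata.info.assert_tu.sec,
		       fdata.info.assert_tu.nsec,
		       fdata.info.assert_sequence);

	close(fd);
	return 0;
}
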
diff --git a/drivers/pps/sysfs.c b/drivers/pps/sysfs.c
new file mode 100644
index 000000000000..ef0978c71eee
--- /dev/null
+++ b/drivers/pps/sysfs.c
@@ -0,0 +1,98 @@
+/*
+ * PPS sysfs support
+ *
+ *
+ * Copyright (C) 2007-2009 Rodolfo Giometti <giometti@linux.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/pps_kernel.h>
+
+/*
+ * Attribute functions
+ */
+
+static ssize_t pps_show_assert(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ if (!(pps->info.mode & PPS_CAPTUREASSERT))
+ return 0;
+
+ return sprintf(buf, "%lld.%09d#%d\n",
+ (long long) pps->assert_tu.sec, pps->assert_tu.nsec,
+ pps->assert_sequence);
+}
+
+static ssize_t pps_show_clear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ if (!(pps->info.mode & PPS_CAPTURECLEAR))
+ return 0;
+
+ return sprintf(buf, "%lld.%09d#%d\n",
+ (long long) pps->clear_tu.sec, pps->clear_tu.nsec,
+ pps->clear_sequence);
+}
+
+static ssize_t pps_show_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%4x\n", pps->info.mode);
+}
+
+static ssize_t pps_show_echo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", !!pps->info.echo);
+}
+
+static ssize_t pps_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", pps->info.name);
+}
+
+static ssize_t pps_show_path(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", pps->info.path);
+}
+
+struct device_attribute pps_attrs[] = {
+ __ATTR(assert, S_IRUGO, pps_show_assert, NULL),
+ __ATTR(clear, S_IRUGO, pps_show_clear, NULL),
+ __ATTR(mode, S_IRUGO, pps_show_mode, NULL),
+ __ATTR(echo, S_IRUGO, pps_show_echo, NULL),
+ __ATTR(name, S_IRUGO, pps_show_name, NULL),
+ __ATTR(path, S_IRUGO, pps_show_path, NULL),
+ __ATTR_NULL,
+};
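
The attribute functions above expose the same data through sysfs as plain text; the assert/clear format is "sec.nsec#sequence", per the sprintf format used in pps_show_assert(). A small user-space sketch reading it; the /sys/class/pps/pps0 path is assumed from the "pps" class name and "pps%d" device names created in pps.c.

#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/class/pps/pps0/assert", "r");

	if (!f)
		return 1;

	if (fgets(line, sizeof(line), f)) {
		long long sec;
		int nsec;
		unsigned int seq;

		/* format written by pps_show_assert(): "%lld.%09d#%d\n" */
		if (sscanf(line, "%lld.%d#%u", &sec, &nsec, &seq) == 3)
			printf("last assert: %lld.%09d (seq %u)\n",
			       sec, nsec, seq);
	}

	fclose(f);
	return 0;
}
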
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 277d35d232fa..81adbdbd5042 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -296,6 +296,15 @@ config RTC_DRV_RX8581
This driver can also be built as a module. If so the module
will be called rtc-rx8581.
+config RTC_DRV_RX8025
+ tristate "Epson RX-8025SA/NB"
+ help
+ If you say yes here you get support for the Epson
+ RX-8025SA/NB RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rx8025.
+
endif # I2C
comment "SPI RTC drivers"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 6c0639a14f09..3c0f2b2ac927 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
+obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 2c4a65302a9d..8a6f9a9f9cb8 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -31,6 +31,8 @@ enum ds_type {
ds_1338,
ds_1339,
ds_1340,
+ ds_1388,
+ ds_3231,
m41t00,
rx_8025,
// rs5c372 too? different address...
@@ -66,6 +68,7 @@ enum ds_type {
#define DS1337_REG_CONTROL 0x0e
# define DS1337_BIT_nEOSC 0x80
# define DS1339_BIT_BBSQI 0x20
+# define DS3231_BIT_BBSQW 0x40 /* same as BBSQI */
# define DS1337_BIT_RS2 0x10
# define DS1337_BIT_RS1 0x08
# define DS1337_BIT_INTCN 0x04
@@ -94,6 +97,7 @@ enum ds_type {
struct ds1307 {
+ u8 offset; /* register's offset */
u8 regs[11];
enum ds_type type;
unsigned long flags;
@@ -128,6 +132,9 @@ static const struct chip_desc chips[] = {
},
[ds_1340] = {
},
+[ds_3231] = {
+ .alarm = 1,
+},
[m41t00] = {
},
[rx_8025] = {
@@ -138,7 +145,9 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds1337", ds_1337 },
{ "ds1338", ds_1338 },
{ "ds1339", ds_1339 },
+ { "ds1388", ds_1388 },
{ "ds1340", ds_1340 },
+ { "ds3231", ds_3231 },
{ "m41t00", m41t00 },
{ "rx8025", rx_8025 },
{ }
@@ -291,7 +300,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
/* read the RTC date and time registers all at once */
tmp = ds1307->read_block_data(ds1307->client,
- DS1307_REG_SECS, 7, ds1307->regs);
+ ds1307->offset, 7, ds1307->regs);
if (tmp != 7) {
dev_err(dev, "%s error %d\n", "read", tmp);
return -EIO;
@@ -353,6 +362,7 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
switch (ds1307->type) {
case ds_1337:
case ds_1339:
+ case ds_3231:
buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
break;
case ds_1340:
@@ -367,7 +377,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
"write", buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6]);
- result = ds1307->write_block_data(ds1307->client, 0, 7, buf);
+ result = ds1307->write_block_data(ds1307->client,
+ ds1307->offset, 7, buf);
if (result < 0) {
dev_err(dev, "%s error %d\n", "write", result);
return result;
@@ -624,6 +635,11 @@ static int __devinit ds1307_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int want_irq = false;
unsigned char *buf;
+ static const int bbsqi_bitpos[] = {
+ [ds_1337] = 0,
+ [ds_1339] = DS1339_BIT_BBSQI,
+ [ds_3231] = DS3231_BIT_BBSQW,
+ };
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)
&& !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
@@ -632,9 +648,12 @@ static int __devinit ds1307_probe(struct i2c_client *client,
if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
return -ENOMEM;
- ds1307->client = client;
i2c_set_clientdata(client, ds1307);
- ds1307->type = id->driver_data;
+
+ ds1307->client = client;
+ ds1307->type = id->driver_data;
+ ds1307->offset = 0;
+
buf = ds1307->regs;
if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
ds1307->read_block_data = i2c_smbus_read_i2c_block_data;
@@ -647,6 +666,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
switch (ds1307->type) {
case ds_1337:
case ds_1339:
+ case ds_3231:
/* has IRQ? */
if (ds1307->client->irq > 0 && chip->alarm) {
INIT_WORK(&ds1307->work, ds1307_work);
@@ -666,12 +686,12 @@ static int __devinit ds1307_probe(struct i2c_client *client,
ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
/* Using IRQ? Disable the square wave and both alarms.
- * For ds1339, be sure alarms can trigger when we're
- * running on Vbackup (BBSQI); we assume ds1337 will
- * ignore that bit
+ * For some variants, be sure alarms can trigger when we're
+ * running on Vbackup (BBSQI/BBSQW)
*/
if (want_irq) {
- ds1307->regs[0] |= DS1337_BIT_INTCN | DS1339_BIT_BBSQI;
+ ds1307->regs[0] |= DS1337_BIT_INTCN
+ | bbsqi_bitpos[ds1307->type];
ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
}
@@ -751,6 +771,9 @@ static int __devinit ds1307_probe(struct i2c_client *client,
hour);
}
break;
+ case ds_1388:
+		ds1307->offset = 1; /* time registers start at offset 1 */
+ break;
default:
break;
}
@@ -814,6 +837,8 @@ read_rtc:
case rx_8025:
case ds_1337:
case ds_1339:
+ case ds_1388:
+ case ds_3231:
break;
}
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 38d472b63406..717288527c6b 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -329,8 +329,7 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
if (request_irq(pdata->irq, ds1553_rtc_interrupt,
- IRQF_DISABLED | IRQF_SHARED,
- pdev->name, pdev) < 0) {
+ IRQF_DISABLED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = 0;
}
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 8bc8501bffc8..09249459e9a4 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -57,6 +57,7 @@ struct rtc_plat_data {
size_t size;
resource_size_t baseaddr;
unsigned long last_jiffies;
+ struct bin_attribute nvram_attr;
};
static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -157,18 +158,6 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
return count;
}
-static struct bin_attribute ds1742_nvram_attr = {
- .attr = {
- .name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
- },
- .read = ds1742_nvram_read,
- .write = ds1742_nvram_write,
- /* REVISIT: size in sysfs won't match actual size... if it's
- * not a constant, each RTC should have its own attribute.
- */
-};
-
static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -199,6 +188,12 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
pdata->size_nvram = pdata->size - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
+ pdata->nvram_attr.attr.name = "nvram";
+ pdata->nvram_attr.attr.mode = S_IRUGO | S_IWUSR;
+ pdata->nvram_attr.read = ds1742_nvram_read;
+ pdata->nvram_attr.write = ds1742_nvram_write;
+ pdata->nvram_attr.size = pdata->size_nvram;
+
/* turn RTC on if it was not on */
ioaddr = pdata->ioaddr_rtc;
sec = readb(ioaddr + RTC_SECONDS);
@@ -221,11 +216,13 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
pdata->rtc = rtc;
pdata->last_jiffies = jiffies;
platform_set_drvdata(pdev, pdata);
- ds1742_nvram_attr.size = max(ds1742_nvram_attr.size,
- pdata->size_nvram);
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
- if (ret)
+
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
+ if (ret) {
+ dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
goto out;
+ }
+
return 0;
out:
if (pdata->rtc)
@@ -242,7 +239,7 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
rtc_device_unregister(pdata->rtc);
iounmap(pdata->ioaddr_nvram);
release_mem_region(pdata->baseaddr, pdata->size);
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
new file mode 100644
index 000000000000..b1a29bcfdf13
--- /dev/null
+++ b/drivers/rtc/rtc-rx8025.c
@@ -0,0 +1,688 @@
+/*
+ * Driver for Epson's RTC module RX-8025 SA/NB
+ *
+ * Copyright (C) 2009 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005 by Digi International Inc.
+ * All rights reserved.
+ *
+ * Modified by fengjh at rising.com.cn
+ * <http://lists.lm-sensors.org/mailman/listinfo/lm-sensors>
+ * 2006.11
+ *
+ * Code cleanup by Sergei Poselenov, <sposelenov@emcraft.com>
+ * Converted to new style by Wolfgang Grandegger <wg@grandegger.com>
+ * Alarm and periodic interrupt added by Dmitry Rakhchev <rda@emcraft.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/rtc.h>
+
+/* Register definitions */
+#define RX8025_REG_SEC 0x00
+#define RX8025_REG_MIN 0x01
+#define RX8025_REG_HOUR 0x02
+#define RX8025_REG_WDAY 0x03
+#define RX8025_REG_MDAY 0x04
+#define RX8025_REG_MONTH 0x05
+#define RX8025_REG_YEAR 0x06
+#define RX8025_REG_DIGOFF 0x07
+#define RX8025_REG_ALWMIN 0x08
+#define RX8025_REG_ALWHOUR 0x09
+#define RX8025_REG_ALWWDAY 0x0a
+#define RX8025_REG_ALDMIN 0x0b
+#define RX8025_REG_ALDHOUR 0x0c
+/* 0x0d is reserved */
+#define RX8025_REG_CTRL1 0x0e
+#define RX8025_REG_CTRL2 0x0f
+
+#define RX8025_BIT_CTRL1_CT (7 << 0)
+/* 1 Hz periodic level irq */
+#define RX8025_BIT_CTRL1_CT_1HZ 4
+#define RX8025_BIT_CTRL1_TEST (1 << 3)
+#define RX8025_BIT_CTRL1_1224 (1 << 5)
+#define RX8025_BIT_CTRL1_DALE (1 << 6)
+#define RX8025_BIT_CTRL1_WALE (1 << 7)
+
+#define RX8025_BIT_CTRL2_DAFG (1 << 0)
+#define RX8025_BIT_CTRL2_WAFG (1 << 1)
+#define RX8025_BIT_CTRL2_CTFG (1 << 2)
+#define RX8025_BIT_CTRL2_PON (1 << 4)
+#define RX8025_BIT_CTRL2_XST (1 << 5)
+#define RX8025_BIT_CTRL2_VDET (1 << 6)
+
+/* Clock precision adjustment */
+#define RX8025_ADJ_RESOLUTION 3050 /* in ppb */
+#define RX8025_ADJ_DATA_MAX 62
+#define RX8025_ADJ_DATA_MIN -62
+
+static const struct i2c_device_id rx8025_id[] = {
+ { "rx8025", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rx8025_id);
+
+struct rx8025_data {
+ struct i2c_client *client;
+ struct rtc_device *rtc;
+ struct work_struct work;
+ u8 ctrl1;
+ unsigned exiting:1;
+};
+
+static int rx8025_read_reg(struct i2c_client *client, int number, u8 *value)
+{
+ int ret = i2c_smbus_read_byte_data(client, (number << 4) | 0x08);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "Unable to read register #%d\n", number);
+ return ret;
+ }
+
+ *value = ret;
+ return 0;
+}
+
+static int rx8025_read_regs(struct i2c_client *client,
+ int number, u8 length, u8 *values)
+{
+ int ret = i2c_smbus_read_i2c_block_data(client, (number << 4) | 0x08,
+ length, values);
+
+ if (ret != length) {
+ dev_err(&client->dev, "Unable to read registers #%d..#%d\n",
+ number, number + length - 1);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+static int rx8025_write_reg(struct i2c_client *client, int number, u8 value)
+{
+ int ret = i2c_smbus_write_byte_data(client, number << 4, value);
+
+ if (ret)
+ dev_err(&client->dev, "Unable to write register #%d\n",
+ number);
+
+ return ret;
+}
+
+static int rx8025_write_regs(struct i2c_client *client,
+ int number, u8 length, u8 *values)
+{
+ int ret = i2c_smbus_write_i2c_block_data(client, (number << 4) | 0x08,
+ length, values);
+
+ if (ret)
+ dev_err(&client->dev, "Unable to write registers #%d..#%d\n",
+ number, number + length - 1);
+
+ return ret;
+}
+
+static irqreturn_t rx8025_irq(int irq, void *dev_id)
+{
+ struct i2c_client *client = dev_id;
+ struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+
+ disable_irq_nosync(irq);
+ schedule_work(&rx8025->work);
+ return IRQ_HANDLED;
+}
+
+static void rx8025_work(struct work_struct *work)
+{
+ struct rx8025_data *rx8025 = container_of(work, struct rx8025_data,
+ work);
+ struct i2c_client *client = rx8025->client;
+ struct mutex *lock = &rx8025->rtc->ops_lock;
+ u8 status;
+
+ mutex_lock(lock);
+
+ if (rx8025_read_reg(client, RX8025_REG_CTRL2, &status))
+ goto out;
+
+ if (!(status & RX8025_BIT_CTRL2_XST))
+ dev_warn(&client->dev, "Oscillation stop was detected,"
+ "you may have to readjust the clock\n");
+
+ if (status & RX8025_BIT_CTRL2_CTFG) {
+ /* periodic */
+ status &= ~RX8025_BIT_CTRL2_CTFG;
+ local_irq_disable();
+ rtc_update_irq(rx8025->rtc, 1, RTC_PF | RTC_IRQF);
+ local_irq_enable();
+ }
+
+ if (status & RX8025_BIT_CTRL2_DAFG) {
+ /* alarm */
+		status &= ~RX8025_BIT_CTRL2_DAFG;
+ if (rx8025_write_reg(client, RX8025_REG_CTRL1,
+ rx8025->ctrl1 & ~RX8025_BIT_CTRL1_DALE))
+ goto out;
+ local_irq_disable();
+ rtc_update_irq(rx8025->rtc, 1, RTC_AF | RTC_IRQF);
+ local_irq_enable();
+ }
+
+ /* acknowledge IRQ */
+ rx8025_write_reg(client, RX8025_REG_CTRL2,
+ status | RX8025_BIT_CTRL2_XST);
+
+out:
+ if (!rx8025->exiting)
+ enable_irq(client->irq);
+
+ mutex_unlock(lock);
+}
+
+static int rx8025_get_time(struct device *dev, struct rtc_time *dt)
+{
+ struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ u8 date[7];
+ int err;
+
+ err = rx8025_read_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+ if (err)
+ return err;
+
+ dev_dbg(dev, "%s: read 0x%02x 0x%02x "
+ "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", __func__,
+ date[0], date[1], date[2], date[3], date[4],
+ date[5], date[6]);
+
+ dt->tm_sec = bcd2bin(date[RX8025_REG_SEC] & 0x7f);
+ dt->tm_min = bcd2bin(date[RX8025_REG_MIN] & 0x7f);
+ if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x3f);
+ else
+ dt->tm_hour = bcd2bin(date[RX8025_REG_HOUR] & 0x1f) % 12
+ + (date[RX8025_REG_HOUR] & 0x20 ? 12 : 0);
+
+ dt->tm_mday = bcd2bin(date[RX8025_REG_MDAY] & 0x3f);
+ dt->tm_mon = bcd2bin(date[RX8025_REG_MONTH] & 0x1f) - 1;
+ dt->tm_year = bcd2bin(date[RX8025_REG_YEAR]);
+
+ if (dt->tm_year < 70)
+ dt->tm_year += 100;
+
+ dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+ dt->tm_sec, dt->tm_min, dt->tm_hour,
+ dt->tm_mday, dt->tm_mon, dt->tm_year);
+
+ return rtc_valid_tm(dt);
+}
+
+static int rx8025_set_time(struct device *dev, struct rtc_time *dt)
+{
+ struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ u8 date[7];
+
+ /*
+ * BUG: The HW assumes every year that is a multiple of 4 to be a leap
+ * year. Next time this is wrong is 2100, which will not be a leap
+ * year.
+ */
+
+ /*
+ * Here the read-only bits are written as "0". I'm not sure if that
+ * is sound.
+ */
+ date[RX8025_REG_SEC] = bin2bcd(dt->tm_sec);
+ date[RX8025_REG_MIN] = bin2bcd(dt->tm_min);
+ if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ date[RX8025_REG_HOUR] = bin2bcd(dt->tm_hour);
+ else
+ date[RX8025_REG_HOUR] = (dt->tm_hour >= 12 ? 0x20 : 0)
+ | bin2bcd((dt->tm_hour + 11) % 12 + 1);
+
+ date[RX8025_REG_WDAY] = bin2bcd(dt->tm_wday);
+ date[RX8025_REG_MDAY] = bin2bcd(dt->tm_mday);
+ date[RX8025_REG_MONTH] = bin2bcd(dt->tm_mon + 1);
+ date[RX8025_REG_YEAR] = bin2bcd(dt->tm_year % 100);
+
+ dev_dbg(dev,
+ "%s: write 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ __func__,
+ date[0], date[1], date[2], date[3], date[4], date[5], date[6]);
+
+ return rx8025_write_regs(rx8025->client, RX8025_REG_SEC, 7, date);
+}
+
+static int rx8025_init_client(struct i2c_client *client, int *need_reset)
+{
+ struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+ u8 ctrl[2], ctrl2;
+ int need_clear = 0;
+ int err;
+
+ err = rx8025_read_regs(rx8025->client, RX8025_REG_CTRL1, 2, ctrl);
+ if (err)
+ goto out;
+
+ /* Keep test bit zero ! */
+ rx8025->ctrl1 = ctrl[0] & ~RX8025_BIT_CTRL1_TEST;
+
+ if (ctrl[1] & RX8025_BIT_CTRL2_PON) {
+ dev_warn(&client->dev, "power-on reset was detected, "
+ "you may have to readjust the clock\n");
+ *need_reset = 1;
+ }
+
+ if (ctrl[1] & RX8025_BIT_CTRL2_VDET) {
+ dev_warn(&client->dev, "a power voltage drop was detected, "
+ "you may have to readjust the clock\n");
+ *need_reset = 1;
+ }
+
+ if (!(ctrl[1] & RX8025_BIT_CTRL2_XST)) {
+ dev_warn(&client->dev, "Oscillation stop was detected,"
+ "you may have to readjust the clock\n");
+ *need_reset = 1;
+ }
+
+ if (ctrl[1] & (RX8025_BIT_CTRL2_DAFG | RX8025_BIT_CTRL2_WAFG)) {
+ dev_warn(&client->dev, "Alarm was detected\n");
+ need_clear = 1;
+ }
+
+ if (!(ctrl[1] & RX8025_BIT_CTRL2_CTFG))
+ need_clear = 1;
+
+ if (*need_reset || need_clear) {
+ ctrl2 = ctrl[1];
+ ctrl2 &= ~(RX8025_BIT_CTRL2_PON | RX8025_BIT_CTRL2_VDET |
+ RX8025_BIT_CTRL2_CTFG | RX8025_BIT_CTRL2_WAFG |
+ RX8025_BIT_CTRL2_DAFG);
+ ctrl2 |= RX8025_BIT_CTRL2_XST;
+
+ err = rx8025_write_reg(client, RX8025_REG_CTRL2, ctrl2);
+ }
+out:
+ return err;
+}
+
+/* Alarm support */
+static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ struct i2c_client *client = rx8025->client;
+ u8 ctrl2, ald[2];
+ int err;
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ err = rx8025_read_regs(client, RX8025_REG_ALDMIN, 2, ald);
+ if (err)
+ return err;
+
+ err = rx8025_read_reg(client, RX8025_REG_CTRL2, &ctrl2);
+ if (err)
+ return err;
+
+ dev_dbg(dev, "%s: read alarm 0x%02x 0x%02x ctrl2 %02x\n",
+ __func__, ald[0], ald[1], ctrl2);
+
+ /* Hardware alarm precision is 1 minute! */
+ t->time.tm_sec = 0;
+ t->time.tm_min = bcd2bin(ald[0] & 0x7f);
+ if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ t->time.tm_hour = bcd2bin(ald[1] & 0x3f);
+ else
+ t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
+ + (ald[1] & 0x20 ? 12 : 0);
+
+ t->time.tm_wday = -1;
+ t->time.tm_mday = -1;
+ t->time.tm_mon = -1;
+ t->time.tm_year = -1;
+
+ dev_dbg(dev, "%s: date: %ds %dm %dh %dmd %dm %dy\n",
+ __func__,
+ t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
+ t->time.tm_mday, t->time.tm_mon, t->time.tm_year);
+ t->enabled = !!(rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE);
+ t->pending = (ctrl2 & RX8025_BIT_CTRL2_DAFG) && t->enabled;
+
+ return err;
+}
+
+static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ u8 ald[2];
+ int err;
+
+ if (client->irq <= 0)
+ return -EINVAL;
+
+ /* Hardware alarm precision is 1 minute! */
+ ald[0] = bin2bcd(t->time.tm_min);
+ if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
+ ald[1] = bin2bcd(t->time.tm_hour);
+ else
+ ald[1] = (t->time.tm_hour >= 12 ? 0x20 : 0)
+ | bin2bcd((t->time.tm_hour + 11) % 12 + 1);
+
+ dev_dbg(dev, "%s: write 0x%02x 0x%02x\n", __func__, ald[0], ald[1]);
+
+ if (rx8025->ctrl1 & RX8025_BIT_CTRL1_DALE) {
+ rx8025->ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
+ err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ rx8025->ctrl1);
+ if (err)
+ return err;
+ }
+ err = rx8025_write_regs(rx8025->client, RX8025_REG_ALDMIN, 2, ald);
+ if (err)
+ return err;
+
+ if (t->enabled) {
+ rx8025->ctrl1 |= RX8025_BIT_CTRL1_DALE;
+ err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ rx8025->ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct rx8025_data *rx8025 = dev_get_drvdata(dev);
+ u8 ctrl1;
+ int err;
+
+ ctrl1 = rx8025->ctrl1;
+ if (enabled)
+ ctrl1 |= RX8025_BIT_CTRL1_DALE;
+ else
+ ctrl1 &= ~RX8025_BIT_CTRL1_DALE;
+
+ if (ctrl1 != rx8025->ctrl1) {
+ rx8025->ctrl1 = ctrl1;
+ err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ rx8025->ctrl1);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int rx8025_irq_set_state(struct device *dev, int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+ int ctrl1;
+ int err;
+
+ if (client->irq <= 0)
+ return -ENXIO;
+
+ ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT;
+ if (enabled)
+ ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ;
+ if (ctrl1 != rx8025->ctrl1) {
+ rx8025->ctrl1 = ctrl1;
+ err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
+ rx8025->ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct rtc_class_ops rx8025_rtc_ops = {
+ .read_time = rx8025_get_time,
+ .set_time = rx8025_set_time,
+ .read_alarm = rx8025_read_alarm,
+ .set_alarm = rx8025_set_alarm,
+ .alarm_irq_enable = rx8025_alarm_irq_enable,
+ .irq_set_state = rx8025_irq_set_state,
+};
+
+/*
+ * Clock precision adjustment support
+ *
+ * According to the RX8025 SA/NB application manual the frequency and
+ * temperature characteristics can be approximated using the following
+ * equation:
+ *
+ * df = a * (ut - t)**2
+ *
+ * df: Frequency deviation in any temperature
+ * a : Coefficient = (-35 +-5) * 10**-9
+ * ut: Ultimate temperature in degree = +25 +-5 degree
+ * t : Any temperature in degree
+ *
+ * Note that the clock adjustment must be entered in ppb and is the
+ * negative of the frequency deviation df.
+ */
+static int rx8025_get_clock_adjust(struct device *dev, int *adj)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 digoff;
+ int err;
+
+ err = rx8025_read_reg(client, RX8025_REG_DIGOFF, &digoff);
+ if (err)
+ return err;
+
+ *adj = digoff >= 64 ? digoff - 128 : digoff;
+ if (*adj > 0)
+ (*adj)--;
+ *adj *= -RX8025_ADJ_RESOLUTION;
+
+ return 0;
+}
+
+static int rx8025_set_clock_adjust(struct device *dev, int adj)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 digoff;
+ int err;
+
+ adj /= -RX8025_ADJ_RESOLUTION;
+ if (adj > RX8025_ADJ_DATA_MAX)
+ adj = RX8025_ADJ_DATA_MAX;
+ else if (adj < RX8025_ADJ_DATA_MIN)
+ adj = RX8025_ADJ_DATA_MIN;
+ else if (adj > 0)
+ adj++;
+ else if (adj < 0)
+ adj += 128;
+ digoff = adj;
+
+ err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff);
+ if (err)
+ return err;
+
+ dev_dbg(dev, "%s: write 0x%02x\n", __func__, digoff);
+
+ return 0;
+}
+
+static ssize_t rx8025_sysfs_show_clock_adjust(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, adj;
+
+ err = rx8025_get_clock_adjust(dev, &adj);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", adj);
+}
+
+static ssize_t rx8025_sysfs_store_clock_adjust(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int adj, err;
+
+ if (sscanf(buf, "%i", &adj) != 1)
+ return -EINVAL;
+
+ err = rx8025_set_clock_adjust(dev, adj);
+
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(clock_adjust_ppb, S_IRUGO | S_IWUSR,
+ rx8025_sysfs_show_clock_adjust,
+ rx8025_sysfs_store_clock_adjust);
+
+static int rx8025_sysfs_register(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_clock_adjust_ppb);
+}
+
+static void rx8025_sysfs_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_clock_adjust_ppb);
+}
+
+static int __devinit rx8025_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct rx8025_data *rx8025;
+ int err, need_reset = 0;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(&adapter->dev,
+ "doesn't support required functionality\n");
+ err = -EIO;
+ goto errout;
+ }
+
+ rx8025 = kzalloc(sizeof(*rx8025), GFP_KERNEL);
+ if (!rx8025) {
+ dev_err(&adapter->dev, "failed to alloc memory\n");
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ rx8025->client = client;
+ i2c_set_clientdata(client, rx8025);
+ INIT_WORK(&rx8025->work, rx8025_work);
+
+ err = rx8025_init_client(client, &need_reset);
+ if (err)
+ goto errout_free;
+
+ if (need_reset) {
+ struct rtc_time tm;
+ dev_info(&client->dev,
+ "bad conditions detected, resetting date\n");
+ rtc_time_to_tm(0, &tm); /* 1970/1/1 */
+ rx8025_set_time(&client->dev, &tm);
+ }
+
+ rx8025->rtc = rtc_device_register(client->name, &client->dev,
+ &rx8025_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rx8025->rtc)) {
+ err = PTR_ERR(rx8025->rtc);
+ dev_err(&client->dev, "unable to register the class device\n");
+ goto errout_free;
+ }
+
+ if (client->irq > 0) {
+ dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
+ err = request_irq(client->irq, rx8025_irq,
+ 0, "rx8025", client);
+ if (err) {
+ dev_err(&client->dev, "unable to request IRQ\n");
+ goto errout_reg;
+ }
+ }
+
+ rx8025->rtc->irq_freq = 1;
+ rx8025->rtc->max_user_freq = 1;
+
+ err = rx8025_sysfs_register(&client->dev);
+ if (err)
+ goto errout_irq;
+
+ return 0;
+
+errout_irq:
+ if (client->irq > 0)
+ free_irq(client->irq, client);
+
+errout_reg:
+ rtc_device_unregister(rx8025->rtc);
+
+errout_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(rx8025);
+
+errout:
+ dev_err(&adapter->dev, "probing for rx8025 failed\n");
+ return err;
+}
+
+static int __devexit rx8025_remove(struct i2c_client *client)
+{
+ struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+ struct mutex *lock = &rx8025->rtc->ops_lock;
+
+ if (client->irq > 0) {
+ mutex_lock(lock);
+ rx8025->exiting = 1;
+ mutex_unlock(lock);
+
+ free_irq(client->irq, client);
+ flush_scheduled_work();
+ }
+
+ rx8025_sysfs_unregister(&client->dev);
+ rtc_device_unregister(rx8025->rtc);
+ i2c_set_clientdata(client, NULL);
+ kfree(rx8025);
+ return 0;
+}
+
+static struct i2c_driver rx8025_driver = {
+ .driver = {
+ .name = "rtc-rx8025",
+ .owner = THIS_MODULE,
+ },
+ .probe = rx8025_probe,
+ .remove = __devexit_p(rx8025_remove),
+ .id_table = rx8025_id,
+};
+
+static int __init rx8025_init(void)
+{
+ return i2c_add_driver(&rx8025_driver);
+}
+
+static void __exit rx8025_exit(void)
+{
+ i2c_del_driver(&rx8025_driver);
+}
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RX-8025 SA/NB RTC driver");
+MODULE_LICENSE("GPL");
+
+module_init(rx8025_init);
+module_exit(rx8025_exit);
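The sign convention in the clock-adjustment comment above is easy to get backwards, so a minimal worked sketch may help; the 40 degC operating point and the helper below are purely illustrative and are not part of the patch.

/*
 * Illustrative sketch only (not part of the driver): with
 * a = -35 ppb/degC^2 and ut = 25 degC, running at t = 40 degC gives
 *
 *     df = a * (ut - t)^2 = -35 * (25 - 40)^2 = -7875 ppb
 *
 * i.e. the oscillator runs about 7.9 ppm slow, and the value written
 * to the clock_adjust_ppb attribute is the negative of that deviation.
 */
static long rx8025_example_adjust_ppb(long t_degc)
{
	long dt = 25 - t_degc;		/* ut - t */
	long df_ppb = -35 * dt * dt;	/* a * (ut - t)^2, in ppb */

	return -df_ppb;			/* adjustment = -deviation */
}
/* rx8025_example_adjust_ppb(40) == 7875 */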
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 4ee4857ff207..4a6ed1104fbb 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -261,10 +261,8 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
- IRQF_DISABLED | IRQF_SHARED,
- pdev->name, &pdev->dev) < 0) {
+ IRQF_DISABLED, pdev->name, &pdev->dev) < 0)
return -EBUSY;
- }
rtc = rtc_device_register(pdev->name, &pdev->dev,
&tx4939_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 81d7f268418a..691cecd03b83 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -655,7 +655,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
for (dm = dev->mc_list; dm; dm = dm->next)
qeth_l2_add_mc(card, dm->da_addr, 0);
- list_for_each_entry(ha, &dev->uc_list, list)
+ list_for_each_entry(ha, &dev->uc.list, list)
qeth_l2_add_mc(card, ha->addr, 1);
spin_unlock_bh(&card->mclock);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 3ac27ee47396..2ccbd185a5fb 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -470,6 +470,12 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
if (!adapter)
return -ENOMEM;
+ adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
+ if (!adapter->gs) {
+ kfree(adapter);
+ return -ENOMEM;
+ }
+
ccw_device->handler = NULL;
adapter->ccw_device = ccw_device;
atomic_set(&adapter->refcount, 0);
@@ -523,8 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
goto sysfs_failed;
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-
- zfcp_fc_nameserver_init(adapter);
+ zfcp_fc_wka_ports_init(adapter);
if (!zfcp_adapter_scsi_register(adapter))
return 0;
@@ -571,6 +576,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
+ kfree(adapter->gs);
kfree(adapter);
}
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2074d45dbf6c..49d0532bca1c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -22,6 +22,8 @@
#include <linux/syscalls.h>
#include <linux/scatterlist.h>
#include <linux/ioctl.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
@@ -29,6 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
#include <asm/qdio.h>
#include <asm/debug.h>
@@ -228,11 +231,6 @@ struct zfcp_ls_adisc {
/* FC-PH/FC-GS well-known address identifiers for generic services */
#define ZFCP_DID_WKA 0xFFFFF0
-#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
-#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
-#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
-#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8
-#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7
/* remote port status */
#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
@@ -376,6 +374,14 @@ struct zfcp_wka_port {
struct delayed_work work;
};
+struct zfcp_wka_ports {
+ struct zfcp_wka_port ms; /* management service */
+ struct zfcp_wka_port ts; /* time service */
+ struct zfcp_wka_port ds; /* directory service */
+ struct zfcp_wka_port as; /* alias service */
+ struct zfcp_wka_port ks; /* key distribution service */
+};
+
struct zfcp_qdio_queue {
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
u8 first; /* index of next free bfr in queue */
@@ -461,7 +467,7 @@ struct zfcp_adapter {
actions */
u32 erp_low_mem_count; /* nr of erp actions waiting
for memory */
- struct zfcp_wka_port nsp; /* adapter's nameserver */
+ struct zfcp_wka_ports *gs; /* generic services */
debug_info_t *rec_dbf;
debug_info_t *hba_dbf;
debug_info_t *san_dbf; /* debug feature areas */
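For reference, the ZFCP_DID_* constants removed above correspond one-to-one to the generic well-known fabric address identifiers from <scsi/fc/fc_fs.h>, which the new struct zfcp_wka_ports is initialised with in the zfcp_fc.c hunk further down; the summary below is for orientation only and adds no new behaviour.

/*
 *   gs->ms  ->  FC_FID_MGMT_SERV  (0xFFFFFA, management service)
 *   gs->ts  ->  FC_FID_TIME_SERV  (0xFFFFFB, time service)
 *   gs->ds  ->  FC_FID_DIR_SERV   (0xFFFFFC, directory/name service)
 *   gs->as  ->  FC_FID_ALIASES    (0xFFFFF8, alias service)
 *   gs->ks  ->  FC_FID_SEC_KEY    (0xFFFFF7, key distribution service)
 */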
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e50ea465bc2b..8030e25152fb 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -719,7 +719,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
zfcp_qdio_close(adapter);
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
- zfcp_fc_wka_port_force_offline(&adapter->nsp);
+ zfcp_fc_wka_port_force_offline(&adapter->gs->ds);
/* all ports and units are closed */
zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 120a9a1c81f7..3044c6010306 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -106,8 +106,12 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
extern void zfcp_test_link(struct zfcp_port *);
extern void zfcp_fc_link_test_work(struct work_struct *);
-extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
+extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *);
+extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
+extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
+extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
+
/* zfcp_fsf.c */
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 35493a82d2a8..2f0705d76b72 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -120,14 +120,13 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
schedule_delayed_work(&wka_port->work, HZ / 100);
}
-void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
+static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
+ struct zfcp_adapter *adapter)
{
- struct zfcp_wka_port *wka_port = &adapter->nsp;
-
init_waitqueue_head(&wka_port->completion_wq);
wka_port->adapter = adapter;
- wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
+ wka_port->d_id = d_id;
wka_port->status = ZFCP_WKA_PORT_OFFLINE;
atomic_set(&wka_port->refcount, 0);
@@ -143,6 +142,17 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
mutex_unlock(&wka->mutex);
}
+void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter)
+{
+ struct zfcp_wka_ports *gs = adapter->gs;
+
+ zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter);
+ zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter);
+ zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter);
+ zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);
+ zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter);
+}
+
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
struct fcp_rscn_element *elem)
{
@@ -282,7 +292,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
/* setup parameters for send generic command */
gid_pn->port = erp_action->port;
- gid_pn->ct.wka_port = &adapter->nsp;
+ gid_pn->ct.wka_port = &adapter->gs->ds;
gid_pn->ct.handler = zfcp_fc_ns_handler;
gid_pn->ct.handler_data = (unsigned long) &compl_rec;
gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
@@ -329,13 +339,13 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
memset(gid_pn, 0, sizeof(*gid_pn));
- ret = zfcp_wka_port_get(&adapter->nsp);
+ ret = zfcp_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
- zfcp_wka_port_put(&adapter->nsp);
+ zfcp_wka_port_put(&adapter->gs->ds);
out:
mempool_free(gid_pn, adapter->pool.data_gid_pn);
return ret;
@@ -525,7 +535,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
req->fc4_type = ZFCP_CT_SCSI_FCP;
/* prepare zfcp_send_ct */
- ct->wka_port = &adapter->nsp;
+ ct->wka_port = &adapter->gs->ds;
ct->handler = zfcp_fc_ns_handler;
ct->handler_data = (unsigned long)&compl_rec;
ct->timeout = 10;
@@ -644,7 +654,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
return 0;
- ret = zfcp_wka_port_get(&adapter->nsp);
+ ret = zfcp_wka_port_get(&adapter->gs->ds);
if (ret)
return ret;
@@ -666,7 +676,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
}
zfcp_free_sg_env(gpn_ft, buf_num);
out:
- zfcp_wka_port_put(&adapter->nsp);
+ zfcp_wka_port_put(&adapter->gs->ds);
return ret;
}
@@ -675,3 +685,158 @@ void _zfcp_scan_ports_later(struct work_struct *work)
{
zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
}
+
+struct zfcp_els_fc_job {
+ struct zfcp_send_els els;
+ struct fc_bsg_job *job;
+};
+
+static void zfcp_fc_generic_els_handler(unsigned long data)
+{
+ struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data;
+ struct fc_bsg_job *job = els_fc_job->job;
+ struct fc_bsg_reply *reply = job->reply;
+
+ if (els_fc_job->els.status) {
+ /* request rejected or timed out */
+ reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
+ goto out;
+ }
+
+ reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+
+out:
+ job->state_flags = FC_RQST_STATE_DONE;
+ job->job_done(job);
+ kfree(els_fc_job);
+}
+
+int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
+{
+ struct zfcp_els_fc_job *els_fc_job;
+ struct fc_rport *rport = job->rport;
+ struct Scsi_Host *shost;
+ struct zfcp_adapter *adapter;
+ struct zfcp_port *port;
+ u8 *port_did;
+
+ shost = rport ? rport_to_shost(rport) : job->shost;
+ adapter = (struct zfcp_adapter *)shost->hostdata[0];
+
+ if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+ return -EINVAL;
+
+ els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL);
+ if (!els_fc_job)
+ return -ENOMEM;
+
+ els_fc_job->els.adapter = adapter;
+ if (rport) {
+ read_lock_irq(&zfcp_data.config_lock);
+ port = rport->dd_data;
+ if (port)
+ els_fc_job->els.d_id = port->d_id;
+ read_unlock_irq(&zfcp_data.config_lock);
+ if (!port) {
+ kfree(els_fc_job);
+ return -EINVAL;
+ }
+ } else {
+ port_did = job->request->rqst_data.h_els.port_id;
+ els_fc_job->els.d_id = (port_did[0] << 16) +
+ (port_did[1] << 8) + port_did[2];
+ }
+
+ els_fc_job->els.req = job->request_payload.sg_list;
+ els_fc_job->els.resp = job->reply_payload.sg_list;
+ els_fc_job->els.handler = zfcp_fc_generic_els_handler;
+ els_fc_job->els.handler_data = (unsigned long) els_fc_job;
+ els_fc_job->job = job;
+
+ return zfcp_fsf_send_els(&els_fc_job->els);
+}
+
+struct zfcp_ct_fc_job {
+ struct zfcp_send_ct ct;
+ struct fc_bsg_job *job;
+};
+
+static void zfcp_fc_generic_ct_handler(unsigned long data)
+{
+ struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data;
+ struct fc_bsg_job *job = ct_fc_job->job;
+
+ job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ?
+ FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
+ job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ job->state_flags = FC_RQST_STATE_DONE;
+ job->job_done(job);
+
+ zfcp_wka_port_put(ct_fc_job->ct.wka_port);
+
+ kfree(ct_fc_job);
+}
+
+int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
+{
+ int ret;
+ u8 gs_type;
+ struct fc_rport *rport = job->rport;
+ struct Scsi_Host *shost;
+ struct zfcp_adapter *adapter;
+ struct zfcp_ct_fc_job *ct_fc_job;
+ u32 preamble_word1;
+
+ shost = rport ? rport_to_shost(rport) : job->shost;
+
+ adapter = (struct zfcp_adapter *)shost->hostdata[0];
+ if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
+ return -EINVAL;
+
+ ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL);
+ if (!ct_fc_job)
+ return -ENOMEM;
+
+ preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+ gs_type = (preamble_word1 & 0xff000000) >> 24;
+
+ switch (gs_type) {
+ case FC_FST_ALIAS:
+ ct_fc_job->ct.wka_port = &adapter->gs->as;
+ break;
+ case FC_FST_MGMT:
+ ct_fc_job->ct.wka_port = &adapter->gs->ms;
+ break;
+ case FC_FST_TIME:
+ ct_fc_job->ct.wka_port = &adapter->gs->ts;
+ break;
+ case FC_FST_DIR:
+ ct_fc_job->ct.wka_port = &adapter->gs->ds;
+ break;
+ default:
+ kfree(ct_fc_job);
+ return -EINVAL; /* no such service */
+ }
+
+ ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port);
+ if (ret) {
+ kfree(ct_fc_job);
+ return ret;
+ }
+
+ ct_fc_job->ct.req = job->request_payload.sg_list;
+ ct_fc_job->ct.resp = job->reply_payload.sg_list;
+ ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT;
+ ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
+ ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
+ ct_fc_job->ct.completion = NULL;
+ ct_fc_job->job = job;
+
+ ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL);
+ if (ret) {
+ kfree(ct_fc_job);
+ zfcp_wka_port_put(ct_fc_job->ct.wka_port);
+ }
+ return ret;
+}
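To keep the CT routing in zfcp_fc_execute_ct_fc_job() above at a glance, the GS_Type byte taken from the top of preamble_word1 selects the well-known-address port as follows (a summary only, assuming the enum fc_gs_type values from <scsi/fc/fc_gs.h>):

/*
 *   FC_FST_ALIAS (0xf8)  ->  adapter->gs->as   (alias service)
 *   FC_FST_MGMT  (0xfa)  ->  adapter->gs->ms   (management service)
 *   FC_FST_TIME  (0xfb)  ->  adapter->gs->ts   (time service)
 *   FC_FST_DIR   (0xfc)  ->  adapter->gs->ds   (directory/name service)
 *   anything else        ->  -EINVAL (no such service)
 */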
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e6dae3744e79..c57658f3d34f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1146,7 +1146,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
case FSF_RESPONSE_SIZE_TOO_LARGE:
break;
case FSF_ACCESS_DENIED:
- zfcp_fsf_access_denied_port(req, port);
+ if (port)
+ zfcp_fsf_access_denied_port(req, port);
break;
case FSF_SBAL_MISMATCH:
/* should never occure, avoided in zfcp_fsf_send_els */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7d0da230eb63..967ede73f4c5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -623,6 +623,20 @@ void zfcp_scsi_scan(struct work_struct *work)
zfcp_unit_put(unit);
}
+static int zfcp_execute_fc_job(struct fc_bsg_job *job)
+{
+ switch (job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ return zfcp_fc_execute_els_fc_job(job);
+ case FC_BSG_RPT_CT:
+ case FC_BSG_HST_CT:
+ return zfcp_fc_execute_ct_fc_job(job);
+ default:
+ return -EINVAL;
+ }
+}
+
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
@@ -644,6 +658,7 @@ struct fc_function_template zfcp_transport_functions = {
.dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
.terminate_rport_io = zfcp_scsi_terminate_rport_io,
.show_host_port_state = 1,
+ .bsg_request = zfcp_execute_fc_job,
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0f829b3b8ab7..75b23317bd26 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -627,19 +627,15 @@ ahd_linux_target_alloc(struct scsi_target *starget)
starget->id, &tstate);
if ((flags & CFPACKETIZED) == 0) {
- /* Do not negotiate packetized transfers */
- spi_rd_strm(starget) = 0;
- spi_pcomp_en(starget) = 0;
- spi_rti(starget) = 0;
- spi_wr_flow(starget) = 0;
- spi_hold_mcs(starget) = 0;
+ /* don't negotiate packetized (IU) transfers */
+ spi_max_iu(starget) = 0;
} else {
if ((ahd->features & AHD_RTI) == 0)
spi_rti(starget) = 0;
}
if ((flags & CFQAS) == 0)
- spi_qas(starget) = 0;
+ spi_max_qas(starget) = 0;
/* Transinfo values have been set to BIOS settings */
spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 820d428ae839..b62b482e55e7 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
select SCSI_ISCSI_ATTRS
select CNIC
+ depends on PCI
---help---
This driver supports iSCSI offload for the Broadcom NetXtreme II
devices.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 540569849099..1877d9811831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -457,10 +457,6 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
struct lpfc_nodelist *);
- int (*lpfc_scsi_prep_task_mgmt_cmd)
- (struct lpfc_vport *, struct lpfc_scsi_buf *,
- unsigned int, uint8_t);
-
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d73e677201f8..fc07be5fbce9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3113,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ return -EPERM;
+
if ((off + count) > FF_REG_AREA_SIZE)
return -ERANGE;
@@ -3163,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ return -EPERM;
+
if (off > FF_REG_AREA_SIZE)
return -ERANGE;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1dbccfd3d022..0e532f072eb3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1732,7 +1732,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
uint32_t *ptr, str[4];
uint8_t *fwname;
- if (vp->rev.rBit) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ sprintf(fwrevision, "%s", vp->rev.opFwName);
+ else if (vp->rev.rBit) {
if (psli->sli_flag & LPFC_SLI_ACTIVE)
rev = vp->rev.sli2FwRev;
else
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6bdeb14878a2..f72fdf23bf1b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -168,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
if (elsiocb == NULL)
return NULL;
+ /*
+ * If this command is for the fabric controller and the HBA is
+ * running in FIP mode, send FLOGI, FDISC and LOGO as FIP frames.
+ */
+ if ((did == Fabric_DID) &&
+ bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
+ ((elscmd == ELS_CMD_FLOGI) ||
+ (elscmd == ELS_CMD_FDISC) ||
+ (elscmd == ELS_CMD_LOGO)))
+ elsiocb->iocb_flag |= LPFC_FIP_ELS;
+ else
+ elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
+
icmd = &elsiocb->iocb;
/* fill in BDEs for command */
@@ -6108,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd->un.elsreq64.myID = 0;
icmd->un.elsreq64.fl = 1;
- /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
- icmd->ulpCt_h = 1;
- icmd->ulpCt_l = 0;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* FDISC needs to be 1 for WQE VPI */
+ elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
+ elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1;
+ /* Set the ulpContext to the vpi */
+ elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
+ } else {
+ /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 35c41ae75be2..ed46b24a3380 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1197,6 +1197,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
{
struct lpfc_fcf_conn_entry *conn_entry;
+ /* If FCF not available return 0 */
+ if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
+ !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
+ return 0;
+
if (!phba->cfg_enable_fip) {
*boot_flag = 0;
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@@ -1216,6 +1221,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
*boot_flag = 0;
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
new_fcf_record);
+
+ /*
+ * When there are no FCF connect entries, use driver's default
+ * addressing mode - FPMA.
+ */
+ if (*addr_mode & LPFC_FCF_FPMA)
+ *addr_mode = LPFC_FCF_FPMA;
+
*vlan_id = 0xFFFF;
return 1;
}
@@ -1241,6 +1254,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
}
/*
+ * If connection record does not support any addressing mode,
+ * skip the FCF record.
+ */
+ if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
+ & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
+ continue;
+
+ /*
* Check if the connection record specifies a required
* addressing mode.
*/
@@ -1272,6 +1293,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
else
*boot_flag = 0;
+ /*
+ * If user did not specify any addressing mode, or if the
+ * preferred addressing mode specified by user is not supported
+ * by FCF, allow fabric to pick the addressing mode.
+ */
*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
new_fcf_record);
/*
@@ -1297,12 +1323,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
(*addr_mode & LPFC_FCF_FPMA))
*addr_mode = LPFC_FCF_FPMA;
- /*
- * If user did not specify any addressing mode, use FPMA if
- * possible else use SPMA.
- */
- else if (*addr_mode & LPFC_FCF_FPMA)
- *addr_mode = LPFC_FCF_FPMA;
if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
*vlan_id = conn_entry->conn_rec.vlan_tag;
@@ -1864,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->fc_flag &= ~FC_BYPASSED_MODE;
spin_unlock_irq(shost->host_lock);
- if (((phba->fc_eventTag + 1) < la->eventTag) ||
+ if ((phba->fc_eventTag < la->eventTag) ||
(phba->fc_eventTag == la->eventTag)) {
phba->fc_stat.LinkMultiEvent++;
if (la->attType == AT_LINK_UP)
@@ -2925,6 +2945,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_no_rpi(phba, ndlp);
ndlp->nlp_rpi = 0;
ndlp->nlp_flag &= ~NLP_RPI_VALID;
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
return 1;
}
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 02aa016b93e9..8a3a026667e4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1183,7 +1183,6 @@ typedef struct {
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
#define PCI_DEVICE_ID_TIGERSHARK 0x0704
-#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 39c34b3ad29d..2995d128f07f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -422,9 +422,9 @@ struct lpfc_wqe_generic{
#define lpfc_wqe_gen_pri_WORD word10
uint32_t word11;
#define lpfc_wqe_gen_cq_id_SHIFT 16
-#define lpfc_wqe_gen_cq_id_MASK 0x000003FF
+#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
#define lpfc_wqe_gen_cq_id_WORD word11
-#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
+#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
#define lpfc_wqe_gen_wqec_SHIFT 7
#define lpfc_wqe_gen_wqec_MASK 0x00000001
#define lpfc_wqe_gen_wqec_WORD word11
@@ -1128,7 +1128,7 @@ struct fcf_record {
#define lpfc_fcf_record_mac_5_WORD word4
#define lpfc_fcf_record_fcf_avail_SHIFT 16
#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
-#define lpfc_fcf_record_fc_avail_WORD word4
+#define lpfc_fcf_record_fcf_avail_WORD word4
#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
#define lpfc_fcf_record_mac_addr_prov_WORD word4
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2f5907f92eea..fc67cc65c63b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -428,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
phba->cfg_hba_queue_depth =
- mb->un.varRdConfig.max_xri + 1;
+ (mb->un.varRdConfig.max_xri + 1) -
+ lpfc_sli4_get_els_iocb_cnt(phba);
phba->lmt = mb->un.varRdConfig.lmt;
@@ -1646,10 +1647,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
oneConnect = 1;
m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
break;
- case PCI_DEVICE_ID_TIGERSHARK_S:
- oneConnect = 1;
- m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
- break;
default:
m = (typeof(m)){ NULL };
break;
@@ -3543,6 +3540,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the allocated rpi headers. */
lpfc_sli4_remove_rpi_hdrs(phba);
+ lpfc_sli4_remove_rpis(phba);
/* Free the ELS sgl list */
lpfc_free_active_sgl(phba);
@@ -7184,16 +7182,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
- if (max_xri <= 100)
- return 4;
- else if (max_xri <= 256)
- return 8;
- else if (max_xri <= 512)
- return 16;
- else if (max_xri <= 1024)
- return 32;
- else
- return 48;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (max_xri <= 100)
+ return 4;
+ else if (max_xri <= 256)
+ return 8;
+ else if (max_xri <= 512)
+ return 16;
+ else if (max_xri <= 1024)
+ return 32;
+ else
+ return 48;
+ } else
+ return 0;
}
/**
@@ -7642,7 +7643,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
switch (dev_id) {
case PCI_DEVICE_ID_TIGERSHARK:
- case PCI_DEVICE_ID_TIGERSHARK_S:
rc = lpfc_pci_probe_one_s4(pdev, pid);
break;
default:
@@ -7941,8 +7941,6 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
- PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
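A short worked view of the lpfc_sli4_get_els_iocb_cnt() change above may clarify why the new subtraction in lpfc_config_port_post() is safe on older HBAs; this is a sketch derived from the visible hunks only, with arbitrary max_xri figures.

/*
 * lpfc_sli4_get_els_iocb_cnt() after this patch:
 *
 *   sli_rev != LPFC_SLI_REV4                     -> 0
 *   sli_rev == LPFC_SLI_REV4, max_xri <= 100     -> 4
 *                             max_xri <= 256     -> 8
 *                             max_xri <= 512     -> 16
 *                             max_xri <= 1024    -> 32
 *                             otherwise          -> 48
 *
 * Since the helper now returns 0 on pre-SLI-4 HBAs, the queue-depth
 * ceiling in lpfc_config_port_post() stays at max_xri + 1 there,
 * e.g. max_xri = 512 still yields 513.
 */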
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b9b451c09010..3423571dd1b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1631,6 +1631,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
/* In case of malloc fails, proceed with whatever we have */
if (!viraddr)
break;
+ memset(viraddr, 0, PAGE_SIZE);
mbox->sge_array->addr[pagen] = viraddr;
/* Keep the first page for later sub-header construction */
if (pagen == 0)
@@ -1715,8 +1716,10 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
- /* Virtual fabrics and FIPs are not supported yet. */
- bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+ if (phba->cfg_enable_fip)
+ bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+ else
+ bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 09f659f77bb3..3e74136f1ede 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -497,7 +497,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if ((ndlp->nlp_type & NLP_FABRIC) &&
+ if ((ndlp->nlp_DID == Fabric_DID) &&
vport->port_type == LPFC_NPIV_PORT) {
lpfc_linkdown_port(vport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7991ba1980ae..da59c4f0168f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -116,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
}
/**
+ * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called from the lpfc_prep_task_mgmt_cmd function to
+ * set the last bit in the response sge entry.
+ **/
+static void
+lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ if (sgl) {
+ sgl += 1;
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ }
+}
+
+/**
* lpfc_update_stats - Update statistical data for the command completion
* @phba: Pointer to HBA object.
* @lpfc_cmd: lpfc scsi command object pointer.
@@ -1978,7 +1999,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
}
/**
- * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
+ * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
* @phba: The HBA for which this call is being executed.
* @psb: The scsi buffer which is going to be un-mapped.
*
@@ -1986,7 +2007,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
* field of @lpfc_cmd for device with SLI-3 interface spec.
**/
static void
-lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
/*
* There are only two special cases to consider. (1) the scsi command
@@ -2003,36 +2024,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
}
/**
- * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
- * @phba: The Hba for which this call is being executed.
- * @psb: The scsi buffer which is going to be un-mapped.
- *
- * This routine does DMA un-mapping of scatter gather list of scsi command
- * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
- * remove the sgl for this scsi buffer then we will do it here. For now
- * we should be able to just call the sli3 unprep routine.
- **/
-static void
-lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
-{
- lpfc_scsi_unprep_dma_buf_s3(phba, psb);
-}
-
-/**
- * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
- * @phba: The Hba for which this call is being executed.
- * @psb: The scsi buffer which is going to be un-mapped.
- *
- * This routine does DMA un-mapping of scatter gather list of scsi command
- * field of @lpfc_cmd for device with SLI-4 interface spec.
- **/
-static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
-{
- phba->lpfc_scsi_unprep_dma_buf(phba, psb);
-}
-
-/**
* lpfc_handler_fcp_err - FCP response handler
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -2461,7 +2452,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
}
/**
- * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev
+ * lpfc_scsi_prep_cmnd - Convert scsi cmnd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: The scsi command which needs to send.
* @pnode: Pointer to lpfc_nodelist.
@@ -2470,7 +2461,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
* to transfer for device with SLI3 interface spec.
**/
static void
-lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
{
struct lpfc_hba *phba = vport->phba;
@@ -2558,46 +2549,7 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: The scsi command which needs to send.
- * @pnode: Pointer to lpfc_nodelist.
- *
- * This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer for device with SLI4 interface spec.
- **/
-static void
-lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
- struct lpfc_nodelist *pnode)
-{
- /*
- * The prep cmnd routines do not touch the sgl or its
- * entries. We may not have to do anything different.
- * I will leave this function in place until we can
- * run some IO through the driver and determine if changes
- * are needed.
- */
- return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
-}
-
-/**
- * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: The scsi command which needs to send.
- * @pnode: Pointer to lpfc_nodelist.
- *
- * This routine wraps the actual convert SCSI cmnd function pointer from
- * the lpfc_hba struct.
- **/
-static inline void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
- struct lpfc_nodelist *pnode)
-{
- vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
-}
-
-/**
- * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
* @lun: Logical unit number.
@@ -2611,7 +2563,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
* 1 - Success
**/
static int
-lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
@@ -2653,68 +2605,13 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
* The driver will provide the timeout mechanism.
*/
piocb->ulpTimeout = 0;
- } else {
+ } else
piocb->ulpTimeout = lpfc_cmd->timeout;
- }
-
- return 1;
-}
-
-/**
- * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
- * @lun: Logical unit number.
- * @task_mgmt_cmd: SCSI task management command.
- *
- * This routine creates FCP information unit corresponding to @task_mgmt_cmd
- * for device with SLI-4 interface spec.
- *
- * Return codes:
- * 0 - Error
- * 1 - Success
- **/
-static int
-lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
- struct lpfc_scsi_buf *lpfc_cmd,
- unsigned int lun,
- uint8_t task_mgmt_cmd)
-{
- /*
- * The prep cmnd routines do not touch the sgl or its
- * entries. We may not have to do anything different.
- * I will leave this function in place until we can
- * run some IO through the driver and determine if changes
- * are needed.
- */
- return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
- task_mgmt_cmd);
-}
-/**
- * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info
- * @vport: The virtual port for which this call is being executed.
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
- * @lun: Logical unit number.
- * @task_mgmt_cmd: SCSI task management command.
- *
- * This routine wraps the actual convert SCSI TM to FCP information unit
- * function pointer from the lpfc_hba struct.
- *
- * Return codes:
- * 0 - Error
- * 1 - Success
- **/
-static inline int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
- struct lpfc_scsi_buf *lpfc_cmd,
- unsigned int lun,
- uint8_t task_mgmt_cmd)
-{
- struct lpfc_hba *phba = vport->phba;
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
- return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
- task_mgmt_cmd);
+ return 1;
}
/**
@@ -2730,23 +2627,19 @@ int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
+ phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
+ phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
+ phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
+
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
- phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
- phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
- phba->lpfc_scsi_prep_task_mgmt_cmd =
- lpfc_scsi_prep_task_mgmt_cmd_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
- phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
- phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
- phba->lpfc_scsi_prep_task_mgmt_cmd =
- lpfc_scsi_prep_task_mgmt_cmd_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
break;
default:
@@ -2783,72 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
}
/**
- * lpfc_scsi_tgt_reset - Target reset handler
- * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
- * @vport: The virtual port for which this call is being executed.
- * @tgt_id: Target ID.
- * @lun: Lun number.
- * @rdata: Pointer to lpfc_rport_data.
- *
- * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
- *
- * Return Code:
- * 0x2003 - Error
- * 0x2002 - Success.
- **/
-static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
- unsigned tgt_id, unsigned int lun,
- struct lpfc_rport_data *rdata)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *iocbq;
- struct lpfc_iocbq *iocbqrsp;
- int ret;
- int status;
-
- if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
- return FAILED;
-
- lpfc_cmd->rdata = rdata;
- status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
- FCP_TARGET_RESET);
- if (!status)
- return FAILED;
-
- iocbq = &lpfc_cmd->cur_iocbq;
- iocbqrsp = lpfc_sli_get_iocbq(phba);
-
- if (!iocbqrsp)
- return FAILED;
-
- /* Issue Target Reset to TGT <num> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
- tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
- status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
- iocbq, iocbqrsp, lpfc_cmd->timeout);
- if (status != IOCB_SUCCESS) {
- if (status == IOCB_TIMEDOUT) {
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
- ret = TIMEOUT_ERROR;
- } else
- ret = FAILED;
- lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
- } else {
- ret = SUCCESS;
- lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
- lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
- if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
- (lpfc_cmd->result & IOERR_DRVR_MASK))
- lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
- }
-
- lpfc_sli_release_iocbq(phba, iocbqrsp);
- return ret;
-}
-
-/**
* lpfc_info - Info entry point of scsi_host_template data structure
* @host: The scsi host for which this call is being executed.
*
@@ -3228,156 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
return ret;
}
+static char *
+lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
+{
+ switch (task_mgmt_cmd) {
+ case FCP_ABORT_TASK_SET:
+ return "ABORT_TASK_SET";
+ case FCP_CLEAR_TASK_SET:
+ return "FCP_CLEAR_TASK_SET";
+ case FCP_BUS_RESET:
+ return "FCP_BUS_RESET";
+ case FCP_LUN_RESET:
+ return "FCP_LUN_RESET";
+ case FCP_TARGET_RESET:
+ return "FCP_TARGET_RESET";
+ case FCP_CLEAR_ACA:
+ return "FCP_CLEAR_ACA";
+ case FCP_TERMINATE_TASK:
+ return "FCP_TERMINATE_TASK";
+ default:
+ return "unknown";
+ }
+}
+
/**
- * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
+ * @vport: The virtual port for which this call is being executed.
+ * @rdata: Pointer to remote port local data
+ * @tgt_id: Target ID of remote device.
+ * @lun_id: Lun number for the TMF
+ * @task_mgmt_cmd: type of TMF to send
*
- * This routine does a device reset by sending a TARGET_RESET task management
- * command.
+ * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
+ * a remote port.
*
- * Return code :
- * 0x2003 - Error
- * 0x2002 - Success
+ * Return Code:
+ * 0x2003 - Error
+ * 0x2002 - Success.
**/
static int
-lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
+ unsigned tgt_id, unsigned int lun_id,
+ uint8_t task_mgmt_cmd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
- struct lpfc_iocbq *iocbq, *iocbqrsp;
- struct lpfc_rport_data *rdata = cmnd->device->hostdata;
- struct lpfc_nodelist *pnode = rdata->pnode;
- unsigned long later;
- int ret = SUCCESS;
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *iocbqrsp;
+ int ret;
int status;
- int cnt;
- struct lpfc_scsi_event_header scsi_event;
- lpfc_block_error_handler(cmnd);
- /*
- * If target is not in a MAPPED state, delay the reset until
- * target is rediscovered or devloss timeout expires.
- */
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
- while (time_after(later, jiffies)) {
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
- return FAILED;
- if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
- break;
- schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = cmnd->device->hostdata;
- if (!rdata)
- break;
- pnode = rdata->pnode;
- }
-
- scsi_event.event_type = FC_REG_SCSI_EVENT;
- scsi_event.subcategory = LPFC_EVENT_TGTRESET;
- scsi_event.lun = 0;
- memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
- memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
-
- fc_host_post_vendor_event(shost,
- fc_get_event_number(),
- sizeof(scsi_event),
- (char *)&scsi_event,
- LPFC_NL_VENDOR_ID);
-
- if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0721 LUN Reset rport "
- "failure: msec x%x rdata x%p\n",
- jiffies_to_msecs(jiffies - later), rdata);
+ if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
- }
+
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = 60;
lpfc_cmd->rdata = rdata;
- status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
- cmnd->device->lun,
- FCP_TARGET_RESET);
+ status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+ task_mgmt_cmd);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
- iocbq = &lpfc_cmd->cur_iocbq;
- /* get a buffer for this IOCB command response */
+ iocbq = &lpfc_cmd->cur_iocbq;
iocbqrsp = lpfc_sli_get_iocbq(phba);
if (iocbqrsp == NULL) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0703 Issue target reset to TGT %d LUN %d "
- "rpi x%x nlp_flag x%x\n", cmnd->device->id,
- cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
+ "0702 Issue %s to TGT %d LUN %d "
+ "rpi x%x nlp_flag x%x\n",
+ lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
+ rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
+
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
- if (status == IOCB_TIMEDOUT) {
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
- ret = TIMEOUT_ERROR;
- } else {
- if (status != IOCB_SUCCESS)
+ if (status != IOCB_SUCCESS) {
+ if (status == IOCB_TIMEDOUT) {
+ iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ ret = TIMEOUT_ERROR;
+ } else
ret = FAILED;
- lpfc_release_scsi_buf(phba, lpfc_cmd);
- }
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0713 SCSI layer issued device reset (%d, %d) "
- "return x%x status x%x result x%x\n",
- cmnd->device->id, cmnd->device->lun, ret,
- iocbqrsp->iocb.ulpStatus,
+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
+ lpfc_taskmgmt_name(task_mgmt_cmd),
+ tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
iocbqrsp->iocb.un.ulpWord[4]);
+ } else
+ ret = SUCCESS;
+
lpfc_sli_release_iocbq(phba, iocbqrsp);
- cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
- LPFC_CTX_TGT);
+
+ if (ret != TIMEOUT_ERROR)
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+
+ return ret;
+}
+
+/**
+ * lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to be mapped
+ * @vport: The virtual port to check on
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine delays until the scsi target (aka rport) for the
+ * command exists (is present and logged in) or we declare it non-existent.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+{
+ struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ unsigned long later;
+
+ /*
+ * If target is not in a MAPPED state, delay until
+ * target is rediscovered or devloss timeout expires.
+ */
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies)) {
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return FAILED;
+ if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
+ return SUCCESS;
+ schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+ rdata = cmnd->device->hostdata;
+ if (!rdata)
+ return FAILED;
+ pnode = rdata->pnode;
+ }
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
+ (pnode->nlp_state != NLP_STE_MAPPED_NODE))
+ return FAILED;
+ return SUCCESS;
+}
+
+/**
+ * lpfc_reset_flush_io_context - Flush outstanding i/o contexts after a reset TMF
+ * @vport: The virtual port (scsi_host) for the flush context
+ * @tgt_id: If aborting by Target context - specifies the target id
+ * @lun_id: If aborting by Lun context - specifies the lun id
+ * @context: specifies the context level to flush at.
+ *
+ * After a reset condition via TMF, we need to flush orphaned i/o
+ * contexts from the adapter. This routine aborts any contexts
+ * outstanding, then waits for their completions. The wait is
+ * bounded by devloss_tmo though.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
+ uint64_t lun_id, lpfc_ctx_cmd context)
+{
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long later;
+ int cnt;
+
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
if (cnt)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
- cmnd->device->id, cmnd->device->lun,
- LPFC_CTX_TGT);
+ tgt_id, lun_id, context);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
- cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
- cmnd->device->lun, LPFC_CTX_TGT);
+ cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
}
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0719 device reset I/O flush failure: "
- "cnt x%x\n", cnt);
- ret = FAILED;
+ "0724 I/O flush failure for context %s : cnt x%x\n",
+ ((context == LPFC_CTX_LUN) ? "LUN" :
+ ((context == LPFC_CTX_TGT) ? "TGT" :
+ ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
+ cnt);
+ return FAILED;
}
- return ret;
+ return SUCCESS;
+}
+
+/**
+ * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a LUN_RESET task management
+ * command.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ unsigned tgt_id = cmnd->device->id;
+ unsigned int lun_id = cmnd->device->lun;
+ struct lpfc_scsi_event_header scsi_event;
+ int status;
+
+ lpfc_block_error_handler(cmnd);
+
+ status = lpfc_chk_tgt_mapped(vport, cmnd);
+ if (status == FAILED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0721 Device Reset rport failure: rdata x%p\n", rdata);
+ return FAILED;
+ }
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_LUNRESET;
+ scsi_event.lun = lun_id;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+ status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ FCP_LUN_RESET);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0713 SCSI layer issued Device Reset (%d, %d) "
+ "return x%x\n", tgt_id, lun_id, status);
+
+ /*
+ * We have to clean up the i/o: it may have been orphaned by the TMF,
+ * or, if the TMF failed, it may be in an indeterminate state.
+ * So, continue on.
+ * We report success only if all of the i/o is aborted successfully.
+ */
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_LUN);
+ return status;
+}
+
+/**
+ * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
+static int
+lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_rport_data *rdata = cmnd->device->hostdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ unsigned tgt_id = cmnd->device->id;
+ unsigned int lun_id = cmnd->device->lun;
+ struct lpfc_scsi_event_header scsi_event;
+ int status;
+
+ lpfc_block_error_handler(cmnd);
+
+ status = lpfc_chk_tgt_mapped(vport, cmnd);
+ if (status == FAILED) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0722 Target Reset rport failure: rdata x%p\n", rdata);
+ return FAILED;
+ }
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
+
+ status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+ FCP_TARGET_RESET);
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0723 SCSI layer issued Target Reset (%d, %d) "
+ "return x%x\n", tgt_id, lun_id, status);
+
+ /*
+ * We have to clean up the i/o: it may have been orphaned by the TMF,
+ * or, if the TMF failed, it may be in an indeterminate state.
+ * So, continue on.
+ * We report success only if all of the i/o is aborted successfully.
+ */
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ return status;
}
/**
* lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
* @cmnd: Pointer to scsi_cmnd data structure.
*
- * This routine does target reset to all target on @cmnd->device->host.
+ * This routine does a target reset on every target on @cmnd->device->host.
+ * This emulates Parallel SCSI Bus Reset Semantics.
*
- * Return Code:
- * 0x2003 - Error
- * 0x2002 - Success
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
**/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
- int match;
- int ret = SUCCESS, status = SUCCESS, i;
- int cnt;
- struct lpfc_scsi_buf * lpfc_cmd;
- unsigned long later;
struct lpfc_scsi_event_header scsi_event;
+ int match;
+ int ret = SUCCESS, status, i;
scsi_event.event_type = FC_REG_SCSI_EVENT;
scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@@ -3385,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
- fc_host_post_vendor_event(shost,
- fc_get_event_number(),
- sizeof(scsi_event),
- (char *)&scsi_event,
- LPFC_NL_VENDOR_ID);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
lpfc_block_error_handler(cmnd);
+
/*
* Since the driver manages a single bus device, reset all
* targets known to the driver. Should any target reset
@@ -3414,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
spin_unlock_irq(shost->host_lock);
if (!match)
continue;
- lpfc_cmd = lpfc_get_scsi_buf(phba);
- if (lpfc_cmd) {
- lpfc_cmd->timeout = 60;
- status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
- cmnd->device->lun,
- ndlp->rport->dd_data);
- if (status != TIMEOUT_ERROR)
- lpfc_release_scsi_buf(phba, lpfc_cmd);
- }
- if (!lpfc_cmd || status != SUCCESS) {
+
+ status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+ i, 0, FCP_TARGET_RESET);
+
+ if (status != SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0700 Bus Reset on target %d failed\n",
i);
@@ -3431,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
}
}
/*
- * All outstanding txcmplq I/Os should have been aborted by
- * the targets. Unfortunately, some targets do not abide by
- * this forcing the driver to double check.
+ * We have to clean up the i/o: it may have been orphaned by the TMFs
+ * above, or, if any of the TMFs failed, it may be in an
+ * indeterminate state.
+ * We report success only if all of the i/o is aborted successfully.
*/
- cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
- if (cnt)
- lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
- 0, 0, LPFC_CTX_HOST);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
- while (time_after(later, jiffies) && cnt) {
- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
- cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
- }
- if (cnt) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0715 Bus Reset I/O flush failure: "
- "cnt x%x left x%x\n", cnt, i);
+
+ status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
+ if (status != SUCCESS)
ret = FAILED;
- }
+
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
return ret;
@@ -3582,7 +3571,8 @@ struct scsi_host_template lpfc_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler= lpfc_device_reset_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
@@ -3602,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler= lpfc_device_reset_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
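For reference, the new eh_target_reset_handler plugs into the SCSI midlayer exactly like the existing device and bus reset handlers. A hypothetical (non-lpfc) host template that wires up the three reset callbacks could look like the sketch below; every name in it is illustrative.

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Illustrative stubs; a real LLDD would issue the appropriate TMF and
 * then flush any i/o the TMF orphaned, as the lpfc handlers above do. */
static int demo_eh_device_reset(struct scsi_cmnd *cmnd) { return SUCCESS; }
static int demo_eh_target_reset(struct scsi_cmnd *cmnd) { return SUCCESS; }
static int demo_eh_bus_reset(struct scsi_cmnd *cmnd) { return SUCCESS; }

static struct scsi_host_template demo_template = {
	.module			 = THIS_MODULE,
	.name			 = "demo",
	.eh_device_reset_handler = demo_eh_device_reset, /* LUN_RESET TMF */
	.eh_target_reset_handler = demo_eh_target_reset, /* TARGET_RESET TMF */
	.eh_bus_reset_handler	 = demo_eh_bus_reset,	 /* reset all targets */
	.this_id		 = -1,
};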
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ff04daf18f48..acc43b061ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
return -EIO;
}
data_length = mqe->un.mb_words[5];
- if (data_length > DMP_FCOEPARAM_RGN_SIZE)
+ if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
return -EIO;
+ }
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
return -EIO;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0380 Mailbox cmd x%x Status x%x "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
- "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
- "CQ: x%x x%x x%x x%x\n",
- mboxq->vport ? mboxq->vport->vpi : 0,
- bf_get(lpfc_mqe_command, mqe),
- bf_get(lpfc_mqe_status, mqe),
- mqe->un.mb_words[0], mqe->un.mb_words[1],
- mqe->un.mb_words[2], mqe->un.mb_words[3],
- mqe->un.mb_words[4], mqe->un.mb_words[5],
- mqe->un.mb_words[6], mqe->un.mb_words[7],
- mqe->un.mb_words[8], mqe->un.mb_words[9],
- mqe->un.mb_words[10], mqe->un.mb_words[11],
- mqe->un.mb_words[12], mqe->un.mb_words[13],
- mqe->un.mb_words[14], mqe->un.mb_words[15],
- mqe->un.mb_words[16], mqe->un.mb_words[50],
- mboxq->mcqe.word0,
- mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
- mboxq->mcqe.trailer);
-
/*
* The available vpd length cannot be bigger than the
* DMA buffer passed to the port. Catch the less than
@@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_vpd;
mqe = &mboxq->u.mqe;
- if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
- &mqe->un.read_rev) != LPFC_SLI_REV4) ||
- (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
+ phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
+ if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+ phba->hba_flag |= HBA_FCOE_SUPPORT;
+ if (phba->sli_rev != LPFC_SLI_REV4 ||
+ !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0376 READ_REV Error. SLI Level %d "
"FCoE enabled %d\n",
- bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
- bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
+ phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
rc = -EIO;
goto out_free_vpd;
}
- /* Single threaded at this point, no need for lock */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_FCOE_SUPPORT;
- spin_unlock_irq(&phba->hbalock);
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
@@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = 0;
}
- /* By now, we should determine the SLI revision, hard code for now */
- phba->sli_rev = LPFC_SLI_REV4;
+ /* Save information as VPD data */
+ phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
+ phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
+ phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
+ phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
+ &mqe->un.read_rev);
+ phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
+ &mqe->un.read_rev);
+ phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
+ &mqe->un.read_rev);
+ phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
+ &mqe->un.read_rev);
+ phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
+ memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
+ phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
+ memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
+ phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
+ memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "(%d):0380 READ_REV Status x%x "
+ "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
+ mboxq->vport ? mboxq->vport->vpi : 0,
+ bf_get(lpfc_mqe_status, mqe),
+ phba->vpd.rev.opFwName,
+ phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
+ phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
/*
* Discover the port's supported feature set and match it against the
@@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_free_vpd;
}
- /* Temporary initialization of lpfc_fip_flag to non-fip */
- bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
+ if (phba->cfg_enable_fip)
+ bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
+ else
+ bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
/* Set up all the queues to the device */
rc = lpfc_sli4_queue_setup(phba);
@@ -5030,6 +5035,92 @@ out_not_finished:
}
/**
+ * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
+ * @phba: Pointer to HBA context object.
+ *
+ * The function blocks the posting of SLI4 asynchronous mailbox commands from
+ * the driver internal pending mailbox queue. It will then try to wait out the
+ * possible outstanding mailbox command before returning.
+ *
+ * Returns:
+ * 0 - the outstanding mailbox command completed.
+ * 1 - the wait for the outstanding mailbox command timed out.
+ **/
+static int
+lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint8_t actcmd = MBX_HEARTBEAT;
+ int rc = 0;
+ unsigned long timeout;
+
+ /* Mark the asynchronous mailbox command posting as blocked */
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+ if (phba->sli.mbox_active)
+ actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
+ spin_unlock_irq(&phba->hbalock);
+ /* Determine how long we might wait for the active mailbox
+ * command to be gracefully completed by firmware.
+ */
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
+ jiffies;
+ /* Wait for the outstanding mailbox command to complete */
+ while (phba->sli.mbox_active) {
+ /* Check active mailbox complete status every 2ms */
+ msleep(2);
+ if (time_after(jiffies, timeout)) {
+ /* Timeout, mark the outstanding cmd as not complete */
+ rc = 1;
+ break;
+ }
+ }
+
+ /* Could not cleanly block async mailbox command posting, undo the block */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
+ * @phba: Pointer to HBA context object.
+ *
+ * The function unblocks and resumes posting of SLI4 asynchronous mailbox
+ * commands from the driver internal pending mailbox queue. It makes sure
+ * that there is no outstanding mailbox command before resuming posting
+ * asynchronous mailbox commands. If, for any reason, there is an outstanding
+ * mailbox command, it will try to wait it out before resuming asynchronous
+ * mailbox command posting.
+ **/
+static void
+lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+
+ spin_lock_irq(&phba->hbalock);
+ if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+ /* Asynchronous mailbox posting is not blocked, do nothing */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+
+ /* The outstanding synchronous mailbox command is guaranteed to be done,
+ * either successfully or by timing out; after a timeout the outstanding
+ * mailbox command is always removed, so just unblock the posting of async
+ * mailbox commands and resume
+ */
+ psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* wake up worker thread to post asynchronous mailbox command */
+ lpfc_worker_wake_up(phba);
+}
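The block/unblock pair above boils down to a flag guarded by the hba lock plus a bounded wait for the command already in flight. A stripped-down sketch of the same pairing, with generic names rather than lpfc structures:

#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define DEMO_BLOCKED	0x1

/* Generic sketch of the block/unblock pairing; not lpfc code. */
struct demo_ctx {
	spinlock_t lock;
	unsigned long flags;	/* DEMO_BLOCKED lives here */
	bool busy;		/* an operation is currently in flight */
};

static int demo_block(struct demo_ctx *c, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	spin_lock_irq(&c->lock);
	c->flags |= DEMO_BLOCKED;	/* stop new submissions */
	spin_unlock_irq(&c->lock);

	while (c->busy) {		/* wait out the in-flight operation */
		msleep(2);
		if (time_after(jiffies, deadline)) {
			spin_lock_irq(&c->lock);
			c->flags &= ~DEMO_BLOCKED; /* could not block cleanly */
			spin_unlock_irq(&c->lock);
			return 1;
		}
	}
	return 0;
}

static void demo_unblock(struct demo_ctx *c)
{
	spin_lock_irq(&c->lock);
	c->flags &= ~DEMO_BLOCKED;
	spin_unlock_irq(&c->lock);
}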
+
+/**
* lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
* @phba: Pointer to HBA context object.
* @mboxq: Pointer to mailbox object.
@@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2542 Mailbox command x%x (x%x) "
- "cannot issue Data: x%x x%x\n",
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+ "(%d):2542 Try to issue mailbox command "
+ "x%x (x%x) synchronously ahead of async "
+ "mailbox command queue: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
- return -EIO;
+ /* Try to block the asynchronous mailbox posting */
+ rc = lpfc_sli4_async_mbox_block(phba);
+ if (!rc) {
+ /* Successfully blocked, now issue sync mbox cmd */
+ rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+ if (rc != MBX_SUCCESS)
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_SLI,
+ "(%d):2597 Mailbox command "
+ "x%x (x%x) cannot issue "
+ "Data: x%x x%x\n",
+ mboxq->vport ?
+ mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli4_mbox_opcode_get(phba,
+ mboxq),
+ psli->sli_flag, flag);
+ /* Unblock the async mailbox posting afterward */
+ lpfc_sli4_async_mbox_unblock(phba);
+ }
+ return rc;
}
/* Now, interrupt mode asynchrous mailbox command */
@@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
/* The fcp commands will set command type */
- if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip))
- command_type = ELS_COMMAND_NON_FIP;
- else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
- command_type = ELS_COMMAND_FIP;
- else if (iocbq->iocb_flag & LPFC_IO_FCP)
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
command_type = FCP_COMMAND;
- else {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2019 Invalid cmd 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
+ else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
abort_tag = (uint32_t) iocbq->iotag;
@@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(lpfc_wqe_gen_context, &wqe->generic,
iocbq->iocb.ulpContext);
- if (iocbq->vport->fc_myDID != 0) {
- bf_set(els_req64_sid, &wqe->els_req,
- iocbq->vport->fc_myDID);
- bf_set(els_req64_sp, &wqe->els_req, 1);
- }
bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
@@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* is set and we are sending our 2nd or greater command on
* this exchange.
*/
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
- /* ALLOW read & write to fall through to ICMD64 */
+ wqe->words[10] &= 0xffff0000; /* zero out ebde count */
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ break;
case CMD_FCP_ICMND64_CR:
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+ wqe->words[4] = 0;
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
break;
case CMD_GEN_REQUEST64_CR:
/* word3 command length is described as byte offset to the
@@ -7247,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
}
/**
+ * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @flag: Flag to test.
+ *
+ * This routine grabs the hbalock and then tests the iocb_flag to
+ * see if the passed in flag is set.
+ * Returns:
+ * 1 if flag is set.
+ * 0 if flag is not set.
+ **/
+static int
+lpfc_chk_iocb_flg(struct lpfc_hba *phba,
+ struct lpfc_iocbq *piocbq, uint32_t flag)
+{
+ unsigned long iflags;
+ int ret;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ret = piocbq->iocb_flag & flag;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return ret;
+
+}
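The reason this helper exists is that wait_event_timeout() re-evaluates its condition with no lock held; wrapping the flag test in a lock-taking function keeps the check coherent with writers that update the flag under the same lock. A generic sketch of the idiom (demo names, not lpfc API):

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

#define DEMO_DONE	0x1

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_flags;		/* protected by demo_lock */

/* Condition helper: sample the flag under the lock. */
static int demo_flag_set(unsigned int flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&demo_lock, iflags);
	ret = demo_flags & flag;
	spin_unlock_irqrestore(&demo_lock, iflags);
	return ret;
}

/* Waiter: a nonzero return means DEMO_DONE was set within timeout_ms. */
static long demo_wait_done(unsigned int timeout_ms)
{
	return wait_event_timeout(demo_wq, demo_flag_set(DEMO_DONE),
				  msecs_to_jiffies(timeout_ms));
}

The completion path would set the flag under the same lock and then call wake_up(&demo_wq).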
+
+/**
* lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
* @phba: Pointer to HBA context object..
* @pring: Pointer to sli ring.
@@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeleft = wait_event_timeout(done_q,
- piocb->iocb_flag & LPFC_IO_WAKE,
+ lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
if (piocb->iocb_flag & LPFC_IO_WAKE) {
@@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
HS_FFER6 | HS_FFER7) & phba->work_hs)) {
- spin_lock_irq(&phba->hbalock);
phba->hba_flag |= DEFER_ERATT;
- spin_unlock_irq(&phba->hbalock);
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
}
/* Set the driver HA work bitmap */
- spin_lock_irq(&phba->hbalock);
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
- spin_unlock_irq(&phba->hbalock);
return 1;
}
return 0;
@@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
return 0;
phba->work_status[0] = uerr_sta_lo;
phba->work_status[1] = uerr_sta_hi;
- spin_lock_irq(&phba->hbalock);
/* Set the driver HA work bitmap */
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
- spin_unlock_irq(&phba->hbalock);
return 1;
}
}
@@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
kfree(dmabuf);
goto out_fail;
}
+ memset(dmabuf->virt, 0, PAGE_SIZE);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
@@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
out:
- if (rc == MBX_TIMEOUT)
+ if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
@@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
rpi_page->start_rpi);
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+ bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
LPFC_FCF_FPMA | LPFC_FCF_SPMA);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7d37eb7459bf..3c53316cf6d0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -56,6 +56,7 @@ struct lpfc_iocbq {
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
+#define LPFC_FIP_ELS 0x40
uint8_t abort_count;
uint8_t rsvd2;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5196b46608d7..3b276b47d18f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -229,7 +229,7 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 256
-#define LPFC_WQE_DEF_COUNT 64
+#define LPFC_WQE_DEF_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b8a148f0a55..41094e02304b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.2"
+#define LPFC_DRIVER_VERSION "8.3.3"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a6313ee84ac5..e0b49922193e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -695,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- goto skip_logo;
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 3b7240e40819..e3c482aa87b5 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -5444,7 +5444,7 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
** input speed faster than the period.
*/
kpc = per * clk;
- while (--div >= 0)
+ while (--div > 0)
if (kpc >= (div_10M[div] << 2)) break;
/*
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 11a61ea8d5d9..70b60ade049e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -530,7 +530,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask,
if (reg == 0xff) {
break;
}
- } while ((time_out-- != 0) && (reg & mask) != 0);
+ } while ((--time_out != 0) && (reg & mask) != 0);
if (time_out == 0) {
nsp_msg(KERN_DEBUG, " %s signal off timeut", str);
@@ -801,7 +801,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
data->FifoCount = ocount;
- if (time_out == 0) {
+ if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
SCpnt->SCp.buffers_residual);
@@ -897,7 +897,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
data->FifoCount = ocount;
- if (time_out == 0) {
+ if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
scsi_get_resid(SCpnt));
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index c8d0a176fea4..245e7afb4c4d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -37,6 +37,7 @@ qla2100_intr_handler(int irq, void *dev_id)
uint16_t hccr;
uint16_t mb[4];
struct rsp_que *rsp;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -49,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp;
status = 0;
- spin_lock(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
@@ -101,7 +102,7 @@ qla2100_intr_handler(int irq, void *dev_id)
RD_REG_WORD(&reg->hccr);
}
}
- spin_unlock(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -133,6 +134,7 @@ qla2300_intr_handler(int irq, void *dev_id)
uint16_t mb[4];
struct rsp_que *rsp;
struct qla_hw_data *ha;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -145,7 +147,7 @@ qla2300_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp;
status = 0;
- spin_lock(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
@@ -216,7 +218,7 @@ qla2300_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD_RELAXED(&reg->hccr);
}
- spin_unlock(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -1626,6 +1628,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
uint32_t hccr;
uint16_t mb[4];
struct rsp_que *rsp;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -1638,7 +1641,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
- spin_lock(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
@@ -1688,7 +1691,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
- spin_unlock(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
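The switch to spin_lock_irqsave() in these handlers is the usual way to make a lock safe regardless of whether the caller has already disabled local interrupts (presumably because the qla2xxx interrupt handlers can also run outside hard-irq context, e.g. when polled). A minimal illustration with a generic lock:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_hw_lock);

/* Safe whether or not the caller already disabled local interrupts. */
static irqreturn_t demo_intr(int irq, void *dev_id)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_hw_lock, flags);
	/* ... read and acknowledge hardware status ... */
	spin_unlock_irqrestore(&demo_hw_lock, flags);

	return IRQ_HANDLED;
}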
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6260505dceb5..010e69b29afe 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -945,7 +945,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
- vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name,
+ le16_to_cpu(entry->vf_id),
entry->q_qos, entry->f_qos));
if (i < QLA_PRECONFIG_VPORTS) {
@@ -954,7 +956,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
qla_printk(KERN_INFO, ha,
"NPIV-Config: Failed to create vport [%02x]: "
"wwpn=%llx wwnn=%llx.\n", cnt,
- vid.port_name, vid.node_name);
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name);
}
}
done:
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index b13481369642..8821df9a277b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -225,6 +225,7 @@ static struct {
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a152f89ae51c..3f64d93b6c8b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -35,6 +35,7 @@
#include <linux/netlink.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
+#include <scsi/scsi_bsg_fc.h>
#include "scsi_priv.h"
#include "scsi_transport_fc_internal.h"
@@ -43,6 +44,10 @@ static void fc_vport_sched_delete(struct work_struct *work);
static int fc_vport_setup(struct Scsi_Host *shost, int channel,
struct device *pdev, struct fc_vport_identifiers *ids,
struct fc_vport **vport);
+static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
+static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
+static void fc_bsg_remove(struct request_queue *);
+static void fc_bsg_goose_queue(struct fc_rport *);
/*
* Redefine so that we can have same named attributes in the
@@ -411,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
return -ENOMEM;
}
+ fc_bsg_hostadd(shost, fc_host);
+ /* ignore any bsg add error - we just can't do sgio */
+
+ return 0;
+}
+
+static int fc_host_remove(struct transport_container *tc, struct device *dev,
+ struct device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+
+ fc_bsg_remove(fc_host->rqst_q);
return 0;
}
static DECLARE_TRANSPORT_CLASS(fc_host_class,
"fc_host",
fc_host_setup,
- NULL,
+ fc_host_remove,
NULL);
/*
@@ -2375,6 +2393,7 @@ fc_rport_final_delete(struct work_struct *work)
scsi_flush_work(shost);
fc_terminate_rport_io(rport);
+
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
@@ -2407,6 +2426,8 @@ fc_rport_final_delete(struct work_struct *work)
(i->f->dev_loss_tmo_callbk))
i->f->dev_loss_tmo_callbk(rport);
+ fc_bsg_remove(rport->rqst_q);
+
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
@@ -2494,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
transport_add_device(dev);
transport_configure_device(dev);
+ fc_bsg_rportadd(shost, rport);
+ /* ignore any bsg add error - we just can't do sgio */
+
if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -2658,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock,
flags);
+ fc_bsg_goose_queue(rport);
+
return rport;
}
}
@@ -3343,6 +3369,592 @@ fc_vport_sched_delete(struct work_struct *work)
}
+/*
+ * BSG support
+ */
+
+
+/**
+ * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
+ * @job: fc_bsg_job that is to be torn down
+ */
+static void
+fc_destroy_bsgjob(struct fc_bsg_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ if (job->ref_cnt) {
+ spin_unlock_irqrestore(&job->job_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ put_device(job->dev); /* release reference for the request */
+
+ kfree(job->request_payload.sg_list);
+ kfree(job->reply_payload.sg_list);
+ kfree(job);
+}
+
+
+/**
+ * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
+ * completed
+ * @job: fc_bsg_job that is complete
+ */
+static void
+fc_bsg_jobdone(struct fc_bsg_job *job)
+{
+ struct request *req = job->req;
+ struct request *rsp = req->next_rq;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ job->state_flags |= FC_RQST_STATE_DONE;
+ job->ref_cnt--;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ err = job->req->errors = job->reply->result;
+ if (err < 0)
+ /* we're only returning the result field in the reply */
+ job->req->sense_len = sizeof(uint32_t);
+ else
+ job->req->sense_len = job->reply_len;
+
+ /* we assume all request payload was transferred, residual == 0 */
+ req->resid_len = 0;
+
+ if (rsp) {
+ WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
+
+ /* set reply (bidi) residual */
+ rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
+ rsp->resid_len);
+ }
+
+ blk_end_request_all(req, err);
+
+ fc_destroy_bsgjob(job);
+}
+
+
+/**
+ * fc_bsg_job_timeout - handler for when a bsg request times out
+ * @req: request that timed out
+ */
+static enum blk_eh_timer_return
+fc_bsg_job_timeout(struct request *req)
+{
+ struct fc_bsg_job *job = (void *) req->special;
+ struct Scsi_Host *shost = job->shost;
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ unsigned long flags;
+ int err = 0, done = 0;
+
+ if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+ return BLK_EH_RESET_TIMER;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ if (job->state_flags & FC_RQST_STATE_DONE)
+ done = 1;
+ else
+ job->ref_cnt++;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ if (!done && i->f->bsg_timeout) {
+ /* call LLDD to abort the i/o as it has timed out */
+ err = i->f->bsg_timeout(job);
+ if (err)
+ printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
+ "abort failed with status %d\n", err);
+ }
+
+ if (!done) {
+ spin_lock_irqsave(&job->job_lock, flags);
+ job->ref_cnt--;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+ fc_destroy_bsgjob(job);
+ }
+
+ /* the blk_end_sync_io() doesn't check the error */
+ return BLK_EH_HANDLED;
+}
+
+
+
+static int
+fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
+{
+ size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
+
+ BUG_ON(!req->nr_phys_segments);
+
+ buf->sg_list = kzalloc(sz, GFP_KERNEL);
+ if (!buf->sg_list)
+ return -ENOMEM;
+ sg_init_table(buf->sg_list, req->nr_phys_segments);
+ buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+ buf->payload_len = blk_rq_bytes(req);
+ return 0;
+}
+
+
+/**
+ * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
+ * bsg request
+ * @shost: SCSI Host corresponding to the bsg object
+ * @rport: (optional) FC Remote Port corresponding to the bsg object
+ * @req: BSG request that needs a job structure
+ */
+static int
+fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
+ struct request *req)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct request *rsp = req->next_rq;
+ struct fc_bsg_job *job;
+ int ret;
+
+ BUG_ON(req->special);
+
+ job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
+ GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ /*
+ * Note: this is a bit silly.
+ * The request gets formatted as a SGIO v4 ioctl request, which
+ * then gets reformatted as a blk request, which then gets
+ * reformatted as a fc bsg request. And on completion, we have
+ * to wrap return results such that SGIO v4 thinks it was a scsi
+ * status. I hope this was all worth it.
+ */
+
+ req->special = job;
+ job->shost = shost;
+ job->rport = rport;
+ job->req = req;
+ if (i->f->dd_bsg_size)
+ job->dd_data = (void *)&job[1];
+ spin_lock_init(&job->job_lock);
+ job->request = (struct fc_bsg_request *)req->cmd;
+ job->request_len = req->cmd_len;
+ job->reply = req->sense;
+ job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
+ * allocated */
+ if (req->bio) {
+ ret = fc_bsg_map_buffer(&job->request_payload, req);
+ if (ret)
+ goto failjob_rls_job;
+ }
+ if (rsp && rsp->bio) {
+ ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
+ if (ret)
+ goto failjob_rls_rqst_payload;
+ }
+ job->job_done = fc_bsg_jobdone;
+ if (rport)
+ job->dev = &rport->dev;
+ else
+ job->dev = &shost->shost_gendev;
+ get_device(job->dev); /* take a reference for the request */
+
+ job->ref_cnt = 1;
+
+ return 0;
+
+
+failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+failjob_rls_job:
+ kfree(job);
+ return -ENOMEM;
+}
+
+
+enum fc_dispatch_result {
+ FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
+ FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
+ FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
+};
+
+
+/**
+ * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
+ * @shost: scsi host rport attached to
+ * @job: bsg job to be processed
+ */
+static enum fc_dispatch_result
+fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
+ struct fc_bsg_job *job)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
+ int ret;
+
+ /* Validate the host command */
+ switch (job->request->msgcode) {
+ case FC_BSG_HST_ADD_RPORT:
+ cmdlen += sizeof(struct fc_bsg_host_add_rport);
+ break;
+
+ case FC_BSG_HST_DEL_RPORT:
+ cmdlen += sizeof(struct fc_bsg_host_del_rport);
+ break;
+
+ case FC_BSG_HST_ELS_NOLOGIN:
+ cmdlen += sizeof(struct fc_bsg_host_els);
+ /* there better be xmt and rcv payloads */
+ if ((!job->request_payload.payload_len) ||
+ (!job->reply_payload.payload_len)) {
+ ret = -EINVAL;
+ goto fail_host_msg;
+ }
+ break;
+
+ case FC_BSG_HST_CT:
+ cmdlen += sizeof(struct fc_bsg_host_ct);
+ /* there better be xmt and rcv payloads */
+ if ((!job->request_payload.payload_len) ||
+ (!job->reply_payload.payload_len)) {
+ ret = -EINVAL;
+ goto fail_host_msg;
+ }
+ break;
+
+ case FC_BSG_HST_VENDOR:
+ cmdlen += sizeof(struct fc_bsg_host_vendor);
+ if ((shost->hostt->vendor_id == 0L) ||
+ (job->request->rqst_data.h_vendor.vendor_id !=
+ shost->hostt->vendor_id)) {
+ ret = -ESRCH;
+ goto fail_host_msg;
+ }
+ break;
+
+ default:
+ ret = -EBADR;
+ goto fail_host_msg;
+ }
+
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_host_msg;
+ }
+
+ ret = i->f->bsg_request(job);
+ if (!ret)
+ return FC_DISPATCH_UNLOCKED;
+
+fail_host_msg:
+ /* return the errno failure code as the only status */
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->result = ret;
+ job->reply_len = sizeof(uint32_t);
+ fc_bsg_jobdone(job);
+ return FC_DISPATCH_UNLOCKED;
+}
+
+
+/*
+ * fc_bsg_goose_queue - restart rport queue in case it was stopped
+ * @rport: rport to be restarted
+ */
+static void
+fc_bsg_goose_queue(struct fc_rport *rport)
+{
+ int flagset;
+
+ if (!rport->rqst_q)
+ return;
+
+ get_device(&rport->dev);
+
+ spin_lock(rport->rqst_q->queue_lock);
+ flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
+ !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
+ if (flagset)
+ queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
+ __blk_run_queue(rport->rqst_q);
+ if (flagset)
+ queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
+ spin_unlock(rport->rqst_q->queue_lock);
+
+ put_device(&rport->dev);
+}
+
+
+/**
+ * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
+ * @shost: scsi host rport attached to
+ * @rport: rport request destined to
+ * @job: bsg job to be processed
+ */
+static enum fc_dispatch_result
+fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
+ struct fc_rport *rport, struct fc_bsg_job *job)
+{
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
+ int ret;
+
+ /* Validate the rport command */
+ switch (job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ cmdlen += sizeof(struct fc_bsg_rport_els);
+ goto check_bidi;
+
+ case FC_BSG_RPT_CT:
+ cmdlen += sizeof(struct fc_bsg_rport_ct);
+check_bidi:
+ /* there better be xmt and rcv payloads */
+ if ((!job->request_payload.payload_len) ||
+ (!job->reply_payload.payload_len)) {
+ ret = -EINVAL;
+ goto fail_rport_msg;
+ }
+ break;
+ default:
+ ret = -EBADR;
+ goto fail_rport_msg;
+ }
+
+ /* check if we really have all the request data needed */
+ if (job->request_len < cmdlen) {
+ ret = -ENOMSG;
+ goto fail_rport_msg;
+ }
+
+ ret = i->f->bsg_request(job);
+ if (!ret)
+ return FC_DISPATCH_UNLOCKED;
+
+fail_rport_msg:
+ /* return the errno failure code as the only status */
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->result = ret;
+ job->reply_len = sizeof(uint32_t);
+ fc_bsg_jobdone(job);
+ return FC_DISPATCH_UNLOCKED;
+}
+
+
+/**
+ * fc_bsg_request_handler - generic handler for bsg requests
+ * @q: request queue to manage
+ * @shost: Scsi_Host related to the bsg object
+ * @rport: FC remote port related to the bsg object (optional)
+ * @dev: device structure for bsg object
+ */
+static void
+fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
+ struct fc_rport *rport, struct device *dev)
+{
+ struct request *req;
+ struct fc_bsg_job *job;
+ enum fc_dispatch_result ret;
+
+ if (!get_device(dev))
+ return;
+
+ while (!blk_queue_plugged(q)) {
+ if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
+ break;
+
+ req = blk_fetch_request(q);
+ if (!req)
+ break;
+
+ if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
+ req->errors = -ENXIO;
+ spin_unlock_irq(q->queue_lock);
+ blk_end_request(req, -ENXIO, blk_rq_bytes(req));
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ spin_unlock_irq(q->queue_lock);
+
+ ret = fc_req_to_bsgjob(shost, rport, req);
+ if (ret) {
+ req->errors = ret;
+ blk_end_request(req, ret, blk_rq_bytes(req));
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ job = req->special;
+
+ /* check if we have the msgcode value at least */
+ if (job->request_len < sizeof(uint32_t)) {
+ BUG_ON(job->reply_len < sizeof(uint32_t));
+ job->reply->result = -ENOMSG;
+ job->reply_len = sizeof(uint32_t);
+ fc_bsg_jobdone(job);
+ spin_lock_irq(q->queue_lock);
+ continue;
+ }
+
+ /* the dispatch routines will unlock the queue_lock */
+ if (rport)
+ ret = fc_bsg_rport_dispatch(q, shost, rport, job);
+ else
+ ret = fc_bsg_host_dispatch(q, shost, job);
+
+ /* did the dispatcher hit a state where it can't process any more? */
+ if (ret == FC_DISPATCH_BREAK)
+ break;
+
+ /* did the dispatcher release the lock? */
+ if (ret == FC_DISPATCH_UNLOCKED)
+ spin_lock_irq(q->queue_lock);
+ }
+
+ spin_unlock_irq(q->queue_lock);
+ put_device(dev);
+ spin_lock_irq(q->queue_lock);
+}
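A note on the locking contract assumed above: an old-style request_fn is entered with q->queue_lock held and must drop it around anything that may sleep, re-taking it before fetching the next request or returning. A bare-bones sketch of that contract (illustrative only, not transport-specific):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Sketch of the request_fn locking pattern used by the handler above. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *req;

	/* called with q->queue_lock held */
	while ((req = blk_fetch_request(q)) != NULL) {
		spin_unlock_irq(q->queue_lock);

		/* ... hand the request off or complete it; may sleep ... */
		blk_end_request_all(req, 0);

		spin_lock_irq(q->queue_lock);
	}
	/* returns with q->queue_lock held again */
}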
+
+
+/**
+ * fc_bsg_host_handler - handler for bsg requests for a fc host
+ * @q: fc host request queue
+ */
+static void
+fc_bsg_host_handler(struct request_queue *q)
+{
+ struct Scsi_Host *shost = q->queuedata;
+
+ fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
+}
+
+
+/**
+ * fc_bsg_rport_handler - handler for bsg requests for a fc rport
+ * @q: rport request queue
+ */
+static void
+fc_bsg_rport_handler(struct request_queue *q)
+{
+ struct fc_rport *rport = q->queuedata;
+ struct Scsi_Host *shost = rport_to_shost(rport);
+
+ fc_bsg_request_handler(q, shost, rport, &rport->dev);
+}
+
+
+/**
+ * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
+ * @shost: shost for fc_host
+ * @fc_host: fc_host that the bsg structures are being added to
+ */
+static int
+fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
+{
+ struct device *dev = &shost->shost_gendev;
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct request_queue *q;
+ int err;
+ char bsg_name[BUS_ID_SIZE]; /*20*/
+
+ fc_host->rqst_q = NULL;
+
+ if (!i->f->bsg_request)
+ return -ENOTSUPP;
+
+ snprintf(bsg_name, sizeof(bsg_name),
+ "fc_host%d", shost->host_no);
+
+ q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+ if (!q) {
+ printk(KERN_ERR "fc_host%d: bsg interface failed to "
+ "initialize - no request queue\n",
+ shost->host_no);
+ return -ENOMEM;
+ }
+
+ q->queuedata = shost;
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
+
+ err = bsg_register_queue(q, dev, bsg_name, NULL);
+ if (err) {
+ printk(KERN_ERR "fc_host%d: bsg interface failed to "
+ "initialize - register queue\n",
+ shost->host_no);
+ blk_cleanup_queue(q);
+ return err;
+ }
+
+ fc_host->rqst_q = q;
+ return 0;
+}
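From the LLDD side, all that a bsg_request callback needs to do is consume job->request and the payloads, fill in job->reply, and call job->job_done(). A hypothetical handler, based only on the fields used by the dispatch code above (names and the vendor-command handling are assumptions, not taken from any real driver):

#include <linux/errno.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_transport_fc.h>

/* Hypothetical LLDD entry point for fc_function_template.bsg_request */
static int demo_bsg_request(struct fc_bsg_job *job)
{
	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* pretend the vendor command completed with no payload */
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = 0;
		job->reply_len = sizeof(uint32_t);
		job->job_done(job);
		return 0;
	default:
		/* anything not handled is rejected synchronously; the
		 * transport turns the errno into job->reply->result */
		return -ENOSYS;
	}
}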
+
+
+/**
+ * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
+ * @shost: shost that rport is attached to
+ * @rport: rport that the bsg hooks are being attached to
+ */
+static int
+fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
+{
+ struct device *dev = &rport->dev;
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct request_queue *q;
+ int err;
+
+ rport->rqst_q = NULL;
+
+ if (!i->f->bsg_request)
+ return -ENOTSUPP;
+
+ q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+ if (!q) {
+ printk(KERN_ERR "%s: bsg interface failed to "
+ "initialize - no request queue\n",
+ dev->kobj.name);
+ return -ENOMEM;
+ }
+
+ q->queuedata = rport;
+ queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+ blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
+
+ err = bsg_register_queue(q, dev, NULL, NULL);
+ if (err) {
+ printk(KERN_ERR "%s: bsg interface failed to "
+ "initialize - register queue\n",
+ dev->kobj.name);
+ blk_cleanup_queue(q);
+ return err;
+ }
+
+ rport->rqst_q = q;
+ return 0;
+}
+
+
+/**
+ * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
+ * @q: the request_queue that is to be torn down.
+ */
+static void
+fc_bsg_remove(struct request_queue *q)
+{
+ if (q) {
+ bsg_unregister_queue(q);
+ blk_cleanup_queue(q);
+ }
+}
+
+
/* Original Author: Martin Hicks */
MODULE_AUTHOR("James Smart");
MODULE_DESCRIPTION("FC Transport Attributes");
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f49f55c6bfc8..654a34fb04cb 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -234,8 +234,10 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
spi_width(starget) = 0; /* narrow */
spi_max_width(starget) = 1;
spi_iu(starget) = 0; /* no IU */
+ spi_max_iu(starget) = 1;
spi_dt(starget) = 0; /* ST */
spi_qas(starget) = 0;
+ spi_max_qas(starget) = 1;
spi_wr_flow(starget) = 0;
spi_rd_strm(starget) = 0;
spi_rti(starget) = 0;
@@ -360,9 +362,9 @@ static DEVICE_ATTR(field, S_IRUGO, \
/* The Parallel SCSI Tranport Attributes: */
spi_transport_max_attr(offset, "%d\n");
spi_transport_max_attr(width, "%d\n");
-spi_transport_rd_attr(iu, "%d\n");
+spi_transport_max_attr(iu, "%d\n");
spi_transport_rd_attr(dt, "%d\n");
-spi_transport_rd_attr(qas, "%d\n");
+spi_transport_max_attr(qas, "%d\n");
spi_transport_rd_attr(wr_flow, "%d\n");
spi_transport_rd_attr(rd_strm, "%d\n");
spi_transport_rd_attr(rti, "%d\n");
@@ -874,13 +876,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
/* try QAS requests; this should be harmless to set if the
* target supports it */
- if (scsi_device_qas(sdev)) {
+ if (scsi_device_qas(sdev) && spi_max_qas(starget)) {
DV_SET(qas, 1);
} else {
DV_SET(qas, 0);
}
- if (scsi_device_ius(sdev) && min_period < 9) {
+ if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) {
/* This u320 (or u640). Set IU transfers */
DV_SET(iu, 1);
/* Then set the optional parameters */
@@ -1412,12 +1414,18 @@ static mode_t target_attribute_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_iu.attr &&
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(iu);
+ else if (attr == &dev_attr_max_iu.attr &&
+ spi_support_ius(starget))
+ return TARGET_ATTRIBUTE_HELPER(iu);
else if (attr == &dev_attr_dt.attr &&
spi_support_dt(starget))
return TARGET_ATTRIBUTE_HELPER(dt);
else if (attr == &dev_attr_qas.attr &&
spi_support_qas(starget))
return TARGET_ATTRIBUTE_HELPER(qas);
+ else if (attr == &dev_attr_max_qas.attr &&
+ spi_support_qas(starget))
+ return TARGET_ATTRIBUTE_HELPER(qas);
else if (attr == &dev_attr_wr_flow.attr &&
spi_support_ius(starget))
return TARGET_ATTRIBUTE_HELPER(wr_flow);
@@ -1447,8 +1455,10 @@ static struct attribute *target_attributes[] = {
&dev_attr_width.attr,
&dev_attr_max_width.attr,
&dev_attr_iu.attr,
+ &dev_attr_max_iu.attr,
&dev_attr_dt.attr,
&dev_attr_qas.attr,
+ &dev_attr_max_qas.attr,
&dev_attr_wr_flow.attr,
&dev_attr_rd_strm.attr,
&dev_attr_rti.attr,
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index a4cf1079b312..66f52674ca0c 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1332,44 +1332,46 @@ err_unreg:
return ret;
}
-static int sci_suspend(struct platform_device *dev, pm_message_t state)
+static int sci_suspend(struct device *dev)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sh_sci_priv *priv = dev_get_drvdata(dev);
struct sci_port *p;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(p, &priv->ports, node)
uart_suspend_port(&sci_uart_driver, &p->port);
-
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
-static int sci_resume(struct platform_device *dev)
+static int sci_resume(struct device *dev)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
+ struct sh_sci_priv *priv = dev_get_drvdata(dev);
struct sci_port *p;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(p, &priv->ports, node)
uart_resume_port(&sci_uart_driver, &p->port);
-
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
+static struct dev_pm_ops sci_dev_pm_ops = {
+ .suspend = sci_suspend,
+ .resume = sci_resume,
+};
+
static struct platform_driver sci_driver = {
.probe = sci_probe,
.remove = __devexit_p(sci_remove),
- .suspend = sci_suspend,
- .resume = sci_resume,
.driver = {
.name = "sh-sci",
.owner = THIS_MODULE,
+ .pm = &sci_dev_pm_ops,
},
};
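The sh-sci change follows the general pattern of moving legacy platform_driver suspend/resume callbacks onto dev_pm_ops. A minimal template of that conversion, with generic names, might look like this:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* quiesce the device using dev_get_drvdata(dev) */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* restore device state */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
};

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.owner	= THIS_MODULE,
		.pm	= &demo_pm_ops,
	},
};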
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index d687a9b93d03..3dd231a643b5 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/bootmem.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
@@ -675,7 +674,7 @@ void __init register_intc_controller(struct intc_desc *desc)
unsigned int i, k, smp;
struct intc_desc_int *d;
- d = alloc_bootmem(sizeof(*d));
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
INIT_LIST_HEAD(&d->list);
list_add(&d->list, &intc_list);
@@ -687,9 +686,9 @@ void __init register_intc_controller(struct intc_desc *desc)
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
#endif
- d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg));
+ d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
#ifdef CONFIG_SMP
- d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp));
+ d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
#endif
k = 0;
@@ -702,7 +701,7 @@ void __init register_intc_controller(struct intc_desc *desc)
}
if (desc->prio_regs) {
- d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio));
+ d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);
for (i = 0; i < desc->nr_prio_regs; i++) {
smp = IS_SMP(desc->prio_regs[i]);
@@ -712,7 +711,7 @@ void __init register_intc_controller(struct intc_desc *desc)
}
if (desc->sense_regs) {
- d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense));
+ d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);
for (i = 0; i < desc->nr_sense_regs; i++) {
k += save_reg(d, k, desc->sense_regs[i].reg, 0);
@@ -757,7 +756,7 @@ void __init register_intc_controller(struct intc_desc *desc)
vect2->enum_id = 0;
if (!intc_evt2irq_table)
- intc_evt2irq_table = alloc_bootmem(NR_IRQS);
+ intc_evt2irq_table = kzalloc(NR_IRQS, GFP_NOWAIT);
if (!intc_evt2irq_table) {
pr_warning("intc: cannot allocate evt2irq!\n");
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 12e443cc4ac9..f5b3fdbb1e27 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -530,9 +530,6 @@ atmel_spi_interrupt(int irq, void *dev_id)
return ret;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
-
static int atmel_spi_setup(struct spi_device *spi)
{
struct atmel_spi *as;
@@ -555,8 +552,6 @@ static int atmel_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- if (bits == 0)
- bits = 8;
if (bits < 8 || bits > 16) {
dev_dbg(&spi->dev,
"setup: invalid bits_per_word %u (8 to 16)\n",
@@ -564,12 +559,6 @@ static int atmel_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
/* see notes above re chipselect */
if (!atmel_spi_is_v2()
&& spi->chip_select == 0
@@ -775,6 +764,9 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
if (!master)
goto out_free;
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
master->bus_num = pdev->id;
master->num_chipselect = 4;
master->setup = atmel_spi_setup;
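The deleted MODEBITS checks and bits_per_word defaulting are not lost: with master->mode_bits set in probe(), the SPI core can perform this validation centrally when spi_setup() is called. A simplified sketch of what that core-side check amounts to (the real helper in drivers/spi/spi.c is more involved):

#include <linux/errno.h>
#include <linux/device.h>
#include <linux/spi/spi.h>

/* Simplified sketch of the core-side validation that replaces the
 * per-driver MODEBITS checks removed in this patch. */
static int demo_spi_setup(struct spi_device *spi)
{
	unsigned bad_bits;

	/* reject mode bits the controller did not declare in probe() */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	if (bad_bits) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	/* default the word size centrally instead of in every driver */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	return spi->master->setup(spi);
}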
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index b02f25c702fd..76cbc1a66598 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -284,27 +284,16 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
-
static int au1550_spi_setup(struct spi_device *spi)
{
struct au1550_spi *hw = spi_master_get_devdata(spi->master);
- if (spi->bits_per_word == 0)
- spi->bits_per_word = 8;
if (spi->bits_per_word < 4 || spi->bits_per_word > 24) {
dev_err(&spi->dev, "setup: invalid bits_per_word=%d\n",
spi->bits_per_word);
return -EINVAL;
}
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
if (spi->max_speed_hz == 0)
spi->max_speed_hz = hw->freq_max;
if (spi->max_speed_hz > hw->freq_max
@@ -781,6 +770,9 @@ static int __init au1550_spi_probe(struct platform_device *pdev)
goto err_nomem;
}
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+
hw = spi_master_get_devdata(master);
hw->master = spi_master_get(master);
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 68c77a911595..1b74d5ca03f3 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
@@ -30,8 +31,7 @@
struct mpc52xx_psc_spi {
/* fsl_spi_platform data */
- void (*activate_cs)(u8, u8);
- void (*deactivate_cs)(u8, u8);
+ void (*cs_control)(struct spi_device *spi, bool on);
u32 sysclk;
/* driver internal data */
@@ -111,18 +111,16 @@ static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi)
out_be16((u16 __iomem *)&psc->ccr, ccr);
mps->bits_per_word = cs->bits_per_word;
- if (mps->activate_cs)
- mps->activate_cs(spi->chip_select,
- (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
}
static void mpc52xx_psc_spi_deactivate_cs(struct spi_device *spi)
{
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
- if (mps->deactivate_cs)
- mps->deactivate_cs(spi->chip_select,
- (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
}
#define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1)
@@ -261,9 +259,6 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
spin_unlock_irq(&mps->lock);
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
-
static int mpc52xx_psc_spi_setup(struct spi_device *spi)
{
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -273,12 +268,6 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
if (spi->bits_per_word%8)
return -EINVAL;
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
@@ -385,18 +374,19 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
dev_set_drvdata(dev, master);
mps = spi_master_get_devdata(master);
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+
mps->irq = irq;
if (pdata == NULL) {
dev_warn(dev, "probe called without platform data, no "
- "(de)activate_cs function will be called\n");
- mps->activate_cs = NULL;
- mps->deactivate_cs = NULL;
+ "cs_control function will be called\n");
+ mps->cs_control = NULL;
mps->sysclk = 0;
master->bus_num = bus_num;
master->num_chipselect = 255;
} else {
- mps->activate_cs = pdata->activate_cs;
- mps->deactivate_cs = pdata->deactivate_cs;
+ mps->cs_control = pdata->cs_control;
mps->sysclk = pdata->sysclk;
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->max_chipselect;
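A minimal board-side sketch of the single cs_control() hook that the mpc52xx hunk above introduces in place of the two (de)activate_cs callbacks; the GPIO table and function name are illustrative, not part of the patch. Note that the driver passes the electrical level to drive, having already folded SPI_CS_HIGH in, so the callback only toggles the line:

#include <linux/gpio.h>
#include <linux/spi/spi.h>

/* Hypothetical board wiring: one chip-select GPIO per device. */
static const unsigned int my_board_cs_gpio[] = { 10, 11 };

static void my_board_cs_control(struct spi_device *spi, bool on)
{
	/* 'on' is the level to drive; SPI_CS_HIGH is already taken
	 * into account by the controller driver before calling us. */
	gpio_set_value(my_board_cs_gpio[spi->chip_select], on);
}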
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index d6d0c5d241ce..eee4b6e0af2c 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -603,9 +603,6 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
return 0;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
-
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
@@ -613,15 +610,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
- if (spi->bits_per_word == 0)
- spi->bits_per_word = 8;
- else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+ if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
spi->bits_per_word);
return -EINVAL;
@@ -984,6 +973,9 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
if (pdev->id != -1)
master->bus_num = pdev->id;
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index fe8b9ac0ccef..aa90ddb37066 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -339,8 +339,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
bits = spi->bits_per_word;
if (t != NULL && t->bits_per_word)
bits = t->bits_per_word;
- if (!bits)
- bits = 8;
if (bits > 16) {
pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
@@ -449,19 +447,10 @@ done:
return status;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
-
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
@@ -522,6 +511,9 @@ static int __init uwire_probe(struct platform_device *pdev)
uwire_write_reg(UWIRE_SR3, 1);
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
master->setup = uwire_setup;
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c
index c8b0babdc2a6..3aea50da7b29 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/orion_spi.c
@@ -358,20 +358,11 @@ static int orion_spi_setup(struct spi_device *spi)
orion_spi = spi_master_get_devdata(spi->master);
- if (spi->mode) {
- dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode);
- return -EINVAL;
- }
-
/* Fix ac timing if required. */
if (orion_spi->spi_info->enable_clock_fix)
orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
(1 << 14));
- if (spi->bits_per_word == 0)
- spi->bits_per_word = 8;
-
if ((spi->max_speed_hz == 0)
|| (spi->max_speed_hz > orion_spi->max_speed))
spi->max_speed_hz = orion_spi->max_speed;
@@ -476,6 +467,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
if (pdev->id != -1)
master->bus_num = pdev->id;
+ /* we support only mode 0, and no options */
+ master->mode_bits = 0;
+
master->setup = orion_spi_setup;
master->transfer = orion_spi_transfer;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 3f3c08c6ba4e..d949dbf1141f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1185,9 +1185,6 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
return 0;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA)
-
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
struct pxa2xx_spi_chip *chip_info)
{
@@ -1236,9 +1233,6 @@ static int setup(struct spi_device *spi)
uint tx_thres = TX_THRESH_DFLT;
uint rx_thres = RX_THRESH_DFLT;
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
if (drv_data->ssp_type != PXA25x_SSP
&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
@@ -1255,12 +1249,6 @@ static int setup(struct spi_device *spi)
return -EINVAL;
}
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
@@ -1328,18 +1316,14 @@ static int setup(struct spi_device *spi)
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
if (drv_data->ssp_type != PXA25x_SSP)
- dev_dbg(&spi->dev, "%d bits/word, %ld Hz, mode %d, %s\n",
- spi->bits_per_word,
+ dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
clk_get_rate(ssp->clk)
/ (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
- spi->mode & 0x3,
chip->enable_dma ? "DMA" : "PIO");
else
- dev_dbg(&spi->dev, "%d bits/word, %ld Hz, mode %d, %s\n",
- spi->bits_per_word,
+ dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
clk_get_rate(ssp->clk) / 2
/ (1 + ((chip->cr0 & SSCR0_SCR) >> 8)),
- spi->mode & 0x3,
chip->enable_dma ? "DMA" : "PIO");
if (spi->bits_per_word <= 8) {
@@ -1500,6 +1484,9 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->pdev = pdev;
drv_data->ssp = ssp;
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
master->bus_num = pdev->id;
master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8eba98c8ed1e..70845ccd85c3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -265,7 +265,7 @@ int spi_add_device(struct spi_device *spi)
* normally rely on the device being setup. Devices
* using SPI_CS_HIGH can't coexist well otherwise...
*/
- status = spi->master->setup(spi);
+ status = spi_setup(spi);
if (status < 0) {
dev_err(dev, "can't %s %s, status %d\n",
"setup", dev_name(&spi->dev), status);
@@ -583,6 +583,70 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
/*-------------------------------------------------------------------------*/
+/* Core methods for SPI master protocol drivers. Some of the
+ * other core methods are currently defined as inline functions.
+ */
+
+/**
+ * spi_setup - setup SPI mode and clock rate
+ * @spi: the device whose settings are being modified
+ * Context: can sleep, and no requests are queued to the device
+ *
+ * SPI protocol drivers may need to update the transfer mode if the
+ * device doesn't work with its default. They may likewise need
+ * to update clock rates or word sizes from initial values. This function
+ * changes those settings, and must be called from a context that can sleep.
+ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+ * effect the next time the device is selected and data is transferred to
+ * or from it. When this function returns, the spi device is deselected.
+ *
+ * Note that this call will fail if the protocol driver specifies an option
+ * that the underlying controller or its driver does not support. For
+ * example, not all hardware supports wire transfers using nine bit words,
+ * LSB-first wire encoding, or active-high chipselects.
+ */
+int spi_setup(struct spi_device *spi)
+{
+ unsigned bad_bits;
+ int status;
+
+ /* help drivers fail *cleanly* when they need options
+ * that aren't supported with their current master
+ */
+ bad_bits = spi->mode & ~spi->master->mode_bits;
+ if (bad_bits) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ bad_bits);
+ return -EINVAL;
+ }
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ status = spi->master->setup(spi);
+
+ dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
+ "%u bits/w, %u Hz max --> %d\n",
+ (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+ (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (spi->mode & SPI_3WIRE) ? "3wire, " : "",
+ (spi->mode & SPI_LOOP) ? "loopback, " : "",
+ spi->bits_per_word, spi->max_speed_hz,
+ status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_setup);
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Utility methods for SPI master protocol drivers, layered on
+ * top of the core. Some other utility methods are defined as
+ * inline functions.
+ */
+
static void spi_complete(void *arg)
{
complete(arg);
@@ -636,8 +700,8 @@ static u8 *buf;
* @spi: device with which data will be exchanged
* @txbuf: data to be written (need not be dma-safe)
* @n_tx: size of txbuf, in bytes
- * @rxbuf: buffer into which data will be read
- * @n_rx: size of rxbuf, in bytes (need not be dma-safe)
+ * @rxbuf: buffer into which data will be read (need not be dma-safe)
+ * @n_rx: size of rxbuf, in bytes
* Context: can sleep
*
* This performs a half duplex MicroWire style transaction with the
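For context, a hedged sketch of how a protocol driver is expected to use the new spi_setup() core call added above; the chip name and values are illustrative, not from the patch. The unsupported-mode check and the bits_per_word default now live here in the core rather than in every controller driver:

#include <linux/spi/spi.h>

/* Hypothetical SPI protocol driver probe. */
static int mychip_probe(struct spi_device *spi)
{
	int ret;

	spi->mode = SPI_CPOL | SPI_CPHA;	/* SPI mode 3 */
	spi->bits_per_word = 0;			/* 0: the core defaults this to 8 */
	spi->max_speed_hz = 1000000;

	/* Fails cleanly with -EINVAL if the controller's mode_bits
	 * cannot provide the requested mode. */
	ret = spi_setup(spi);
	if (ret < 0)
		return ret;

	return 0;
}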
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 011c5bddba6a..73e24ef5a2f9 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -169,7 +169,7 @@ static int bfin_spi_flush(struct driver_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
/* wait for stop and clear stat */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && limit--)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit)
cpu_relax();
write_STAT(drv_data, BIT_STAT_CLR);
@@ -1010,16 +1010,6 @@ static int bfin_spi_setup(struct spi_device *spi)
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
int ret;
- /* Abort device setup if requested features are not supported */
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
- dev_err(&spi->dev, "requested mode not fully supported\n");
- return -EINVAL;
- }
-
- /* Zero (the default) here means 8 bits */
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
return -EINVAL;
@@ -1287,6 +1277,9 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
drv_data->pdev = pdev;
drv_data->pin_req = platform_info->pin_req;
+ /* the spi->mode bits supported by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
master->bus_num = pdev->id;
master->num_chipselect = platform_info->num_chipselect;
master->cleanup = bfin_spi_cleanup;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 85e61f451218..2a5abc08e857 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -188,12 +188,6 @@ int spi_bitbang_setup(struct spi_device *spi)
bitbang = spi_master_get_devdata(spi->master);
- /* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
- * add those to master->flags, and provide the other support.
- */
- if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
- return -EINVAL;
-
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
@@ -201,9 +195,6 @@ int spi_bitbang_setup(struct spi_device *spi)
spi->controller_state = cs;
}
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
/* per-word shift register access, in hardware or bitbanging */
cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
if (!cs->txrx_word)
@@ -213,9 +204,7 @@ int spi_bitbang_setup(struct spi_device *spi)
if (retval < 0)
return retval;
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
- __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
- spi->bits_per_word, 2 * cs->nsecs);
+ dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
/* NOTE we _need_ to call chipselect() early, ideally with adapter
* setup, unless the hardware defaults cooperate to avoid confusion
@@ -457,6 +446,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
spin_lock_init(&bitbang->lock);
INIT_LIST_HEAD(&bitbang->queue);
+ if (!bitbang->master->mode_bits)
+ bitbang->master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+
if (!bitbang->master->transfer)
bitbang->master->transfer = spi_bitbang_transfer;
if (!bitbang->txrx_bufs) {
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 0671aeef5792..c195e45f7f35 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1171,9 +1171,6 @@ msg_rejected:
return -EINVAL;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
-
/* On first setup, bad values must free the chip_data memory since they will
cause spi_new_device to fail. Bad values set up by the protocol driver on
later calls are simply not applied, and the calling driver is notified. */
@@ -1186,12 +1183,6 @@ static int setup(struct spi_device *spi)
u32 tmp;
int status = 0;
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
/* Get controller data */
chip_info = spi->controller_data;
@@ -1286,10 +1277,7 @@ static int setup(struct spi_device *spi)
/* SPI word width */
tmp = spi->bits_per_word;
- if (tmp == 0) {
- tmp = 8;
- spi->bits_per_word = 8;
- } else if (tmp > 16) {
+ if (tmp > 16) {
status = -EINVAL;
dev_err(&spi->dev,
"setup - "
@@ -1481,6 +1469,9 @@ static int __init spi_imx_probe(struct platform_device *pdev)
drv_data->master_info = platform_info;
drv_data->pdev = pdev;
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
master->bus_num = pdev->id;
master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index a32ccb44065e..ce61be98e06d 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -419,10 +419,6 @@ static void mpc83xx_spi_work(struct work_struct *work)
spin_unlock_irq(&mpc83xx_spi->lock);
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
- | SPI_LSB_FIRST | SPI_LOOP)
-
static int mpc83xx_spi_setup(struct spi_device *spi)
{
struct mpc83xx_spi *mpc83xx_spi;
@@ -430,12 +426,6 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
u32 hw_mode;
struct spi_mpc83xx_cs *cs = spi->controller_state;
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
if (!spi->max_speed_hz)
return -EINVAL;
@@ -447,9 +437,6 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
}
mpc83xx_spi = spi_master_get_devdata(spi->master);
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
hw_mode = cs->hw_mode; /* Save original settings */
cs->hw_mode = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode);
/* mask out bits we are going to set */
@@ -471,9 +458,6 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
return retval;
}
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u Hz\n",
- __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
- spi->bits_per_word, spi->max_speed_hz);
#if 0 /* Don't think this is needed */
/* NOTE we _need_ to call chipselect() early, ideally with adapter
* setup, unless the hardware defaults cooperate to avoid confusion
@@ -568,6 +552,10 @@ mpc83xx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
dev_set_drvdata(dev, master);
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
+ | SPI_LSB_FIRST | SPI_LOOP;
+
master->setup = mpc83xx_spi_setup;
master->transfer = mpc83xx_spi_transfer;
master->cleanup = mpc83xx_spi_cleanup;
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index b3ebc1d0f85f..e0d44af4745a 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,32 +146,16 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
return 0;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
-
static int s3c24xx_spi_setup(struct spi_device *spi)
{
int ret;
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
- if (spi->mode & ~MODEBITS) {
- dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
- spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
ret = s3c24xx_spi_setupxfer(spi, NULL);
if (ret < 0) {
dev_err(&spi->dev, "setupxfer returned %d\n", ret);
return ret;
}
- dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n",
- __func__, spi->mode, spi->bits_per_word,
- spi->max_speed_hz);
-
return 0;
}
@@ -290,6 +274,9 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
/* setup the master state. */
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 29cbb065618a..96057de133ad 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -110,23 +110,17 @@ static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
-
static int txx9spi_setup(struct spi_device *spi)
{
struct txx9spi *c = spi_master_get_devdata(spi->master);
u8 bits_per_word;
- if (spi->mode & ~MODEBITS)
- return -EINVAL;
-
if (!spi->max_speed_hz
|| spi->max_speed_hz > c->max_speed_hz
|| spi->max_speed_hz < c->min_speed_hz)
return -EINVAL;
- bits_per_word = spi->bits_per_word ? : 8;
+ bits_per_word = spi->bits_per_word;
if (bits_per_word != 8 && bits_per_word != 16)
return -EINVAL;
@@ -414,6 +408,9 @@ static int __init txx9spi_probe(struct platform_device *dev)
(unsigned long long)res->start, irq,
(c->baseclk + 500000) / 1000000);
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+
master->bus_num = dev->id;
master->setup = txx9spi_setup;
master->transfer = txx9spi_transfer;
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 494d3f756e29..46b8c5c2f45e 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -158,9 +158,6 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
return 0;
}
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA)
-
static int xilinx_spi_setup(struct spi_device *spi)
{
struct spi_bitbang *bitbang;
@@ -170,22 +167,10 @@ static int xilinx_spi_setup(struct spi_device *spi)
xspi = spi_master_get_devdata(spi->master);
bitbang = &xspi->bitbang;
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
- if (spi->mode & ~MODEBITS) {
- dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
- __func__, spi->mode & ~MODEBITS);
- return -EINVAL;
- }
-
retval = xilinx_spi_setup_transfer(spi, NULL);
if (retval < 0)
return retval;
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
- __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
-
return 0;
}
@@ -333,6 +318,9 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev,
goto put_master;
}
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+
xspi = spi_master_get_devdata(master);
xspi->bitbang.master = spi_master_get(master);
xspi->bitbang.chipselect = xilinx_spi_chipselect;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d0fcf36c2ab2..925657889f0f 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -117,5 +117,7 @@ source "drivers/staging/serqt_usb/Kconfig"
source "drivers/gpu/drm/radeon/Kconfig"
+source "drivers/staging/octeon/Kconfig"
+
endif # !STAGING_EXCLUDE_BUILD
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 47dfd5b4288b..6da9c74c1840 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_PLAN9AUTH) += p9auth/
obj-$(CONFIG_HECI) += heci/
obj-$(CONFIG_LINE6_USB) += line6/
obj-$(CONFIG_USB_SERIAL_QUATECH_ESU100) += serqt_usb/
+obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
new file mode 100644
index 000000000000..536e2382de54
--- /dev/null
+++ b/drivers/staging/octeon/Kconfig
@@ -0,0 +1,12 @@
+config OCTEON_ETHERNET
+ tristate "Cavium Networks Octeon Ethernet support"
+ depends on CPU_CAVIUM_OCTEON
+ select MII
+ help
+ This driver supports the builtin ethernet ports on Cavium
+ Networks' products in the Octeon family. This driver supports the
+ CN3XXX and CN5XXX Octeon processors.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-ethernet.
+
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
new file mode 100644
index 000000000000..3c839e37d37f
--- /dev/null
+++ b/drivers/staging/octeon/Makefile
@@ -0,0 +1,30 @@
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2009 Cavium Networks
+#
+
+#
+# Makefile for Cavium OCTEON on-board ethernet driver
+#
+
+obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
+
+octeon-ethernet-objs := ethernet.o
+octeon-ethernet-objs += ethernet-common.o
+octeon-ethernet-objs += ethernet-mdio.o
+octeon-ethernet-objs += ethernet-mem.o
+octeon-ethernet-objs += ethernet-proc.o
+octeon-ethernet-objs += ethernet-rgmii.o
+octeon-ethernet-objs += ethernet-rx.o
+octeon-ethernet-objs += ethernet-sgmii.o
+octeon-ethernet-objs += ethernet-spi.o
+octeon-ethernet-objs += ethernet-tx.o
+octeon-ethernet-objs += ethernet-xaui.o
+octeon-ethernet-objs += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
+ cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \
+ cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \
+ cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \
+ cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o
+
diff --git a/drivers/staging/octeon/cvmx-address.h b/drivers/staging/octeon/cvmx-address.h
new file mode 100644
index 000000000000..3c74d826e2e6
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-address.h
@@ -0,0 +1,274 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2009 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Typedefs and defines for working with Octeon physical addresses.
+ *
+ */
+#ifndef __CVMX_ADDRESS_H__
+#define __CVMX_ADDRESS_H__
+
+#if 0
+typedef enum {
+ CVMX_MIPS_SPACE_XKSEG = 3LL,
+ CVMX_MIPS_SPACE_XKPHYS = 2LL,
+ CVMX_MIPS_SPACE_XSSEG = 1LL,
+ CVMX_MIPS_SPACE_XUSEG = 0LL
+} cvmx_mips_space_t;
+#endif
+
+typedef enum {
+ CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
+ CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
+ CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
+} cvmx_mips_xkseg_space_t;
+
+/* decodes <14:13> of a kseg3 window address */
+typedef enum {
+ CVMX_ADD_WIN_SCR = 0L,
+ /* see cvmx_add_win_dma_dec_t for further decode */
+ CVMX_ADD_WIN_DMA = 1L,
+ CVMX_ADD_WIN_UNUSED = 2L,
+ CVMX_ADD_WIN_UNUSED2 = 3L
+} cvmx_add_win_dec_t;
+
+/* decode within DMA space */
+typedef enum {
+ /*
+ * Add store data to the write buffer entry, allocating it if
+ * necessary.
+ */
+ CVMX_ADD_WIN_DMA_ADD = 0L,
+ /* send out the write buffer entry to DRAM */
+ CVMX_ADD_WIN_DMA_SENDMEM = 1L,
+ /* store data must be normal DRAM memory space address in this case */
+ /* send out the write buffer entry as an IOBDMA command */
+ CVMX_ADD_WIN_DMA_SENDDMA = 2L,
+ /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
+ /* send out the write buffer entry as an IO write */
+ CVMX_ADD_WIN_DMA_SENDIO = 3L,
+ /* store data must be normal IO space address in this case */
+ /* send out a single-tick command on the NCB bus */
+ CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
+ /* no write buffer data needed/used */
+} cvmx_add_win_dma_dec_t;
+
+/*
+ * Physical Address Decode
+ *
+ * Octeon-I HW never interprets this X (<39:36> reserved
+ * for future expansion), software should set to 0.
+ *
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0FFF FFFF
+ *
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ *
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXXF FFFF FFFF
+ *
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00XF FFFF FFFF
+ *
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
+ *
+ * Decode of all Octeon addresses
+ */
+typedef union {
+
+ uint64_t u64;
+ /* mapped or unmapped virtual address */
+ struct {
+ uint64_t R:2;
+ uint64_t offset:62;
+ } sva;
+
+ /* mapped USEG virtual addresses (typically) */
+ struct {
+ uint64_t zeroes:33;
+ uint64_t offset:31;
+ } suseg;
+
+ /* mapped or unmapped virtual address */
+ struct {
+ uint64_t ones:33;
+ uint64_t sp:2;
+ uint64_t offset:29;
+ } sxkseg;
+
+ /*
+ * physical address accessed through xkphys unmapped virtual
+ * address.
+ */
+ struct {
+ uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
+ uint64_t cca:3; /* ignored by octeon */
+ uint64_t mbz:10;
+ uint64_t pa:49; /* physical address */
+ } sxkphys;
+
+ /* physical address */
+ struct {
+ uint64_t mbz:15;
+ /* if set, the address is uncached and resides on MCB bus */
+ uint64_t is_io:1;
+ /*
+ * the hardware ignores this field when is_io==0, else
+ * device ID.
+ */
+ uint64_t did:8;
+ /* the hardware ignores <39:36> in Octeon I */
+ uint64_t unaddr:4;
+ uint64_t offset:36;
+ } sphys;
+
+ /* physical mem address */
+ struct {
+ /* technically, <47:40> are don't-cares */
+ uint64_t zeroes:24;
+ /* the hardware ignores <39:36> in Octeon I */
+ uint64_t unaddr:4;
+ uint64_t offset:36;
+ } smem;
+
+ /* physical IO address */
+ struct {
+ uint64_t mem_region:2;
+ uint64_t mbz:13;
+ /* 1 in this case */
+ uint64_t is_io:1;
+ /*
+ * The hardware ignores this field when is_io==0, else
+ * device ID.
+ */
+ uint64_t did:8;
+ /* the hardware ignores <39:36> in Octeon I */
+ uint64_t unaddr:4;
+ uint64_t offset:36;
+ } sio;
+
+ /*
+ * Scratchpad virtual address - accessed through a window at
+ * the end of kseg3
+ */
+ struct {
+ uint64_t ones:49;
+ /* CVMX_ADD_WIN_SCR (0) in this case */
+ cvmx_add_win_dec_t csrdec:2;
+ uint64_t addr:13;
+ } sscr;
+
+ /* there should only be stores to IOBDMA space, no loads */
+ /*
+ * IOBDMA virtual address - accessed through a window at the
+ * end of kseg3
+ */
+ struct {
+ uint64_t ones:49;
+ uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */
+ uint64_t unused2:3;
+ uint64_t type:3;
+ uint64_t addr:7;
+ } sdma;
+
+ struct {
+ uint64_t didspace:24;
+ uint64_t unused:40;
+ } sfilldidspace;
+
+} cvmx_addr_t;
+
+/* These macros are used by 32-bit applications */
+
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) \
+ (((int32_t)segment << 31) | (int32_t)(add))
+
+/*
+ * Currently all IOs are performed using XKPHYS addressing. Linux uses
+ * the CvmMemCtl register to enable XKPHYS addressing to IO space from
+ * user mode. Future OSes may need to change the upper bits of IO
+ * addresses. The following define controls the upper two bits for all
+ * IO addresses generated by the simple executive library.
+ */
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
+#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
+#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
+
+ /* from include/ncb_rsl_id.v */
+#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
+#define CVMX_OCT_DID_GMX0 1ULL
+#define CVMX_OCT_DID_GMX1 2ULL
+#define CVMX_OCT_DID_PCI 3ULL
+#define CVMX_OCT_DID_KEY 4ULL
+#define CVMX_OCT_DID_FPA 5ULL
+#define CVMX_OCT_DID_DFA 6ULL
+#define CVMX_OCT_DID_ZIP 7ULL
+#define CVMX_OCT_DID_RNG 8ULL
+#define CVMX_OCT_DID_IPD 9ULL
+#define CVMX_OCT_DID_PKT 10ULL
+#define CVMX_OCT_DID_TIM 11ULL
+#define CVMX_OCT_DID_TAG 12ULL
+ /* the rest are not on the IO bus */
+#define CVMX_OCT_DID_L2C 16ULL
+#define CVMX_OCT_DID_LMC 17ULL
+#define CVMX_OCT_DID_SPX0 18ULL
+#define CVMX_OCT_DID_SPX1 19ULL
+#define CVMX_OCT_DID_PIP 20ULL
+#define CVMX_OCT_DID_ASX0 22ULL
+#define CVMX_OCT_DID_ASX1 23ULL
+#define CVMX_OCT_DID_IOB 30ULL
+
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
+#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
+
+#endif /* __CVMX_ADDRESS_H__ */
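A worked example of the IO-address macros above (illustrative arithmetic only; CVMX_IO_SEG resolves to XKPHYS segment number 2 per the simple executive headers):

/*
 *   CVMX_ADD_IO_SEG(0x00011800B0000000ull)
 * = ((uint64_t)CVMX_IO_SEG << 62) | 0x00011800B0000000ull
 * = 0x8000000000000000ull         | 0x00011800B0000000ull
 * = 0x80011800B0000000ull
 *
 * i.e. the uncached XKPHYS mapping of the ASX0 register block from
 * which the CVMX_ASXX_* macros in the next file build their CSR
 * addresses.
 */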
diff --git a/drivers/staging/octeon/cvmx-asxx-defs.h b/drivers/staging/octeon/cvmx-asxx-defs.h
new file mode 100644
index 000000000000..91415a85e8d2
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-asxx-defs.h
@@ -0,0 +1,475 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_ASXX_DEFS_H__
+#define __CVMX_ASXX_DEFS_H__
+
+#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000180ull + (((block_id) & 0) * 0x8000000ull))
+#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000188ull + (((block_id) & 0) * 0x8000000ull))
+#define CVMX_ASXX_INT_EN(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000018ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_INT_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000010ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_MII_RX_DAT_SET(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000190ull + (((block_id) & 0) * 0x8000000ull))
+#define CVMX_ASXX_PRT_LOOP(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000040ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_BYPASS(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000248ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000250ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_COMP(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000220ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_DATA_DRV(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000218ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000210ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000230ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000240ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000228ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000238ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RLD_SETTING(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000258ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000020ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RX_PRT_EN(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000000ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RX_WOL(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000100ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RX_WOL_MSK(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000108ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RX_WOL_POWOK(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000118ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_RX_WOL_SIG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000110ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000048ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_TX_COMP_BYP(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000068ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000080ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_ASXX_TX_PRT_EN(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000008ull + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_asxx_gmii_rx_clk_set {
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_clk_set_s {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
+};
+
+union cvmx_asxx_gmii_rx_dat_set {
+ uint64_t u64;
+ struct cvmx_asxx_gmii_rx_dat_set_s {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
+ struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
+};
+
+union cvmx_asxx_int_en {
+ uint64_t u64;
+ struct cvmx_asxx_int_en_s {
+ uint64_t reserved_12_63:52;
+ uint64_t txpsh:4;
+ uint64_t txpop:4;
+ uint64_t ovrflw:4;
+ } s;
+ struct cvmx_asxx_int_en_cn30xx {
+ uint64_t reserved_11_63:53;
+ uint64_t txpsh:3;
+ uint64_t reserved_7_7:1;
+ uint64_t txpop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ovrflw:3;
+ } cn30xx;
+ struct cvmx_asxx_int_en_cn30xx cn31xx;
+ struct cvmx_asxx_int_en_s cn38xx;
+ struct cvmx_asxx_int_en_s cn38xxp2;
+ struct cvmx_asxx_int_en_cn30xx cn50xx;
+ struct cvmx_asxx_int_en_s cn58xx;
+ struct cvmx_asxx_int_en_s cn58xxp1;
+};
+
+union cvmx_asxx_int_reg {
+ uint64_t u64;
+ struct cvmx_asxx_int_reg_s {
+ uint64_t reserved_12_63:52;
+ uint64_t txpsh:4;
+ uint64_t txpop:4;
+ uint64_t ovrflw:4;
+ } s;
+ struct cvmx_asxx_int_reg_cn30xx {
+ uint64_t reserved_11_63:53;
+ uint64_t txpsh:3;
+ uint64_t reserved_7_7:1;
+ uint64_t txpop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ovrflw:3;
+ } cn30xx;
+ struct cvmx_asxx_int_reg_cn30xx cn31xx;
+ struct cvmx_asxx_int_reg_s cn38xx;
+ struct cvmx_asxx_int_reg_s cn38xxp2;
+ struct cvmx_asxx_int_reg_cn30xx cn50xx;
+ struct cvmx_asxx_int_reg_s cn58xx;
+ struct cvmx_asxx_int_reg_s cn58xxp1;
+};
+
+union cvmx_asxx_mii_rx_dat_set {
+ uint64_t u64;
+ struct cvmx_asxx_mii_rx_dat_set_s {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
+ struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
+};
+
+union cvmx_asxx_prt_loop {
+ uint64_t u64;
+ struct cvmx_asxx_prt_loop_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ext_loop:4;
+ uint64_t int_loop:4;
+ } s;
+ struct cvmx_asxx_prt_loop_cn30xx {
+ uint64_t reserved_7_63:57;
+ uint64_t ext_loop:3;
+ uint64_t reserved_3_3:1;
+ uint64_t int_loop:3;
+ } cn30xx;
+ struct cvmx_asxx_prt_loop_cn30xx cn31xx;
+ struct cvmx_asxx_prt_loop_s cn38xx;
+ struct cvmx_asxx_prt_loop_s cn38xxp2;
+ struct cvmx_asxx_prt_loop_cn30xx cn50xx;
+ struct cvmx_asxx_prt_loop_s cn58xx;
+ struct cvmx_asxx_prt_loop_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_bypass {
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_s {
+ uint64_t reserved_1_63:63;
+ uint64_t bypass:1;
+ } s;
+ struct cvmx_asxx_rld_bypass_s cn38xx;
+ struct cvmx_asxx_rld_bypass_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_s cn58xx;
+ struct cvmx_asxx_rld_bypass_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_bypass_setting {
+ uint64_t u64;
+ struct cvmx_asxx_rld_bypass_setting_s {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xx;
+ struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_comp {
+ uint64_t u64;
+ struct cvmx_asxx_rld_comp_s {
+ uint64_t reserved_9_63:55;
+ uint64_t pctl:5;
+ uint64_t nctl:4;
+ } s;
+ struct cvmx_asxx_rld_comp_cn38xx {
+ uint64_t reserved_8_63:56;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+ } cn38xx;
+ struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_comp_s cn58xx;
+ struct cvmx_asxx_rld_comp_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_data_drv {
+ uint64_t u64;
+ struct cvmx_asxx_rld_data_drv_s {
+ uint64_t reserved_8_63:56;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+ } s;
+ struct cvmx_asxx_rld_data_drv_s cn38xx;
+ struct cvmx_asxx_rld_data_drv_s cn38xxp2;
+ struct cvmx_asxx_rld_data_drv_s cn58xx;
+ struct cvmx_asxx_rld_data_drv_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_fcram_mode {
+ uint64_t u64;
+ struct cvmx_asxx_rld_fcram_mode_s {
+ uint64_t reserved_1_63:63;
+ uint64_t mode:1;
+ } s;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xx;
+ struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
+};
+
+union cvmx_asxx_rld_nctl_strong {
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_strong_s {
+ uint64_t reserved_5_63:59;
+ uint64_t nctl:5;
+ } s;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_nctl_weak {
+ uint64_t u64;
+ struct cvmx_asxx_rld_nctl_weak_s {
+ uint64_t reserved_5_63:59;
+ uint64_t nctl:5;
+ } s;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_pctl_strong {
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_strong_s {
+ uint64_t reserved_5_63:59;
+ uint64_t pctl:5;
+ } s;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xx;
+ struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_pctl_weak {
+ uint64_t u64;
+ struct cvmx_asxx_rld_pctl_weak_s {
+ uint64_t reserved_5_63:59;
+ uint64_t pctl:5;
+ } s;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xx;
+ struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
+};
+
+union cvmx_asxx_rld_setting {
+ uint64_t u64;
+ struct cvmx_asxx_rld_setting_s {
+ uint64_t reserved_13_63:51;
+ uint64_t dfaset:5;
+ uint64_t dfalag:1;
+ uint64_t dfalead:1;
+ uint64_t dfalock:1;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_rld_setting_cn38xx {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } cn38xx;
+ struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
+ struct cvmx_asxx_rld_setting_s cn58xx;
+ struct cvmx_asxx_rld_setting_s cn58xxp1;
+};
+
+union cvmx_asxx_rx_clk_setx {
+ uint64_t u64;
+ struct cvmx_asxx_rx_clk_setx_s {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_rx_clk_setx_s cn30xx;
+ struct cvmx_asxx_rx_clk_setx_s cn31xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xx;
+ struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_rx_clk_setx_s cn50xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xx;
+ struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
+};
+
+union cvmx_asxx_rx_prt_en {
+ uint64_t u64;
+ struct cvmx_asxx_rx_prt_en_s {
+ uint64_t reserved_4_63:60;
+ uint64_t prt_en:4;
+ } s;
+ struct cvmx_asxx_rx_prt_en_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t prt_en:3;
+ } cn30xx;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xx;
+ struct cvmx_asxx_rx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xx;
+ struct cvmx_asxx_rx_prt_en_s cn58xxp1;
+};
+
+union cvmx_asxx_rx_wol {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_s {
+ uint64_t reserved_2_63:62;
+ uint64_t status:1;
+ uint64_t enable:1;
+ } s;
+ struct cvmx_asxx_rx_wol_s cn38xx;
+ struct cvmx_asxx_rx_wol_s cn38xxp2;
+};
+
+union cvmx_asxx_rx_wol_msk {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_msk_s {
+ uint64_t msk:64;
+ } s;
+ struct cvmx_asxx_rx_wol_msk_s cn38xx;
+ struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
+};
+
+union cvmx_asxx_rx_wol_powok {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_powok_s {
+ uint64_t reserved_1_63:63;
+ uint64_t powerok:1;
+ } s;
+ struct cvmx_asxx_rx_wol_powok_s cn38xx;
+ struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
+};
+
+union cvmx_asxx_rx_wol_sig {
+ uint64_t u64;
+ struct cvmx_asxx_rx_wol_sig_s {
+ uint64_t reserved_32_63:32;
+ uint64_t sig:32;
+ } s;
+ struct cvmx_asxx_rx_wol_sig_s cn38xx;
+ struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
+};
+
+union cvmx_asxx_tx_clk_setx {
+ uint64_t u64;
+ struct cvmx_asxx_tx_clk_setx_s {
+ uint64_t reserved_5_63:59;
+ uint64_t setting:5;
+ } s;
+ struct cvmx_asxx_tx_clk_setx_s cn30xx;
+ struct cvmx_asxx_tx_clk_setx_s cn31xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xx;
+ struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
+ struct cvmx_asxx_tx_clk_setx_s cn50xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xx;
+ struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
+};
+
+union cvmx_asxx_tx_comp_byp {
+ uint64_t u64;
+ struct cvmx_asxx_tx_comp_byp_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_asxx_tx_comp_byp_cn30xx {
+ uint64_t reserved_9_63:55;
+ uint64_t bypass:1;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+ } cn30xx;
+ struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx {
+ uint64_t reserved_8_63:56;
+ uint64_t pctl:4;
+ uint64_t nctl:4;
+ } cn38xx;
+ struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
+ struct cvmx_asxx_tx_comp_byp_cn50xx {
+ uint64_t reserved_17_63:47;
+ uint64_t bypass:1;
+ uint64_t reserved_13_15:3;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+ } cn50xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx {
+ uint64_t reserved_13_63:51;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+ } cn58xx;
+ struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
+};
+
+union cvmx_asxx_tx_hi_waterx {
+ uint64_t u64;
+ struct cvmx_asxx_tx_hi_waterx_s {
+ uint64_t reserved_4_63:60;
+ uint64_t mark:4;
+ } s;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t mark:3;
+ } cn30xx;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
+ struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xx;
+ struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
+};
+
+union cvmx_asxx_tx_prt_en {
+ uint64_t u64;
+ struct cvmx_asxx_tx_prt_en_s {
+ uint64_t reserved_4_63:60;
+ uint64_t prt_en:4;
+ } s;
+ struct cvmx_asxx_tx_prt_en_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t prt_en:3;
+ } cn30xx;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xx;
+ struct cvmx_asxx_tx_prt_en_s cn38xxp2;
+ struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xx;
+ struct cvmx_asxx_tx_prt_en_s cn58xxp1;
+};
+
+#endif
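A short usage sketch of the register unions above (not part of the patch; it assumes this header plus the cvmx_read_csr()/cvmx_write_csr() accessors used elsewhere in this series, and the loopback value is purely illustrative):

/* Enable internal loopback on all four ports of ASX block 0 on a
 * CN38XX-class part. */
static void example_asx_loopback(void)
{
	union cvmx_asxx_prt_loop prt_loop;

	prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(0));
	prt_loop.s.int_loop = 0xf;	/* one enable bit per port */
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(0), prt_loop.u64);
}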
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.c b/drivers/staging/octeon/cvmx-cmd-queue.c
new file mode 100644
index 000000000000..976227b01273
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-cmd-queue.c
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+#include "cvmx-fpa.h"
+#include "cvmx-cmd-queue.h"
+
+#include <asm/octeon/cvmx-npei-defs.h>
+#include <asm/octeon/cvmx-pexp-defs.h>
+#include "cvmx-pko-defs.h"
+
+/**
+ * This application uses this pointer to access the global queue
+ * state. It points to a bootmem named block.
+ */
+__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
+
+/**
+ * Initialize the Global queue state pointer.
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
+{
+ char *alloc_name = "cvmx_cmd_queues";
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ extern uint64_t octeon_reserve32_memory;
+#endif
+
+ if (likely(__cvmx_cmd_queue_state_ptr))
+ return CVMX_CMD_QUEUE_SUCCESS;
+
+#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
+ if (octeon_reserve32_memory)
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
+ octeon_reserve32_memory,
+ octeon_reserve32_memory +
+ (CONFIG_CAVIUM_RESERVE32 <<
+ 20) - 1, 128, alloc_name);
+ else
+#endif
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
+ 128,
+ alloc_name);
+ if (__cvmx_cmd_queue_state_ptr)
+ memset(__cvmx_cmd_queue_state_ptr, 0,
+ sizeof(*__cvmx_cmd_queue_state_ptr));
+ else {
+ struct cvmx_bootmem_named_block_desc *block_desc =
+ cvmx_bootmem_find_named_block(alloc_name);
+ if (block_desc)
+ __cvmx_cmd_queue_state_ptr =
+ cvmx_phys_to_ptr(block_desc->base_addr);
+ else {
+ cvmx_dprintf
+ ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
+ alloc_name);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ }
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @queue_id: Hardware command queue to initialize.
+ * @max_depth: Maximum outstanding commands that can be queued.
+ * @fpa_pool: FPA pool the command queues should come from.
+ * @pool_size: Size of each buffer in the FPA pool (bytes)
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+ int max_depth, int fpa_pool,
+ int pool_size)
+{
+ __cvmx_cmd_queue_state_t *qstate;
+ cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
+ if (result != CVMX_CMD_QUEUE_SUCCESS)
+ return result;
+
+ qstate = __cvmx_cmd_queue_get_state(queue_id);
+ if (qstate == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /*
+ * We artificially limit max_depth to 1<<20 words. It is an
+ * arbitrary limit.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
+ if ((max_depth < 0) || (max_depth > 1 << 20))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ } else if (max_depth != 0)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ if ((fpa_pool < 0) || (fpa_pool > 7))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ if ((pool_size < 128) || (pool_size > 65536))
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+
+ /* See if someone else has already initialized the queue */
+ if (qstate->base_ptr_div128) {
+ if (max_depth != (int)qstate->max_depth) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Queue already initalized with different "
+ "max_depth (%d).\n",
+ (int)qstate->max_depth);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if (fpa_pool != qstate->fpa_pool) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Queue already initalized with different "
+ "FPA pool (%u).\n",
+ qstate->fpa_pool);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Queue already initalized with different "
+ "FPA pool size (%u).\n",
+ (qstate->pool_size_m1 + 1) << 3);
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_ALREADY_SETUP;
+ } else {
+ union cvmx_fpa_ctl_status status;
+ void *buffer;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (!status.s.enb) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "FPA is not enabled.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ buffer = cvmx_fpa_alloc(fpa_pool);
+ if (buffer == NULL) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
+ "Unable to allocate initial buffer.\n");
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->max_depth = max_depth;
+ qstate->fpa_pool = fpa_pool;
+ qstate->pool_size_m1 = (pool_size >> 3) - 1;
+ qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
+ /*
+ * We zeroed the now serving field so we need to also
+ * zero the ticket.
+ */
+ __cvmx_cmd_queue_state_ptr->
+ ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
+ CVMX_SYNCWS;
+ return CVMX_CMD_QUEUE_SUCCESS;
+ }
+}
+
+/**
+ * Shutdown a queue a free it's command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr == NULL) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
+ "get queue information.\n");
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ if (cvmx_cmd_queue_length(queue_id) > 0) {
+ cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
+ "has data in it.\n");
+ return CVMX_CMD_QUEUE_FULL;
+ }
+
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+ if (qptr->base_ptr_div128) {
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) qptr->base_ptr_div128 << 7),
+ qptr->fpa_pool, 0);
+ qptr->base_ptr_div128 = 0;
+ }
+ __cvmx_cmd_queue_unlock(qptr);
+
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
+{
+ if (CVMX_ENABLE_PARAMETER_CHECKING) {
+ if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+
+ /*
+ * The cast is here so gcc will check that all values in the
+ * cvmx_cmd_queue_id_t enumeration are here.
+ */
+ switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
+ case CVMX_CMD_QUEUE_PKO_BASE:
+ /*
+ * FIXME: Need atomic lock on
+ * CVMX_PKO_REG_READ_IDX. Right now we are normally
+ * called with the queue lock, so that is a SLIGHT
+ * amount of protection.
+ */
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pko_mem_debug9 debug9;
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ return debug9.cn38xx.doorbell;
+ } else {
+ union cvmx_pko_mem_debug8 debug8;
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ return debug8.cn58xx.doorbell;
+ }
+ case CVMX_CMD_QUEUE_ZIP:
+ case CVMX_CMD_QUEUE_DFA:
+ case CVMX_CMD_QUEUE_RAID:
+ /* FIXME: Implement other lengths */
+ return 0;
+ case CVMX_CMD_QUEUE_DMA_BASE:
+ {
+ union cvmx_npei_dmax_counts dmax_counts;
+ dmax_counts.u64 =
+ cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
+ (queue_id & 0x7));
+ return dmax_counts.s.dbell;
+ }
+ case CVMX_CMD_QUEUE_END:
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+ }
+ return CVMX_CMD_QUEUE_INVALID_PARAM;
+}
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low-level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+ if (qptr && qptr->base_ptr_div128)
+ return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
+ else
+ return NULL;
+}
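A hedged life-cycle sketch for the functions above; the queue id, FPA pool number and buffer size are illustrative, and the FPA unit must already be enabled, as the init path checks:

static int example_cmd_queue(void)
{
	cvmx_cmd_queue_result_t rc;

	/* max_depth must be 0 while CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH is
	 * disabled (the default); pool 1 with 1024-byte buffers is an
	 * assumed configuration. */
	rc = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO_BASE, 0, 1, 1024);
	if (rc != CVMX_CMD_QUEUE_SUCCESS && rc != CVMX_CMD_QUEUE_ALREADY_SETUP)
		return -1;

	/* Commands are normally submitted through the block-specific
	 * wrappers (e.g. the PKO code elsewhere in this series). */

	if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO_BASE) > 0)
		return -1;	/* hardware must be drained before shutdown */

	rc = cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO_BASE);
	return rc == CVMX_CMD_QUEUE_SUCCESS ? 0 : -1;
}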
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.h b/drivers/staging/octeon/cvmx-cmd-queue.h
new file mode 100644
index 000000000000..f0cb20ffa39a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-cmd-queue.h
@@ -0,0 +1,617 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Support functions for managing command queues used for
+ * various hardware blocks.
+ *
+ * The common command queue infrastructure abstracts out the
+ * software necessary for adding to Octeon's chained queue
+ * structures. These structures are used for commands to the
+ * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
+ * hardware unit takes commands and CSRs of different types,
+ * they all use basic linked command buffers to store the
+ * pending request. In general, users of the CVMX API don't
+ * call cvmx-cmd-queue functions directly. Instead the hardware
+ * unit specific wrapper should be used. The wrappers perform
+ * unit specific validation and CSR writes to submit the
+ * commands.
+ *
+ * Even though most software will never directly interact with
+ * cvmx-cmd-queue, knowledge of its internal working can help
+ * in diagnosing performance problems and help with debugging.
+ *
+ * Command queue pointers are stored in a global named block
+ * called "cvmx_cmd_queues". Except for the PKO queues, each
+ * hardware queue is stored in its own cache line to reduce SMP
+ * contention on spin locks. The PKO queues are stored such that
+ * every 16th queue is next to each other in memory. This scheme
+ * allows for queues to be in separate cache lines when there
+ * is a low number of queues per port. With 16 queues per port,
+ * the first queue for each port is in the same cache area. The
+ * second queues for each port are in another area, etc. This
+ * allows software to implement very efficient lockless PKO with
+ * 16 queues per port using a minimum of cache lines per core.
+ * All queues for a given core will be isolated in the same
+ * cache area.
+ *
+ * In addition to the memory pointer layout, cvmx-cmd-queue
+ * provides an optimized fair ll/sc locking mechanism for the
+ * queues. The lock uses a "ticket / now serving" model to
+ * maintain fair order on contended locks. In addition, it uses
+ * predicted locking time to limit cache contention. When a core
+ * knows it must wait in line for a lock, it spins on the
+ * internal cycle counter to completely eliminate any causes of
+ * bus traffic.
+ *
+ */
+
+#ifndef __CVMX_CMD_QUEUE_H__
+#define __CVMX_CMD_QUEUE_H__
+
+#include <linux/prefetch.h>
+
+#include "cvmx-fpa.h"
+/**
+ * By default we disable the max depth support. Most programs
+ * don't use it and it slows down the command queue processing
+ * significantly.
+ */
+#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
+#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
+#endif
+
+/**
+ * Enumeration representing all hardware blocks that use command
+ * queues. Each hardware block has up to 65536 sub identifiers for
+ * multiple command queues. Not all chips support all hardware
+ * units.
+ */
+typedef enum {
+ CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
+
+#define CVMX_CMD_QUEUE_PKO(queue) \
+ ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
+
+ CVMX_CMD_QUEUE_ZIP = 0x10000,
+ CVMX_CMD_QUEUE_DFA = 0x20000,
+ CVMX_CMD_QUEUE_RAID = 0x30000,
+ CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
+
+#define CVMX_CMD_QUEUE_DMA(queue) \
+ ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
+
+ CVMX_CMD_QUEUE_END = 0x50000,
+} cvmx_cmd_queue_id_t;
+
+/**
+ * Command write operations can fail if the command queue needs
+ * a new buffer and the associated FPA pool is empty. It can also
+ * fail if the number of queued command words reaches the maximum
+ * set at initialization.
+ */
+typedef enum {
+ CVMX_CMD_QUEUE_SUCCESS = 0,
+ CVMX_CMD_QUEUE_NO_MEMORY = -1,
+ CVMX_CMD_QUEUE_FULL = -2,
+ CVMX_CMD_QUEUE_INVALID_PARAM = -3,
+ CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
+} cvmx_cmd_queue_result_t;
+
+typedef struct {
+ /* You have the lock when this is your ticket */
+ uint8_t now_serving;
+ uint64_t unused1:24;
+ /* Maximum outstanding command words */
+ uint32_t max_depth;
+ /* FPA pool that buffers come from */
+ uint64_t fpa_pool:3;
+ /* Top of command buffer pointer shifted right by 7 */
+ uint64_t base_ptr_div128:29;
+ uint64_t unused2:6;
+ /* FPA buffer size in 64bit words minus 1 */
+ uint64_t pool_size_m1:13;
+ /* Number of commands already used in buffer */
+ uint64_t index:13;
+} __cvmx_cmd_queue_state_t;
+
+/**
+ * This structure contains the global state of all command queues.
+ * It is stored in a bootmem named block and shared by all
+ * applications running on Octeon. Tickets are stored in a different
+ * cache line than the queue information to reduce contention on the
+ * ll/sc used to get a ticket. If this were not the case, updates
+ * of the queue state would cause the ll/sc to fail quite often.
+ */
+typedef struct {
+ uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
+ __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
+} __cvmx_cmd_queue_all_state_t;
+
+/**
+ * Initialize a command queue for use. The initial FPA buffer is
+ * allocated and the hardware unit is configured to point to the
+ * new command queue.
+ *
+ * @queue_id: Hardware command queue to initialize.
+ * @max_depth: Maximum outstanding commands that can be queued.
+ * @fpa_pool: FPA pool the command queues should come from.
+ * @pool_size: Size of each buffer in the FPA pool (bytes)
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
+ int max_depth, int fpa_pool,
+ int pool_size);
+
+/**
+ * Shut down a queue and free its command buffers to the FPA. The
+ * hardware connected to the queue must be stopped before this
+ * function is called.
+ *
+ * @queue_id: Queue to shutdown
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the number of command words pending in the queue. This
+ * function may be relatively slow for some hardware units.
+ *
+ * @queue_id: Hardware command queue to query
+ *
+ * Returns Number of outstanding commands
+ */
+int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
+
+/**
+ * Return the command buffer to be written to. The purpose of this
+ * function is to allow CVMX routines access to the low level buffer
+ * for initial hardware setup. User applications should not call this
+ * function directly.
+ *
+ * @queue_id: Command queue to query
+ *
+ * Returns Command buffer or NULL on failure
+ */
+void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
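+
+/*
+ * Usage sketch (illustrative only, not a definitive sequence): a
+ * hardware unit wrapper would typically bring a queue up, post
+ * commands and tear it down roughly as follows. The queue id and
+ * FPA pool constants are assumptions borrowed from cvmx-config.h,
+ * and passing a max_depth of 0 relies on the depth check in
+ * cvmx_cmd_queue_write() below being skipped when it is zero.
+ *
+ *	cvmx_cmd_queue_id_t q = CVMX_CMD_QUEUE_PKO(0);
+ *
+ *	if (cvmx_cmd_queue_initialize(q, 0, CVMX_FPA_OUTPUT_BUFFER_POOL,
+ *				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE) !=
+ *	    CVMX_CMD_QUEUE_SUCCESS)
+ *		return;
+ *	cvmx_cmd_queue_write2(q, 1, cmd_word0, cmd_word1);
+ *	(ring the hardware unit's doorbell CSR here)
+ *	while (cvmx_cmd_queue_length(q) > 0)
+ *		; (wait for the hardware to drain the queue)
+ *	cvmx_cmd_queue_shutdown(q);
+ */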
+
+/**
+ * Get the index into the state arrays for the supplied queue id.
+ *
+ * @queue_id: Queue ID to get an index for
+ *
+ * Returns Index into the state arrays
+ */
+static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
+{
+ /*
+ * Warning: This code currently only works with devices that
+ * have 256 queues or fewer. Devices with more than 16 queues
+ * are laid out in memory to allow cores quick access to
+ * every 16th queue. This reduces cache thrashing when you are
+ * running 16 queues per port to support lockless operation.
+ */
+ int unit = queue_id >> 16;
+ int q = (queue_id >> 4) & 0xf;
+ int core = queue_id & 0xf;
+ return unit * 256 + core * 16 + q;
+}
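+
+/*
+ * Worked example (illustrative): for the PKO queue id
+ * CVMX_CMD_QUEUE_PKO(0x23), unit = 0, q = 2 and core = 3, so the
+ * state/ticket index is 0 * 256 + 3 * 16 + 2 = 50.
+ */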
+
+/**
+ * Lock the supplied queue so nobody else is updating it at the same
+ * time as us.
+ *
+ * @queue_id: Queue ID to lock
+ * @qptr: Pointer to the queue's global state
+ */
+static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
+ __cvmx_cmd_queue_state_t *qptr)
+{
+ extern __cvmx_cmd_queue_all_state_t
+ *__cvmx_cmd_queue_state_ptr;
+ int tmp;
+ int my_ticket;
+ prefetch(qptr);
+ asm volatile (
+ ".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ /* Atomic add one to ticket_ptr */
+ "ll %[my_ticket], %[ticket_ptr]\n"
+ /* and store the original value */
+ "li %[ticket], 1\n"
+ /* in my_ticket */
+ "baddu %[ticket], %[my_ticket]\n"
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
+ " nop\n"
+ /* Load the current now_serving ticket */
+ "lbu %[ticket], %[now_serving]\n"
+ "2:\n"
+ /* Jump out if now_serving == my_ticket */
+ "beq %[ticket], %[my_ticket], 4f\n"
+ /* Find out how many tickets are in front of me */
+ " subu %[ticket], %[my_ticket], %[ticket]\n"
+ /* Use tickets in front of me minus one to delay */
+ "subu %[ticket], 1\n"
+ /* Delay will be ((tickets in front)-1)*32 loops */
+ "cins %[ticket], %[ticket], 5, 7\n"
+ "3:\n"
+ /* Loop here until our ticket might be up */
+ "bnez %[ticket], 3b\n"
+ " subu %[ticket], 1\n"
+ /* Jump back up to check our ticket again */
+ "b 2b\n"
+ /* Load the current now_serving ticket */
+ " lbu %[ticket], %[now_serving]\n"
+ "4:\n"
+ ".set pop\n" :
+ [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+ [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
+ [my_ticket] "=r"(my_ticket)
+ );
+}
+
+/**
+ * Unlock the queue, flushing all writes.
+ *
+ * @qptr: Queue to unlock
+ */
+static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
+{
+ qptr->now_serving++;
+ CVMX_SYNCWS;
+}
+
+/**
+ * Get the queue state structure for the given queue id
+ *
+ * @queue_id: Queue id to get
+ *
+ * Returns Queue structure or NULL on failure
+ */
+static inline __cvmx_cmd_queue_state_t
+ *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
+{
+ extern __cvmx_cmd_queue_all_state_t
+ *__cvmx_cmd_queue_state_ptr;
+ return &__cvmx_cmd_queue_state_ptr->
+ state[__cvmx_cmd_queue_get_index(queue_id)];
+}
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; the fixed number of command word
+ * functions yield higher performance.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @cmd_count: Number of command words to write
+ * @cmds: Array of commands to write
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
+ queue_id,
+ int use_locking,
+ int cmd_count,
+ uint64_t *cmds)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* Make sure nobody else is updating the same queue */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /*
+ * If a max queue length was specified then make sure we don't
+ * exceed it. If any part of the command would be below the
+ * limit we allow it.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+ if (unlikely
+ (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /*
+ * Normally there is plenty of room in the current buffer for
+ * the command.
+ */
+ if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
+ uint64_t *ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ qptr->index += cmd_count;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ } else {
+ uint64_t *ptr;
+ int count;
+ /*
+ * We need a new command buffer. Fail if there isn't
+ * one available.
+ */
+ uint64_t *new_buffer =
+ (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+ if (unlikely(new_buffer == NULL)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ /*
+ * Figure out how many command words will fit in this
+ * buffer. One location will be needed for the next
+ * buffer pointer.
+ */
+ count = qptr->pool_size_m1 - qptr->index;
+ ptr += qptr->index;
+ cmd_count -= count;
+ while (count--)
+ *ptr++ = *cmds++;
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /*
+ * The current buffer is full and has a link to the
+ * next buffer. Time to write the rest of the commands
+ * into the new buffer.
+ */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = cmd_count;
+ ptr = new_buffer;
+ while (cmd_count--)
+ *ptr++ = *cmds++;
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Simple function to write two command words to a command
+ * queue.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @cmd1: Command
+ * @cmd2: Command
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
+ queue_id,
+ int use_locking,
+ uint64_t cmd1,
+ uint64_t cmd2)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* Make sure nobody else is updating the same queue */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /*
+ * If a max queue length was specified then make sure we don't
+ * exceed it. If any part of the command would be below the
+ * limit we allow it.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+ if (unlikely
+ (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /*
+ * Normally there is plenty of room in the current buffer for
+ * the command.
+ */
+ if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
+ uint64_t *ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ qptr->index += 2;
+ ptr[0] = cmd1;
+ ptr[1] = cmd2;
+ } else {
+ uint64_t *ptr;
+ /*
+ * Figure out how many command words will fit in this
+ * buffer. One location will be needed for the next
+ * buffer pointer.
+ */
+ int count = qptr->pool_size_m1 - qptr->index;
+ /*
+ * We need a new command buffer. Fail if there isn't
+ * one available.
+ */
+ uint64_t *new_buffer =
+ (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+ if (unlikely(new_buffer == NULL)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ count--;
+ ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ *ptr++ = cmd1;
+ if (likely(count))
+ *ptr++ = cmd2;
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /*
+ * The current buffer is full and has a link to the
+ * next buffer. Time to write the rest of the commands
+ * into the new buffer.
+ */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = 0;
+ if (unlikely(count == 0)) {
+ qptr->index = 1;
+ new_buffer[0] = cmd2;
+ }
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+/**
+ * Simple function to write three command words to a command
+ * queue.
+ *
+ * @queue_id: Hardware command queue to write to
+ * @use_locking:
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
+ * @cmd1: Command
+ * @cmd2: Command
+ * @cmd3: Command
+ *
+ * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
+ */
+static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
+ queue_id,
+ int use_locking,
+ uint64_t cmd1,
+ uint64_t cmd2,
+ uint64_t cmd3)
+{
+ __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
+
+ /* Make sure nobody else is updating the same queue */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_lock(queue_id, qptr);
+
+ /*
+ * If a max queue length was specified then make sure we don't
+ * exceed it. If any part of the command would be below the
+ * limit we allow it.
+ */
+ if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
+ if (unlikely
+ (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_FULL;
+ }
+ }
+
+ /*
+ * Normally there is plenty of room in the current buffer for
+ * the command.
+ */
+ if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
+ uint64_t *ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ qptr->index += 3;
+ ptr[0] = cmd1;
+ ptr[1] = cmd2;
+ ptr[2] = cmd3;
+ } else {
+ uint64_t *ptr;
+ /*
+ * Figure out how many command words will fit in this
+ * buffer. One location will be needed for the next
+ * buffer pointer
+ */
+ int count = qptr->pool_size_m1 - qptr->index;
+ /*
+ * We need a new command buffer. Fail if there isn't
+ * one available
+ */
+ uint64_t *new_buffer =
+ (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
+ if (unlikely(new_buffer == NULL)) {
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_NO_MEMORY;
+ }
+ count--;
+ ptr =
+ (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
+ base_ptr_div128 << 7);
+ ptr += qptr->index;
+ *ptr++ = cmd1;
+ if (count) {
+ *ptr++ = cmd2;
+ if (count > 1)
+ *ptr++ = cmd3;
+ }
+ *ptr = cvmx_ptr_to_phys(new_buffer);
+ /*
+ * The current buffer is full and has a link to the
+ * next buffer. Time to write the rest of the commands
+ * into the new buffer.
+ */
+ qptr->base_ptr_div128 = *ptr >> 7;
+ qptr->index = 0;
+ ptr = new_buffer;
+ if (count == 0) {
+ *ptr++ = cmd2;
+ qptr->index++;
+ }
+ if (count < 2) {
+ *ptr++ = cmd3;
+ qptr->index++;
+ }
+ }
+
+ /* All updates are complete. Release the lock and return */
+ if (likely(use_locking))
+ __cvmx_cmd_queue_unlock(qptr);
+ return CVMX_CMD_QUEUE_SUCCESS;
+}
+
+#endif /* __CVMX_CMD_QUEUE_H__ */
diff --git a/drivers/staging/octeon/cvmx-config.h b/drivers/staging/octeon/cvmx-config.h
new file mode 100644
index 000000000000..078a520481cf
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-config.h
@@ -0,0 +1,169 @@
+#ifndef __CVMX_CONFIG_H__
+#define __CVMX_CONFIG_H__
+
+/************************* Config Specific Defines ************************/
+#define CVMX_LLM_NUM_PORTS 1
+#define CVMX_NULL_POINTER_PROTECT 1
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+/* PKO queues per port for interface 0 (ports 0-15) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
+/* PKO queues per port for interface 1 (ports 16-31) */
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
+/* Limit on the number of PKO ports enabled for interface 0 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+/* Limit on the number of PKO ports enabled for interface 1 */
+#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+/* PKO queues per port for PCI (ports 32-35) */
+#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
+/* PKO queues per port for Loop devices (ports 36-39) */
+#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
+
+/************************* FPA allocation *********************************/
+/* Pool sizes in bytes, must be multiple of a cache line */
+#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
+
+/* Pools in use */
+/* Packet buffers */
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+/* Work queue entries */
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+/* PKO queue command buffers */
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
+
+/************************* FAU allocation ********************************/
+/* The fetch and add registers are allocated here. They are arranged
+ * in order of descending size so that all alignment constraints are
+ * automatically met. The enums are linked so that the following enum
+ * continues allocating where the previous one left off, so the
+ * numbering within each enum always starts with zero. The macros
+ * take care of the address increment size, so the values entered
+ * always increase by 1. FAU registers are accessed with byte
+ * addresses.
+ */
+
+#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START)
+typedef enum {
+ CVMX_FAU_REG_64_START = 0,
+ CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0),
+} cvmx_fau_reg_64_t;
+
+#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START)
+typedef enum {
+ CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
+ CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
+} cvmx_fau_reg_32_t;
+
+#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START)
+typedef enum {
+ CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
+ CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
+} cvmx_fau_reg_16_t;
+
+#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
+typedef enum {
+ CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
+ CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
+} cvmx_fau_reg_8_t;
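+
+/*
+ * Example (illustrative only): to allocate FAU registers, add
+ * entries between the *_START and *_END markers so the *_ADDR()
+ * macros hand out byte addresses automatically. The register names
+ * below are hypothetical and not part of this configuration.
+ *
+ *	typedef enum {
+ *		CVMX_FAU_REG_64_START = 0,
+ *		CVMX_FAU_REG_MY_PACKETS = CVMX_FAU_REG_64_ADDR(0),
+ *		CVMX_FAU_REG_MY_BYTES = CVMX_FAU_REG_64_ADDR(1),
+ *		CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(2),
+ *	} cvmx_fau_reg_64_t;
+ */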
+
+/*
+ * The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first
+ * available FAU address that is not allocated in cvmx-config.h. This
+ * is 64 bit aligned.
+ */
+#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
+#define CVMX_FAU_REG_END (2048)
+
+/********************** scratch memory allocation *************************/
+/* Scratchpad memory allocation. Note that these are byte memory
+ * addresses. Some uses of scratchpad (IOBDMA for example) require
+ * the use of 8-byte aligned addresses, so proper alignment needs to
+ * be taken into account.
+ */
+/* Generic scratch iobdma area */
+#define CVMX_SCR_SCRATCH (0)
+/* First location available after cvmx-config.h allocated region. */
+#define CVMX_SCR_REG_AVAIL_BASE (8)
+
+/*
+ * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
+ * before the beginning of the packet. If necessary, override the
+ * default here. See the IPD section of the hardware manual for MBUFF
+ * SKIP details.
+ */
+#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
+
+/*
+ * CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve
+ * in each chained packet element. If necessary, override the default
+ * here.
+ */
+#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
+
+/*
+ * CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is
+ * enabled for all input ports. This controls if IPD sends
+ * backpressure to all ports if Octeon's FPA pools don't have enough
+ * packet or work queue entries. Even when this is off, it is still
+ * possible to get backpressure from individual hardware ports. When
+ * configuring backpressure, also check
+ * CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override
+ * the default here.
+ */
+#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1
+
+/*
+ * CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper
+ * function. Once it is enabled the hardware starts accepting
+ * packets. You might want to skip the IPD enable if configuration
+ * changes are needed from the default helper setup. If necessary,
+ * override the default here.
+ */
+#define CVMX_HELPER_ENABLE_IPD 0
+
+/*
+ * CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns
+ * to incoming packets.
+ */
+#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
+
+#define CVMX_ENABLE_PARAMETER_CHECKING 0
+
+/*
+ * The following select which fields are used by the PIP to generate
+ * the tag on INPUT
+ * 0: don't include
+ * 1: include
+ */
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
+#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
+
+/* Select skip mode for input ports */
+#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
+
+/*
+ * Force backpressure to be disabled. This overrides all other
+ * backpressure configuration.
+ */
+#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
+
+#endif /* __CVMX_CONFIG_H__ */
+
diff --git a/drivers/staging/octeon/cvmx-dbg-defs.h b/drivers/staging/octeon/cvmx-dbg-defs.h
new file mode 100644
index 000000000000..abbf42d05e5a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-dbg-defs.h
@@ -0,0 +1,72 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_DBG_DEFS_H__
+#define __CVMX_DBG_DEFS_H__
+
+#define CVMX_DBG_DATA \
+ CVMX_ADD_IO_SEG(0x00011F00000001E8ull)
+
+union cvmx_dbg_data {
+ uint64_t u64;
+ struct cvmx_dbg_data_s {
+ uint64_t reserved_23_63:41;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+ } s;
+ struct cvmx_dbg_data_cn30xx {
+ uint64_t reserved_31_63:33;
+ uint64_t pll_mul:3;
+ uint64_t reserved_23_27:5;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+ } cn30xx;
+ struct cvmx_dbg_data_cn30xx cn31xx;
+ struct cvmx_dbg_data_cn38xx {
+ uint64_t reserved_29_63:35;
+ uint64_t d_mul:4;
+ uint64_t dclk_mul2:1;
+ uint64_t cclk_div2:1;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+ } cn38xx;
+ struct cvmx_dbg_data_cn38xx cn38xxp2;
+ struct cvmx_dbg_data_cn30xx cn50xx;
+ struct cvmx_dbg_data_cn58xx {
+ uint64_t reserved_29_63:35;
+ uint64_t rem:6;
+ uint64_t c_mul:5;
+ uint64_t dsel_ext:1;
+ uint64_t data:17;
+ } cn58xx;
+ struct cvmx_dbg_data_cn58xx cn58xxp1;
+};
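+
+/*
+ * Read sketch (illustrative): the register is decoded through the
+ * union above, e.g. to sample the core clock multiplier field.
+ *
+ *	union cvmx_dbg_data dbg;
+ *	dbg.u64 = cvmx_read_csr(CVMX_DBG_DATA);
+ *	(dbg.s.c_mul now holds the core clock multiplier)
+ */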
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-fau.h b/drivers/staging/octeon/cvmx-fau.h
new file mode 100644
index 000000000000..29bdce66cdf8
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fau.h
@@ -0,0 +1,597 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Interface to the hardware Fetch and Add Unit.
+ */
+
+#ifndef __CVMX_FAU_H__
+#define __CVMX_FAU_H__
+
+/*
+ * Octeon Fetch and Add Unit (FAU)
+ */
+
+#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
+#define CVMX_FAU_BITS_SCRADDR 63, 56
+#define CVMX_FAU_BITS_LEN 55, 48
+#define CVMX_FAU_BITS_INEVAL 35, 14
+#define CVMX_FAU_BITS_TAGWAIT 13, 13
+#define CVMX_FAU_BITS_NOADD 13, 13
+#define CVMX_FAU_BITS_SIZE 12, 11
+#define CVMX_FAU_BITS_REGISTER 10, 0
+
+typedef enum {
+ CVMX_FAU_OP_SIZE_8 = 0,
+ CVMX_FAU_OP_SIZE_16 = 1,
+ CVMX_FAU_OP_SIZE_32 = 2,
+ CVMX_FAU_OP_SIZE_64 = 3
+} cvmx_fau_op_size_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int64_t value:63;
+} cvmx_fau_tagwait64_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int32_t value:31;
+} cvmx_fau_tagwait32_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int16_t value:15;
+} cvmx_fau_tagwait16_t;
+
+/**
+ * Tagwait return definition. If a timeout occurs, the error
+ * bit will be set. Otherwise the value of the register before
+ * the update will be returned.
+ */
+typedef struct {
+ uint64_t error:1;
+ int8_t value:7;
+} cvmx_fau_tagwait8_t;
+
+/**
+ * Asynchronous tagwait return definition. If a timeout occurs,
+ * the error bit will be set. Otherwise the value of the
+ * register before the update will be returned.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t invalid:1;
+ uint64_t data:63; /* unpredictable if invalid is set */
+ } s;
+} cvmx_fau_async_tagwait_result_t;
+
+/**
+ * Builds a store I/O address for writing to the FAU
+ *
+ * @noadd: 0 = Store value is atomically added to the current value
+ * 1 = Store value is atomically written over the current value
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * Returns Address to store for atomic update
+ */
+static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
+{
+ return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
+ cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
+}
+
+/**
+ * Builds an I/O address for accessing the FAU
+ *
+ * @tagwait: Should the atomic add wait for the current tag switch
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
+ * Returns Address to read from for atomic update
+ */
+static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
+ int64_t value)
+{
+ return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
+ cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
+ cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Value of the register before the update
+ */
+static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
+ int64_t value)
+{
+ return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns Value of the register before the update
+ */
+static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ * Returns Value of the register before the update
+ */
+static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
+ int16_t value)
+{
+ return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ * Returns Value of the register before the update
+ */
+static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
+}
+
+/**
+ * Perform an atomic 64 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait64_t
+cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ union {
+ uint64_t i64;
+ cvmx_fau_tagwait64_t t;
+ } result;
+ result.i64 =
+ cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 32 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait32_t
+cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ union {
+ uint64_t i32;
+ cvmx_fau_tagwait32_t t;
+ } result;
+ result.i32 =
+ cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 16 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait16_t
+cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ union {
+ uint64_t i16;
+ cvmx_fau_tagwait16_t t;
+ } result;
+ result.i16 =
+ cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Perform an atomic 8 bit add after the current tag switch
+ * completes
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ * Returns If a timeout occurs, the error bit will be set. Otherwise
+ * the value of the register before the update will be
+ * returned
+ */
+static inline cvmx_fau_tagwait8_t
+cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ union {
+ uint64_t i8;
+ cvmx_fau_tagwait8_t t;
+ } result;
+ result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
+ return result.t;
+}
+
+/**
+ * Builds I/O data for async operations
+ *
+ * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
+ * @value: Signed value to add.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
+ * @tagwait: Should the atomic add wait for the current tag switch
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
+ * @size: The size of the operation:
+ * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
+ * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
+ * Returns Data to write using cvmx_send_single
+ */
+static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
+ uint64_t tagwait,
+ cvmx_fau_op_size_t size,
+ uint64_t reg)
+{
+ return CVMX_FAU_LOAD_IO_ADDRESS |
+ cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
+ cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
+ cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
+ cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
+ cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
+ cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
+}
+
+/**
+ * Perform an async atomic 64 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
+ cvmx_fau_reg_64_t reg,
+ int64_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
+}
+
+/**
+ * Perform an async atomic 32 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
+ cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
+}
+
+/**
+ * Perform an async atomic 16 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
+ cvmx_fau_reg_16_t reg,
+ int16_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
+}
+
+/**
+ * Perform an async atomic 8 bit add. The old value is
+ * placed in the scratch memory at byte address scraddr.
+ *
+ * @scraddr: Scratch memory byte address to put response in.
+ * Must be 8 byte aligned.
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
+ cvmx_fau_reg_8_t reg,
+ int8_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
+}
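+
+/*
+ * Usage sketch (illustrative): issue the add, overlap other work with
+ * the IOBDMA, then pick the old value up from the scratchpad. The
+ * register name and the scratch read helper are assumptions; use
+ * whatever scratchpad accessor the rest of the executive provides.
+ *
+ *	cvmx_fau_async_fetch_and_add64(CVMX_SCR_SCRATCH,
+ *				       CVMX_FAU_REG_MY_COUNTER, 1);
+ *	(other work here)
+ *	old = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ */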
+
+/**
+ * Perform an async atomic 64 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
+ cvmx_fau_reg_64_t reg,
+ int64_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
+}
+
+/**
+ * Perform an async atomic 32 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ * Note: Only the low 22 bits are available.
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
+ cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
+}
+
+/**
+ * Perform an async atomic 16 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ *
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
+ cvmx_fau_reg_16_t reg,
+ int16_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
+}
+
+/**
+ * Perform an async atomic 8 bit add after the current tag
+ * switch completes.
+ *
+ * @scraddr: Scratch memory byte address to put response in. Must be
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ *
+ * Returns nothing; the result is placed in the scratch pad register
+ */
+static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
+ cvmx_fau_reg_8_t reg,
+ int8_t value)
+{
+ cvmx_send_single(__cvmx_fau_iobdma_data
+ (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
+}
+
+/**
+ * Perform an atomic 64 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 32 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 16 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 8 bit add
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to add.
+ */
+static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
+}
+
+/**
+ * Perform an atomic 64 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 8 for 64 bit access.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
+{
+ cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 32 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 4 for 32 bit access.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+{
+ cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 16 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * - Step by 2 for 16 bit access.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
+{
+ cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
+}
+
+/**
+ * Perform an atomic 8 bit write
+ *
+ * @reg: FAU atomic register to access. 0 <= reg < 2048.
+ * @value: Signed value to write.
+ */
+static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
+{
+ cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
+}
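+
+/*
+ * Typical counter pattern (illustrative): CVMX_FAU_REG_MY_COUNTER is
+ * a hypothetical 64 bit FAU register assumed to be allocated in
+ * cvmx-config.h.
+ *
+ *	cvmx_fau_atomic_write64(CVMX_FAU_REG_MY_COUNTER, 0);
+ *	cvmx_fau_atomic_add64(CVMX_FAU_REG_MY_COUNTER, 1);
+ *	total = cvmx_fau_fetch_and_add64(CVMX_FAU_REG_MY_COUNTER, 0);
+ */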
+
+#endif /* __CVMX_FAU_H__ */
diff --git a/drivers/staging/octeon/cvmx-fpa-defs.h b/drivers/staging/octeon/cvmx-fpa-defs.h
new file mode 100644
index 000000000000..bf5546b90110
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa-defs.h
@@ -0,0 +1,403 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_FPA_DEFS_H__
+#define __CVMX_FPA_DEFS_H__
+
+#define CVMX_FPA_BIST_STATUS \
+ CVMX_ADD_IO_SEG(0x00011800280000E8ull)
+#define CVMX_FPA_CTL_STATUS \
+ CVMX_ADD_IO_SEG(0x0001180028000050ull)
+#define CVMX_FPA_FPF0_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000000ull)
+#define CVMX_FPA_FPF0_SIZE \
+ CVMX_ADD_IO_SEG(0x0001180028000058ull)
+#define CVMX_FPA_FPF1_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000008ull)
+#define CVMX_FPA_FPF2_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000010ull)
+#define CVMX_FPA_FPF3_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000018ull)
+#define CVMX_FPA_FPF4_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000020ull)
+#define CVMX_FPA_FPF5_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000028ull)
+#define CVMX_FPA_FPF6_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000030ull)
+#define CVMX_FPA_FPF7_MARKS \
+ CVMX_ADD_IO_SEG(0x0001180028000038ull)
+#define CVMX_FPA_FPFX_MARKS(offset) \
+ CVMX_ADD_IO_SEG(0x0001180028000008ull + (((offset) & 7) * 8) - 8 * 1)
+#define CVMX_FPA_FPFX_SIZE(offset) \
+ CVMX_ADD_IO_SEG(0x0001180028000060ull + (((offset) & 7) * 8) - 8 * 1)
+#define CVMX_FPA_INT_ENB \
+ CVMX_ADD_IO_SEG(0x0001180028000048ull)
+#define CVMX_FPA_INT_SUM \
+ CVMX_ADD_IO_SEG(0x0001180028000040ull)
+#define CVMX_FPA_QUE0_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x00011800280000F0ull)
+#define CVMX_FPA_QUE1_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x00011800280000F8ull)
+#define CVMX_FPA_QUE2_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x0001180028000100ull)
+#define CVMX_FPA_QUE3_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x0001180028000108ull)
+#define CVMX_FPA_QUE4_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x0001180028000110ull)
+#define CVMX_FPA_QUE5_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x0001180028000118ull)
+#define CVMX_FPA_QUE6_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x0001180028000120ull)
+#define CVMX_FPA_QUE7_PAGE_INDEX \
+ CVMX_ADD_IO_SEG(0x0001180028000128ull)
+#define CVMX_FPA_QUEX_AVAILABLE(offset) \
+ CVMX_ADD_IO_SEG(0x0001180028000098ull + (((offset) & 7) * 8))
+#define CVMX_FPA_QUEX_PAGE_INDEX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800280000F0ull + (((offset) & 7) * 8))
+#define CVMX_FPA_QUE_ACT \
+ CVMX_ADD_IO_SEG(0x0001180028000138ull)
+#define CVMX_FPA_QUE_EXP \
+ CVMX_ADD_IO_SEG(0x0001180028000130ull)
+#define CVMX_FPA_WART_CTL \
+ CVMX_ADD_IO_SEG(0x00011800280000D8ull)
+#define CVMX_FPA_WART_STATUS \
+ CVMX_ADD_IO_SEG(0x00011800280000E0ull)
+
+union cvmx_fpa_bist_status {
+ uint64_t u64;
+ struct cvmx_fpa_bist_status_s {
+ uint64_t reserved_5_63:59;
+ uint64_t frd:1;
+ uint64_t fpf0:1;
+ uint64_t fpf1:1;
+ uint64_t ffr:1;
+ uint64_t fdr:1;
+ } s;
+ struct cvmx_fpa_bist_status_s cn30xx;
+ struct cvmx_fpa_bist_status_s cn31xx;
+ struct cvmx_fpa_bist_status_s cn38xx;
+ struct cvmx_fpa_bist_status_s cn38xxp2;
+ struct cvmx_fpa_bist_status_s cn50xx;
+ struct cvmx_fpa_bist_status_s cn52xx;
+ struct cvmx_fpa_bist_status_s cn52xxp1;
+ struct cvmx_fpa_bist_status_s cn56xx;
+ struct cvmx_fpa_bist_status_s cn56xxp1;
+ struct cvmx_fpa_bist_status_s cn58xx;
+ struct cvmx_fpa_bist_status_s cn58xxp1;
+};
+
+union cvmx_fpa_ctl_status {
+ uint64_t u64;
+ struct cvmx_fpa_ctl_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t reset:1;
+ uint64_t use_ldt:1;
+ uint64_t use_stt:1;
+ uint64_t enb:1;
+ uint64_t mem1_err:7;
+ uint64_t mem0_err:7;
+ } s;
+ struct cvmx_fpa_ctl_status_s cn30xx;
+ struct cvmx_fpa_ctl_status_s cn31xx;
+ struct cvmx_fpa_ctl_status_s cn38xx;
+ struct cvmx_fpa_ctl_status_s cn38xxp2;
+ struct cvmx_fpa_ctl_status_s cn50xx;
+ struct cvmx_fpa_ctl_status_s cn52xx;
+ struct cvmx_fpa_ctl_status_s cn52xxp1;
+ struct cvmx_fpa_ctl_status_s cn56xx;
+ struct cvmx_fpa_ctl_status_s cn56xxp1;
+ struct cvmx_fpa_ctl_status_s cn58xx;
+ struct cvmx_fpa_ctl_status_s cn58xxp1;
+};
+
+union cvmx_fpa_fpfx_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_marks_s {
+ uint64_t reserved_22_63:42;
+ uint64_t fpf_wr:11;
+ uint64_t fpf_rd:11;
+ } s;
+ struct cvmx_fpa_fpfx_marks_s cn38xx;
+ struct cvmx_fpa_fpfx_marks_s cn38xxp2;
+ struct cvmx_fpa_fpfx_marks_s cn56xx;
+ struct cvmx_fpa_fpfx_marks_s cn56xxp1;
+ struct cvmx_fpa_fpfx_marks_s cn58xx;
+ struct cvmx_fpa_fpfx_marks_s cn58xxp1;
+};
+
+union cvmx_fpa_fpfx_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpfx_size_s {
+ uint64_t reserved_11_63:53;
+ uint64_t fpf_siz:11;
+ } s;
+ struct cvmx_fpa_fpfx_size_s cn38xx;
+ struct cvmx_fpa_fpfx_size_s cn38xxp2;
+ struct cvmx_fpa_fpfx_size_s cn56xx;
+ struct cvmx_fpa_fpfx_size_s cn56xxp1;
+ struct cvmx_fpa_fpfx_size_s cn58xx;
+ struct cvmx_fpa_fpfx_size_s cn58xxp1;
+};
+
+union cvmx_fpa_fpf0_marks {
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_marks_s {
+ uint64_t reserved_24_63:40;
+ uint64_t fpf_wr:12;
+ uint64_t fpf_rd:12;
+ } s;
+ struct cvmx_fpa_fpf0_marks_s cn38xx;
+ struct cvmx_fpa_fpf0_marks_s cn38xxp2;
+ struct cvmx_fpa_fpf0_marks_s cn56xx;
+ struct cvmx_fpa_fpf0_marks_s cn56xxp1;
+ struct cvmx_fpa_fpf0_marks_s cn58xx;
+ struct cvmx_fpa_fpf0_marks_s cn58xxp1;
+};
+
+union cvmx_fpa_fpf0_size {
+ uint64_t u64;
+ struct cvmx_fpa_fpf0_size_s {
+ uint64_t reserved_12_63:52;
+ uint64_t fpf_siz:12;
+ } s;
+ struct cvmx_fpa_fpf0_size_s cn38xx;
+ struct cvmx_fpa_fpf0_size_s cn38xxp2;
+ struct cvmx_fpa_fpf0_size_s cn56xx;
+ struct cvmx_fpa_fpf0_size_s cn56xxp1;
+ struct cvmx_fpa_fpf0_size_s cn58xx;
+ struct cvmx_fpa_fpf0_size_s cn58xxp1;
+};
+
+union cvmx_fpa_int_enb {
+ uint64_t u64;
+ struct cvmx_fpa_int_enb_s {
+ uint64_t reserved_28_63:36;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+ } s;
+ struct cvmx_fpa_int_enb_s cn30xx;
+ struct cvmx_fpa_int_enb_s cn31xx;
+ struct cvmx_fpa_int_enb_s cn38xx;
+ struct cvmx_fpa_int_enb_s cn38xxp2;
+ struct cvmx_fpa_int_enb_s cn50xx;
+ struct cvmx_fpa_int_enb_s cn52xx;
+ struct cvmx_fpa_int_enb_s cn52xxp1;
+ struct cvmx_fpa_int_enb_s cn56xx;
+ struct cvmx_fpa_int_enb_s cn56xxp1;
+ struct cvmx_fpa_int_enb_s cn58xx;
+ struct cvmx_fpa_int_enb_s cn58xxp1;
+};
+
+union cvmx_fpa_int_sum {
+ uint64_t u64;
+ struct cvmx_fpa_int_sum_s {
+ uint64_t reserved_28_63:36;
+ uint64_t q7_perr:1;
+ uint64_t q7_coff:1;
+ uint64_t q7_und:1;
+ uint64_t q6_perr:1;
+ uint64_t q6_coff:1;
+ uint64_t q6_und:1;
+ uint64_t q5_perr:1;
+ uint64_t q5_coff:1;
+ uint64_t q5_und:1;
+ uint64_t q4_perr:1;
+ uint64_t q4_coff:1;
+ uint64_t q4_und:1;
+ uint64_t q3_perr:1;
+ uint64_t q3_coff:1;
+ uint64_t q3_und:1;
+ uint64_t q2_perr:1;
+ uint64_t q2_coff:1;
+ uint64_t q2_und:1;
+ uint64_t q1_perr:1;
+ uint64_t q1_coff:1;
+ uint64_t q1_und:1;
+ uint64_t q0_perr:1;
+ uint64_t q0_coff:1;
+ uint64_t q0_und:1;
+ uint64_t fed1_dbe:1;
+ uint64_t fed1_sbe:1;
+ uint64_t fed0_dbe:1;
+ uint64_t fed0_sbe:1;
+ } s;
+ struct cvmx_fpa_int_sum_s cn30xx;
+ struct cvmx_fpa_int_sum_s cn31xx;
+ struct cvmx_fpa_int_sum_s cn38xx;
+ struct cvmx_fpa_int_sum_s cn38xxp2;
+ struct cvmx_fpa_int_sum_s cn50xx;
+ struct cvmx_fpa_int_sum_s cn52xx;
+ struct cvmx_fpa_int_sum_s cn52xxp1;
+ struct cvmx_fpa_int_sum_s cn56xx;
+ struct cvmx_fpa_int_sum_s cn56xxp1;
+ struct cvmx_fpa_int_sum_s cn58xx;
+ struct cvmx_fpa_int_sum_s cn58xxp1;
+};
+
+union cvmx_fpa_quex_available {
+ uint64_t u64;
+ struct cvmx_fpa_quex_available_s {
+ uint64_t reserved_29_63:35;
+ uint64_t que_siz:29;
+ } s;
+ struct cvmx_fpa_quex_available_s cn30xx;
+ struct cvmx_fpa_quex_available_s cn31xx;
+ struct cvmx_fpa_quex_available_s cn38xx;
+ struct cvmx_fpa_quex_available_s cn38xxp2;
+ struct cvmx_fpa_quex_available_s cn50xx;
+ struct cvmx_fpa_quex_available_s cn52xx;
+ struct cvmx_fpa_quex_available_s cn52xxp1;
+ struct cvmx_fpa_quex_available_s cn56xx;
+ struct cvmx_fpa_quex_available_s cn56xxp1;
+ struct cvmx_fpa_quex_available_s cn58xx;
+ struct cvmx_fpa_quex_available_s cn58xxp1;
+};
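+
+/*
+ * Read sketch (illustrative): check how many free pages a pool has,
+ * decoding the CSR through the union above.
+ *
+ *	union cvmx_fpa_quex_available avail;
+ *	avail.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
+ *	(avail.s.que_siz is the number of free pages in the pool)
+ */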
+
+union cvmx_fpa_quex_page_index {
+ uint64_t u64;
+ struct cvmx_fpa_quex_page_index_s {
+ uint64_t reserved_25_63:39;
+ uint64_t pg_num:25;
+ } s;
+ struct cvmx_fpa_quex_page_index_s cn30xx;
+ struct cvmx_fpa_quex_page_index_s cn31xx;
+ struct cvmx_fpa_quex_page_index_s cn38xx;
+ struct cvmx_fpa_quex_page_index_s cn38xxp2;
+ struct cvmx_fpa_quex_page_index_s cn50xx;
+ struct cvmx_fpa_quex_page_index_s cn52xx;
+ struct cvmx_fpa_quex_page_index_s cn52xxp1;
+ struct cvmx_fpa_quex_page_index_s cn56xx;
+ struct cvmx_fpa_quex_page_index_s cn56xxp1;
+ struct cvmx_fpa_quex_page_index_s cn58xx;
+ struct cvmx_fpa_quex_page_index_s cn58xxp1;
+};
+
+union cvmx_fpa_que_act {
+ uint64_t u64;
+ struct cvmx_fpa_que_act_s {
+ uint64_t reserved_29_63:35;
+ uint64_t act_que:3;
+ uint64_t act_indx:26;
+ } s;
+ struct cvmx_fpa_que_act_s cn30xx;
+ struct cvmx_fpa_que_act_s cn31xx;
+ struct cvmx_fpa_que_act_s cn38xx;
+ struct cvmx_fpa_que_act_s cn38xxp2;
+ struct cvmx_fpa_que_act_s cn50xx;
+ struct cvmx_fpa_que_act_s cn52xx;
+ struct cvmx_fpa_que_act_s cn52xxp1;
+ struct cvmx_fpa_que_act_s cn56xx;
+ struct cvmx_fpa_que_act_s cn56xxp1;
+ struct cvmx_fpa_que_act_s cn58xx;
+ struct cvmx_fpa_que_act_s cn58xxp1;
+};
+
+union cvmx_fpa_que_exp {
+ uint64_t u64;
+ struct cvmx_fpa_que_exp_s {
+ uint64_t reserved_29_63:35;
+ uint64_t exp_que:3;
+ uint64_t exp_indx:26;
+ } s;
+ struct cvmx_fpa_que_exp_s cn30xx;
+ struct cvmx_fpa_que_exp_s cn31xx;
+ struct cvmx_fpa_que_exp_s cn38xx;
+ struct cvmx_fpa_que_exp_s cn38xxp2;
+ struct cvmx_fpa_que_exp_s cn50xx;
+ struct cvmx_fpa_que_exp_s cn52xx;
+ struct cvmx_fpa_que_exp_s cn52xxp1;
+ struct cvmx_fpa_que_exp_s cn56xx;
+ struct cvmx_fpa_que_exp_s cn56xxp1;
+ struct cvmx_fpa_que_exp_s cn58xx;
+ struct cvmx_fpa_que_exp_s cn58xxp1;
+};
+
+union cvmx_fpa_wart_ctl {
+ uint64_t u64;
+ struct cvmx_fpa_wart_ctl_s {
+ uint64_t reserved_16_63:48;
+ uint64_t ctl:16;
+ } s;
+ struct cvmx_fpa_wart_ctl_s cn30xx;
+ struct cvmx_fpa_wart_ctl_s cn31xx;
+ struct cvmx_fpa_wart_ctl_s cn38xx;
+ struct cvmx_fpa_wart_ctl_s cn38xxp2;
+ struct cvmx_fpa_wart_ctl_s cn50xx;
+ struct cvmx_fpa_wart_ctl_s cn52xx;
+ struct cvmx_fpa_wart_ctl_s cn52xxp1;
+ struct cvmx_fpa_wart_ctl_s cn56xx;
+ struct cvmx_fpa_wart_ctl_s cn56xxp1;
+ struct cvmx_fpa_wart_ctl_s cn58xx;
+ struct cvmx_fpa_wart_ctl_s cn58xxp1;
+};
+
+union cvmx_fpa_wart_status {
+ uint64_t u64;
+ struct cvmx_fpa_wart_status_s {
+ uint64_t reserved_32_63:32;
+ uint64_t status:32;
+ } s;
+ struct cvmx_fpa_wart_status_s cn30xx;
+ struct cvmx_fpa_wart_status_s cn31xx;
+ struct cvmx_fpa_wart_status_s cn38xx;
+ struct cvmx_fpa_wart_status_s cn38xxp2;
+ struct cvmx_fpa_wart_status_s cn50xx;
+ struct cvmx_fpa_wart_status_s cn52xx;
+ struct cvmx_fpa_wart_status_s cn52xxp1;
+ struct cvmx_fpa_wart_status_s cn56xx;
+ struct cvmx_fpa_wart_status_s cn56xxp1;
+ struct cvmx_fpa_wart_status_s cn58xx;
+ struct cvmx_fpa_wart_status_s cn58xxp1;
+};
+
+#endif
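
Every register declared in this header follows the same union-of-bitfields pattern: the per-chip variants (cn30xx, cn52xx, ...) alias the same 64-bit value as the generic `s` view. As a minimal sketch of how such a union is consumed, the snippet below decodes an interrupt-summary read; it assumes CVMX_FPA_INT_SUM is the address macro defined earlier in this header (outside the quoted hunk), and uses the cvmx_read_csr()/cvmx_dprintf() helpers that appear elsewhere in this patch.

/*
 * Minimal sketch (assumes the surrounding cvmx headers): decode an FPA
 * interrupt summary read through the union defined above.
 */
static inline void fpa_report_queue0_errors(void)
{
	union cvmx_fpa_int_sum isum;

	isum.u64 = cvmx_read_csr(CVMX_FPA_INT_SUM);	/* raw 64-bit CSR read */
	if (isum.s.q0_und)				/* queue 0 pointer underflow */
		cvmx_dprintf("FPA: queue 0 pointer underflow\n");
	if (isum.s.fed0_sbe || isum.s.fed0_dbe)		/* free-list ECC errors */
		cvmx_dprintf("FPA: free-list memory ECC error\n");
}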
diff --git a/drivers/staging/octeon/cvmx-fpa.c b/drivers/staging/octeon/cvmx-fpa.c
new file mode 100644
index 000000000000..55d9147acc85
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa.c
@@ -0,0 +1,183 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Support library for the hardware Free Pool Allocator.
+ *
+ *
+ */
+
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-fpa.h"
+#include "cvmx-ipd.h"
+
+/**
+ * Current state of all the pools. Use access functions
+ * instead of using it directly.
+ */
+CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
+
+/**
+ * Setup a FPA pool to control a new block of memory. The
+ * buffer pointer must be a physical address.
+ *
+ * @pool: Pool to initialize
+ * 0 <= pool < 8
+ * @name: Constant character string to name this pool.
+ * String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
+ * @block_size: Size for each block controlled by the FPA
+ * @num_blocks: Number of blocks
+ *
+ * Returns 0 on Success,
+ * -1 on failure
+ */
+int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+ uint64_t block_size, uint64_t num_blocks)
+{
+ char *ptr;
+ if (!buffer) {
+ cvmx_dprintf
+ ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
+ return -1;
+ }
+ if (pool >= CVMX_FPA_NUM_POOLS) {
+ cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
+ return -1;
+ }
+
+ if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
+ cvmx_dprintf
+ ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
+ return -1;
+ }
+
+ if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
+ cvmx_dprintf
+ ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
+ return -1;
+ }
+
+ cvmx_fpa_pool_info[pool].name = name;
+ cvmx_fpa_pool_info[pool].size = block_size;
+ cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
+ cvmx_fpa_pool_info[pool].base = buffer;
+
+ ptr = (char *)buffer;
+ while (num_blocks--) {
+ cvmx_fpa_free(ptr, pool, 0);
+ ptr += block_size;
+ }
+ return 0;
+}
+
+/**
+ * Shutdown a Memory pool and validate that it had all of
+ * the buffers originally placed in it.
+ *
+ * @pool: Pool to shutdown
+ * Returns Zero on success
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
+ */
+uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
+{
+ uint64_t errors = 0;
+ uint64_t count = 0;
+ uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
+ uint64_t finish =
+ base +
+ cvmx_fpa_pool_info[pool].size *
+ cvmx_fpa_pool_info[pool].starting_element_count;
+ void *ptr;
+ uint64_t address;
+
+ count = 0;
+ do {
+ ptr = cvmx_fpa_alloc(pool);
+ if (ptr)
+ address = cvmx_ptr_to_phys(ptr);
+ else
+ address = 0;
+ if (address) {
+ if ((address >= base) && (address < finish) &&
+ (((address -
+ base) % cvmx_fpa_pool_info[pool].size) == 0)) {
+ count++;
+ } else {
+ cvmx_dprintf
+ ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
+ (unsigned long long)address,
+ cvmx_fpa_pool_info[pool].name, (int)pool);
+ errors++;
+ }
+ }
+ } while (address);
+
+#ifdef CVMX_ENABLE_PKO_FUNCTIONS
+ if (pool == 0)
+ cvmx_ipd_free_ptr();
+#endif
+
+ if (errors) {
+ cvmx_dprintf
+ ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
+ cvmx_fpa_pool_info[pool].name, (int)pool,
+ (unsigned long long)base, (unsigned long long)finish,
+ (unsigned long long)cvmx_fpa_pool_info[pool].size);
+ return -errors;
+ } else
+ return 0;
+}
+
+uint64_t cvmx_fpa_get_block_size(uint64_t pool)
+{
+ switch (pool) {
+ case 0:
+ return CVMX_FPA_POOL_0_SIZE;
+ case 1:
+ return CVMX_FPA_POOL_1_SIZE;
+ case 2:
+ return CVMX_FPA_POOL_2_SIZE;
+ case 3:
+ return CVMX_FPA_POOL_3_SIZE;
+ case 4:
+ return CVMX_FPA_POOL_4_SIZE;
+ case 5:
+ return CVMX_FPA_POOL_5_SIZE;
+ case 6:
+ return CVMX_FPA_POOL_6_SIZE;
+ case 7:
+ return CVMX_FPA_POOL_7_SIZE;
+ default:
+ return 0;
+ }
+}
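
To make the lifecycle of the pool API above concrete, here is a minimal usage sketch. It assumes a CVMX_FPA_ALIGNMENT-aligned, physically contiguous buffer supplied by the caller; EXAMPLE_POOL, EXAMPLE_BLOCK_SIZE and EXAMPLE_NUM_BLOCKS are placeholder names, not symbols defined by this code.

/*
 * Minimal usage sketch for the pool API above.  Pool number, block size
 * and block count are placeholders; the buffer must be
 * CVMX_FPA_ALIGNMENT-aligned and visible to all cores and hardware.
 */
#include "cvmx.h"
#include "cvmx-fpa.h"

#define EXAMPLE_POOL		0	/* any value 0..CVMX_FPA_NUM_POOLS-1 */
#define EXAMPLE_BLOCK_SIZE	256	/* must be >= CVMX_FPA_MIN_BLOCK_SIZE */
#define EXAMPLE_NUM_BLOCKS	64

static void fpa_pool_example(void *aligned_buffer)
{
	void *block;

	cvmx_fpa_enable();	/* must run before any other FPA operation */

	if (cvmx_fpa_setup_pool(EXAMPLE_POOL, "example", aligned_buffer,
				EXAMPLE_BLOCK_SIZE, EXAMPLE_NUM_BLOCKS) != 0)
		return;

	block = cvmx_fpa_alloc(EXAMPLE_POOL);	/* pop one block, NULL if empty */
	if (block)
		cvmx_fpa_free(block, EXAMPLE_POOL, 0);	/* push it back */

	/* Non-zero result means buffers were lost or corrupted. */
	cvmx_fpa_shutdown_pool(EXAMPLE_POOL);
}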
diff --git a/drivers/staging/octeon/cvmx-fpa.h b/drivers/staging/octeon/cvmx-fpa.h
new file mode 100644
index 000000000000..1d7788fe09f2
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa.h
@@ -0,0 +1,299 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the hardware Free Pool Allocator.
+ *
+ *
+ */
+
+#ifndef __CVMX_FPA_H__
+#define __CVMX_FPA_H__
+
+#include "cvmx-address.h"
+#include "cvmx-fpa-defs.h"
+
+#define CVMX_FPA_NUM_POOLS 8
+#define CVMX_FPA_MIN_BLOCK_SIZE 128
+#define CVMX_FPA_ALIGNMENT 128
+
+/**
+ * Structure describing the data format used for stores to the FPA.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ /*
+ * the (64-bit word) location in scratchpad to write
+ * to (if len != 0)
+ */
+ uint64_t scraddr:8;
+ /* the number of words in the response (0 => no response) */
+ uint64_t len:8;
+ /* the ID of the device on the non-coherent bus */
+ uint64_t did:8;
+ /*
+ * the address that will appear in the first tick on
+ * the NCB bus.
+ */
+ uint64_t addr:40;
+ } s;
+} cvmx_fpa_iobdma_data_t;
+
+/**
+ * Structure describing the current state of a FPA pool.
+ */
+typedef struct {
+ /* Name it was created under */
+ const char *name;
+ /* Size of each block */
+ uint64_t size;
+ /* The base memory address of whole block */
+ void *base;
+ /* The number of elements in the pool at creation */
+ uint64_t starting_element_count;
+} cvmx_fpa_pool_info_t;
+
+/**
+ * Current state of all the pools. Use access functions
+ * instead of using it directly.
+ */
+extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Return the name of the pool
+ *
+ * @pool: Pool to get the name of
+ * Returns The name
+ */
+static inline const char *cvmx_fpa_get_name(uint64_t pool)
+{
+ return cvmx_fpa_pool_info[pool].name;
+}
+
+/**
+ * Return the base of the pool
+ *
+ * @pool: Pool to get the base of
+ * Returns The base
+ */
+static inline void *cvmx_fpa_get_base(uint64_t pool)
+{
+ return cvmx_fpa_pool_info[pool].base;
+}
+
+/**
+ * Check if a pointer belongs to an FPA pool. Return non-zero
+ * if the supplied pointer is inside the memory controlled by
+ * an FPA pool.
+ *
+ * @pool: Pool to check
+ * @ptr: Pointer to check
+ * Returns Non-zero if pointer is in the pool. Zero if not
+ */
+static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
+{
+ return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
+ ((char *)ptr <
+ ((char *)(cvmx_fpa_pool_info[pool].base)) +
+ cvmx_fpa_pool_info[pool].size *
+ cvmx_fpa_pool_info[pool].starting_element_count));
+}
+
+/**
+ * Enable the FPA for use. Must be performed after any CSR
+ * configuration but before any other FPA functions.
+ */
+static inline void cvmx_fpa_enable(void)
+{
+ union cvmx_fpa_ctl_status status;
+
+ status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
+ if (status.s.enb) {
+ cvmx_dprintf
+ ("Warning: Enabling FPA when FPA already enabled.\n");
+ }
+
+ /*
+ * Do runtime check as we allow pass1 compiled code to run on
+ * pass2 chips.
+ */
+ if (cvmx_octeon_is_pass1()) {
+ union cvmx_fpa_fpfx_marks marks;
+ int i;
+ for (i = 1; i < 8; i++) {
+ marks.u64 =
+ cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
+ marks.s.fpf_wr = 0xe0;
+ cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
+ marks.u64);
+ }
+
+ /* Enforce a 10 cycle delay between config and enable */
+ cvmx_wait(10);
+ }
+
+ /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
+ status.u64 = 0;
+ status.s.enb = 1;
+ cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
+}
+
+/**
+ * Get a new block from the FPA
+ *
+ * @pool: Pool to get the block from
+ * Returns Pointer to the block or NULL on failure
+ */
+static inline void *cvmx_fpa_alloc(uint64_t pool)
+{
+ uint64_t address =
+ cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
+ if (address)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+/**
+ * Asynchronously get a new block from the FPA
+ *
+ * @scr_addr: Local scratch address to put response in. This is a byte address,
+ * but must be 8-byte aligned.
+ * @pool: Pool to get the block from
+ */
+static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
+{
+ cvmx_fpa_iobdma_data_t data;
+
+ /*
+ * Hardware only uses 64-bit aligned locations, so convert
+ * from byte address to 64-bit index
+ */
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = 1;
+ data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
+ data.s.addr = 0;
+ cvmx_send_single(data.u64);
+}
+
+/**
+ * Free a block allocated with a FPA pool. Does NOT provide memory
+ * ordering in cases where the memory block was modified by the core.
+ *
+ * @ptr: Block to free
+ * @pool: Pool to put it in
+ * @num_cache_lines:
+ * Cache lines to invalidate
+ */
+static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{
+ cvmx_addr_t newptr;
+ newptr.u64 = cvmx_ptr_to_phys(ptr);
+ newptr.sfilldidspace.didspace =
+ CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
+ /* Prevent GCC from reordering around free */
+ barrier();
+ /* value written is number of cache lines not written back */
+ cvmx_write_io(newptr.u64, num_cache_lines);
+}
+
+/**
+ * Free a block allocated with a FPA pool. Provides required memory
+ * ordering in cases where memory block was modified by core.
+ *
+ * @ptr: Block to free
+ * @pool: Pool to put it in
+ * @num_cache_lines:
+ * Cache lines to invalidate
+ */
+static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{
+ cvmx_addr_t newptr;
+ newptr.u64 = cvmx_ptr_to_phys(ptr);
+ newptr.sfilldidspace.didspace =
+ CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
+ /*
+ * Make sure that any previous writes to memory go out before
+ * we free this buffer. This also serves as a barrier to
+ * prevent GCC from reordering operations to after the
+ * free.
+ */
+ CVMX_SYNCWS;
+ /* value written is number of cache lines not written back */
+ cvmx_write_io(newptr.u64, num_cache_lines);
+}
+
+/**
+ * Setup a FPA pool to control a new block of memory.
+ * This can only be called once per pool. Make sure proper
+ * locking enforces this.
+ *
+ * @pool: Pool to initialize
+ * 0 <= pool < 8
+ * @name: Constant character string to name this pool.
+ * String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
+ * @block_size: Size for each block controlled by the FPA
+ * @num_blocks: Number of blocks
+ *
+ * Returns 0 on Success,
+ * -1 on failure
+ */
+extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
+ uint64_t block_size, uint64_t num_blocks);
+
+/**
+ * Shutdown a Memory pool and validate that it had all of
+ * the buffers originally placed in it. This should only be
+ * called by one processor after all hardware has finished
+ * using the pool.
+ *
+ * @pool: Pool to shutdown
+ * Returns Zero on success
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
+ */
+extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
+
+/**
+ * Get the size of blocks controlled by the pool
+ * This is resolved to a constant at compile time.
+ *
+ * @pool: Pool to access
+ * Returns Size of the block in bytes
+ */
+uint64_t cvmx_fpa_get_block_size(uint64_t pool);
+
+#endif /* __CVMX_FPA_H__ */
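
cvmx_fpa_async_alloc() above only issues the IOBDMA command; the physical address of the block arrives later in the core's scratchpad at scr_addr. A minimal sketch of completing such an allocation follows; CVMX_SYNCIOBDMA and cvmx_scratch_read64() are assumed to come from the Octeon executive's asm/scratchpad support (they are not defined in this file), and EXAMPLE_SCR_OFF is a placeholder offset.

/*
 * Minimal sketch of completing an allocation started with
 * cvmx_fpa_async_alloc().  CVMX_SYNCIOBDMA and cvmx_scratch_read64()
 * are assumed helpers, not part of this file.
 */
#define EXAMPLE_SCR_OFF 8	/* scratchpad byte offset, 8-byte aligned */

static inline void *fpa_async_alloc_example(uint64_t pool)
{
	uint64_t phys;

	cvmx_fpa_async_alloc(EXAMPLE_SCR_OFF, pool);	/* queue the request */
	CVMX_SYNCIOBDMA;		/* wait for the IOBDMA response to land */
	phys = cvmx_scratch_read64(EXAMPLE_SCR_OFF);
	return phys ? cvmx_phys_to_ptr(phys) : NULL;	/* NULL => pool empty */
}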
diff --git a/drivers/staging/octeon/cvmx-gmxx-defs.h b/drivers/staging/octeon/cvmx-gmxx-defs.h
new file mode 100644
index 000000000000..946a43a73fd7
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-gmxx-defs.h
@@ -0,0 +1,2529 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_GMXX_DEFS_H__
+#define __CVMX_GMXX_DEFS_H__
+
+#define CVMX_GMXX_BAD_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000518ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_BIST(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000400ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_CLK_EN(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080007F0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_HG2_CONTROL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000550ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_INF_MODE(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080007F8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_NXA_ADR(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000510ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000580ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_PRTX_CFG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000010ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000180ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000188ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000190ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000198ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080001A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080001A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000108ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000100ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_DECISION(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000040ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000020ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000018ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000030ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000028ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_IFG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000058ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_INT_EN(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000008ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_INT_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000000ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_JABBER(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000038ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000068ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000060ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000050ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000088ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000098ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080000A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080000B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000080ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080000C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000090ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080000A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080000B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000048ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000420ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000460ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_BP_ONX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000440ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_HG2_STATUS(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000548ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_PASS_EN(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080005F8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000600ull + (((offset) & 15) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_PRTS(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000410ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_PRT_INFO(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004E8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_TX_STATUS(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080007E8ull + (((block_id) & 0) * 0x8000000ull))
+#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000538ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_RX_XAUI_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000530ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_SMACX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000230ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_STAT_BP(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000520ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_APPEND(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000218ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_BURST(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000228ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080005A0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080005C0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_CLK(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000208ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000270ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000240ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000248ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000238ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000258ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000260ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000300ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_SLOT(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000220ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000250ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT0(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000280ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT1(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000288ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT2(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000290ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT3(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000298ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT4(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080002A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT5(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080002A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT6(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080002B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT7(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080002B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT8(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080002C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STAT9(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080002C8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000268ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TXX_THRESH(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000210ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_BP(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004D0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000780ull + (((offset) & 1) * 8) + (((block_id) & 0) * 0x0ull))
+#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000498ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_CORRUPT(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004D8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_HG2_REG1(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000558ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_HG2_REG2(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000560ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_IFG(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000488ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_INT_EN(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000508ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_INT_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000500ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_JAM(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000490ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_LFSR(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004F8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_OVR_BP(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004C8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004A0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004A8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_PRTS(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000480ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_SPI_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004C0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_SPI_DRAIN(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004E0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_SPI_MAX(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004B0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000680ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_SPI_THRESH(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800080004B8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_TX_XAUI_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000528ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180008000540ull + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_gmxx_bad_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_bad_reg_s {
+ uint64_t reserved_31_63:33;
+ uint64_t inb_nxa:4;
+ uint64_t statovr:1;
+ uint64_t loststat:4;
+ uint64_t reserved_18_21:4;
+ uint64_t out_ovr:16;
+ uint64_t ncb_ovr:1;
+ uint64_t out_col:1;
+ } s;
+ struct cvmx_gmxx_bad_reg_cn30xx {
+ uint64_t reserved_31_63:33;
+ uint64_t inb_nxa:4;
+ uint64_t statovr:1;
+ uint64_t reserved_25_25:1;
+ uint64_t loststat:3;
+ uint64_t reserved_5_21:17;
+ uint64_t out_ovr:3;
+ uint64_t reserved_0_1:2;
+ } cn30xx;
+ struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
+ struct cvmx_gmxx_bad_reg_s cn38xx;
+ struct cvmx_gmxx_bad_reg_s cn38xxp2;
+ struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
+ struct cvmx_gmxx_bad_reg_cn52xx {
+ uint64_t reserved_31_63:33;
+ uint64_t inb_nxa:4;
+ uint64_t statovr:1;
+ uint64_t loststat:4;
+ uint64_t reserved_6_21:16;
+ uint64_t out_ovr:4;
+ uint64_t reserved_0_1:2;
+ } cn52xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
+ struct cvmx_gmxx_bad_reg_s cn58xx;
+ struct cvmx_gmxx_bad_reg_s cn58xxp1;
+};
+
+union cvmx_gmxx_bist {
+ uint64_t u64;
+ struct cvmx_gmxx_bist_s {
+ uint64_t reserved_17_63:47;
+ uint64_t status:17;
+ } s;
+ struct cvmx_gmxx_bist_cn30xx {
+ uint64_t reserved_10_63:54;
+ uint64_t status:10;
+ } cn30xx;
+ struct cvmx_gmxx_bist_cn30xx cn31xx;
+ struct cvmx_gmxx_bist_cn30xx cn38xx;
+ struct cvmx_gmxx_bist_cn30xx cn38xxp2;
+ struct cvmx_gmxx_bist_cn50xx {
+ uint64_t reserved_12_63:52;
+ uint64_t status:12;
+ } cn50xx;
+ struct cvmx_gmxx_bist_cn52xx {
+ uint64_t reserved_16_63:48;
+ uint64_t status:16;
+ } cn52xx;
+ struct cvmx_gmxx_bist_cn52xx cn52xxp1;
+ struct cvmx_gmxx_bist_cn52xx cn56xx;
+ struct cvmx_gmxx_bist_cn52xx cn56xxp1;
+ struct cvmx_gmxx_bist_s cn58xx;
+ struct cvmx_gmxx_bist_s cn58xxp1;
+};
+
+union cvmx_gmxx_clk_en {
+ uint64_t u64;
+ struct cvmx_gmxx_clk_en_s {
+ uint64_t reserved_1_63:63;
+ uint64_t clk_en:1;
+ } s;
+ struct cvmx_gmxx_clk_en_s cn52xx;
+ struct cvmx_gmxx_clk_en_s cn52xxp1;
+ struct cvmx_gmxx_clk_en_s cn56xx;
+ struct cvmx_gmxx_clk_en_s cn56xxp1;
+};
+
+union cvmx_gmxx_hg2_control {
+ uint64_t u64;
+ struct cvmx_gmxx_hg2_control_s {
+ uint64_t reserved_19_63:45;
+ uint64_t hg2tx_en:1;
+ uint64_t hg2rx_en:1;
+ uint64_t phys_en:1;
+ uint64_t logl_en:16;
+ } s;
+ struct cvmx_gmxx_hg2_control_s cn52xx;
+ struct cvmx_gmxx_hg2_control_s cn52xxp1;
+ struct cvmx_gmxx_hg2_control_s cn56xx;
+};
+
+union cvmx_gmxx_inf_mode {
+ uint64_t u64;
+ struct cvmx_gmxx_inf_mode_s {
+ uint64_t reserved_10_63:54;
+ uint64_t speed:2;
+ uint64_t reserved_6_7:2;
+ uint64_t mode:2;
+ uint64_t reserved_3_3:1;
+ uint64_t p0mii:1;
+ uint64_t en:1;
+ uint64_t type:1;
+ } s;
+ struct cvmx_gmxx_inf_mode_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t p0mii:1;
+ uint64_t en:1;
+ uint64_t type:1;
+ } cn30xx;
+ struct cvmx_gmxx_inf_mode_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t en:1;
+ uint64_t type:1;
+ } cn31xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
+ struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
+ struct cvmx_gmxx_inf_mode_cn52xx {
+ uint64_t reserved_10_63:54;
+ uint64_t speed:2;
+ uint64_t reserved_6_7:2;
+ uint64_t mode:2;
+ uint64_t reserved_2_3:2;
+ uint64_t en:1;
+ uint64_t type:1;
+ } cn52xx;
+ struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
+ struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
+ struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
+ struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
+ struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
+};
+
+union cvmx_gmxx_nxa_adr {
+ uint64_t u64;
+ struct cvmx_gmxx_nxa_adr_s {
+ uint64_t reserved_6_63:58;
+ uint64_t prt:6;
+ } s;
+ struct cvmx_gmxx_nxa_adr_s cn30xx;
+ struct cvmx_gmxx_nxa_adr_s cn31xx;
+ struct cvmx_gmxx_nxa_adr_s cn38xx;
+ struct cvmx_gmxx_nxa_adr_s cn38xxp2;
+ struct cvmx_gmxx_nxa_adr_s cn50xx;
+ struct cvmx_gmxx_nxa_adr_s cn52xx;
+ struct cvmx_gmxx_nxa_adr_s cn52xxp1;
+ struct cvmx_gmxx_nxa_adr_s cn56xx;
+ struct cvmx_gmxx_nxa_adr_s cn56xxp1;
+ struct cvmx_gmxx_nxa_adr_s cn58xx;
+ struct cvmx_gmxx_nxa_adr_s cn58xxp1;
+};
+
+union cvmx_gmxx_prtx_cbfc_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s {
+ uint64_t phys_en:16;
+ uint64_t logl_en:16;
+ uint64_t phys_bp:16;
+ uint64_t reserved_4_15:12;
+ uint64_t bck_en:1;
+ uint64_t drp_en:1;
+ uint64_t tx_en:1;
+ uint64_t rx_en:1;
+ } s;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
+ struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
+};
+
+union cvmx_gmxx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s {
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
+ struct cvmx_gmxx_prtx_cfg_s cn52xx;
+ struct cvmx_gmxx_prtx_cfg_s cn52xxp1;
+ struct cvmx_gmxx_prtx_cfg_s cn56xx;
+ struct cvmx_gmxx_prtx_cfg_s cn56xxp1;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
+ struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam0 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam0_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam1 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam1_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam2 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam2_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam3 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam3_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam4 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam4_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam5 {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam5_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_cam_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_cam_en_s {
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+ } s;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+ } s;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_decision {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_decision_s {
+ uint64_t reserved_5_63:59;
+ uint64_t cnt:5;
+ } s;
+ struct cvmx_gmxx_rxx_decision_s cn30xx;
+ struct cvmx_gmxx_rxx_decision_s cn31xx;
+ struct cvmx_gmxx_rxx_decision_s cn38xx;
+ struct cvmx_gmxx_rxx_decision_s cn38xxp2;
+ struct cvmx_gmxx_rxx_decision_s cn50xx;
+ struct cvmx_gmxx_rxx_decision_s cn52xx;
+ struct cvmx_gmxx_rxx_decision_s cn52xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn56xx;
+ struct cvmx_gmxx_rxx_decision_s cn56xxp1;
+ struct cvmx_gmxx_rxx_decision_s cn58xx;
+ struct cvmx_gmxx_rxx_decision_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_frm_chk {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_chk_s {
+ uint64_t reserved_10_63:54;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_chk_cn50xx {
+ uint64_t reserved_10_63:54;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx {
+ uint64_t reserved_9_63:55;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn52xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s {
+ uint64_t reserved_11_63:53;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
+ uint64_t reserved_9_63:55;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
+ uint64_t reserved_8_63:56;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
+ uint64_t reserved_11_63:53;
+ uint64_t null_dis:1;
+ uint64_t pre_align:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
+ uint64_t reserved_10_63:54;
+ uint64_t pre_align:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_frm_max {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_max_s {
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+ } s;
+ struct cvmx_gmxx_rxx_frm_max_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_max_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_frm_min {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_min_s {
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+ } s;
+ struct cvmx_gmxx_rxx_frm_min_s cn30xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn31xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn38xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
+ struct cvmx_gmxx_rxx_frm_min_s cn58xx;
+ struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_ifg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_ifg_s {
+ uint64_t reserved_4_63:60;
+ uint64_t ifg:4;
+ } s;
+ struct cvmx_gmxx_rxx_ifg_s cn30xx;
+ struct cvmx_gmxx_rxx_ifg_s cn31xx;
+ struct cvmx_gmxx_rxx_ifg_s cn38xx;
+ struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
+ struct cvmx_gmxx_rxx_ifg_s cn50xx;
+ struct cvmx_gmxx_rxx_ifg_s cn52xx;
+ struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn56xx;
+ struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
+ struct cvmx_gmxx_rxx_ifg_s cn58xx;
+ struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_en_s {
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_gmxx_rxx_int_en_cn30xx {
+ uint64_t reserved_19_63:45;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rxx_int_en_cn50xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
+ uint64_t reserved_27_63:37;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_en_cn58xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s {
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx {
+ uint64_t reserved_19_63:45;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t hg2cc:1;
+ uint64_t hg2fld:1;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
+ uint64_t reserved_27_63:37;
+ uint64_t undat:1;
+ uint64_t uneop:1;
+ uint64_t unsop:1;
+ uint64_t bad_term:1;
+ uint64_t bad_seq:1;
+ uint64_t rem_fault:1;
+ uint64_t loc_fault:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t reserved_2_2:1;
+ uint64_t carext:1;
+ uint64_t reserved_0_0:1;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t phy_dupx:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_link:1;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t niberr:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t carext:1;
+ uint64_t minerr:1;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_jabber_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt:16;
+ } s;
+ struct cvmx_gmxx_rxx_jabber_s cn30xx;
+ struct cvmx_gmxx_rxx_jabber_s cn31xx;
+ struct cvmx_gmxx_rxx_jabber_s cn38xx;
+ struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
+ struct cvmx_gmxx_rxx_jabber_s cn50xx;
+ struct cvmx_gmxx_rxx_jabber_s cn52xx;
+ struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cn56xx;
+ struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
+ struct cvmx_gmxx_rxx_jabber_s cn58xx;
+ struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_pause_drop_time {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_pause_drop_time_s {
+ uint64_t reserved_16_63:48;
+ uint64_t status:16;
+ } s;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
+ struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_rx_inbnd_s {
+ uint64_t reserved_4_63:60;
+ uint64_t duplex:1;
+ uint64_t speed:2;
+ uint64_t status:1;
+ } s;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
+ struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_ctl_s {
+ uint64_t reserved_1_63:63;
+ uint64_t rd_clr:1;
+ } s;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_octs {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_octs_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_octs_dmac {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_octs_drp {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_pkts {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_pkts_bad {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_pkts_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_pkts_dmac {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_stats_pkts_drp {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
+ struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
+};
+
+union cvmx_gmxx_rxx_udd_skp {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_udd_skp_s {
+ uint64_t reserved_9_63:55;
+ uint64_t fcssel:1;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+ } s;
+ struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
+ struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
+ struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
+ struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_bp_dropx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_bp_dropx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mark:6;
+ } s;
+ struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
+ struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
+ struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
+ struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_bp_offx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_bp_offx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mark:6;
+ } s;
+ struct cvmx_gmxx_rx_bp_offx_s cn30xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn31xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn38xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
+ struct cvmx_gmxx_rx_bp_offx_s cn50xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn52xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cn56xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
+ struct cvmx_gmxx_rx_bp_offx_s cn58xx;
+ struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_bp_onx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_bp_onx_s {
+ uint64_t reserved_9_63:55;
+ uint64_t mark:9;
+ } s;
+ struct cvmx_gmxx_rx_bp_onx_s cn30xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn31xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn38xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn38xxp2;
+ struct cvmx_gmxx_rx_bp_onx_s cn50xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn52xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn52xxp1;
+ struct cvmx_gmxx_rx_bp_onx_s cn56xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn56xxp1;
+ struct cvmx_gmxx_rx_bp_onx_s cn58xx;
+ struct cvmx_gmxx_rx_bp_onx_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_hg2_status {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_hg2_status_s {
+ uint64_t reserved_48_63:16;
+ uint64_t phtim2go:16;
+ uint64_t xof:16;
+ uint64_t lgtim2go:16;
+ } s;
+ struct cvmx_gmxx_rx_hg2_status_s cn52xx;
+ struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
+ struct cvmx_gmxx_rx_hg2_status_s cn56xx;
+};
+
+union cvmx_gmxx_rx_pass_en {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_pass_en_s {
+ uint64_t reserved_16_63:48;
+ uint64_t en:16;
+ } s;
+ struct cvmx_gmxx_rx_pass_en_s cn38xx;
+ struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
+ struct cvmx_gmxx_rx_pass_en_s cn58xx;
+ struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_pass_mapx {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_pass_mapx_s {
+ uint64_t reserved_4_63:60;
+ uint64_t dprt:4;
+ } s;
+ struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
+ struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
+ struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
+ struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_prt_info {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_prt_info_s {
+ uint64_t reserved_32_63:32;
+ uint64_t drop:16;
+ uint64_t commit:16;
+ } s;
+ struct cvmx_gmxx_rx_prt_info_cn30xx {
+ uint64_t reserved_19_63:45;
+ uint64_t drop:3;
+ uint64_t reserved_3_15:13;
+ uint64_t commit:3;
+ } cn30xx;
+ struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
+ struct cvmx_gmxx_rx_prt_info_s cn38xx;
+ struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t drop:4;
+ uint64_t reserved_4_15:12;
+ uint64_t commit:4;
+ } cn52xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
+ struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
+ struct cvmx_gmxx_rx_prt_info_s cn58xx;
+ struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_prts {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_prts_s {
+ uint64_t reserved_3_63:61;
+ uint64_t prts:3;
+ } s;
+ struct cvmx_gmxx_rx_prts_s cn30xx;
+ struct cvmx_gmxx_rx_prts_s cn31xx;
+ struct cvmx_gmxx_rx_prts_s cn38xx;
+ struct cvmx_gmxx_rx_prts_s cn38xxp2;
+ struct cvmx_gmxx_rx_prts_s cn50xx;
+ struct cvmx_gmxx_rx_prts_s cn52xx;
+ struct cvmx_gmxx_rx_prts_s cn52xxp1;
+ struct cvmx_gmxx_rx_prts_s cn56xx;
+ struct cvmx_gmxx_rx_prts_s cn56xxp1;
+ struct cvmx_gmxx_rx_prts_s cn58xx;
+ struct cvmx_gmxx_rx_prts_s cn58xxp1;
+};
+
+union cvmx_gmxx_rx_tx_status {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_tx_status_s {
+ uint64_t reserved_7_63:57;
+ uint64_t tx:3;
+ uint64_t reserved_3_3:1;
+ uint64_t rx:3;
+ } s;
+ struct cvmx_gmxx_rx_tx_status_s cn30xx;
+ struct cvmx_gmxx_rx_tx_status_s cn31xx;
+ struct cvmx_gmxx_rx_tx_status_s cn50xx;
+};
+
+union cvmx_gmxx_rx_xaui_bad_col {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_xaui_bad_col_s {
+ uint64_t reserved_40_63:24;
+ uint64_t val:1;
+ uint64_t state:3;
+ uint64_t lane_rxc:4;
+ uint64_t lane_rxd:32;
+ } s;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
+ struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
+};
+
+union cvmx_gmxx_rx_xaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rx_xaui_ctl_s {
+ uint64_t reserved_2_63:62;
+ uint64_t status:2;
+ } s;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
+ struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
+};
+
+union cvmx_gmxx_smacx {
+ uint64_t u64;
+ struct cvmx_gmxx_smacx_s {
+ uint64_t reserved_48_63:16;
+ uint64_t smac:48;
+ } s;
+ struct cvmx_gmxx_smacx_s cn30xx;
+ struct cvmx_gmxx_smacx_s cn31xx;
+ struct cvmx_gmxx_smacx_s cn38xx;
+ struct cvmx_gmxx_smacx_s cn38xxp2;
+ struct cvmx_gmxx_smacx_s cn50xx;
+ struct cvmx_gmxx_smacx_s cn52xx;
+ struct cvmx_gmxx_smacx_s cn52xxp1;
+ struct cvmx_gmxx_smacx_s cn56xx;
+ struct cvmx_gmxx_smacx_s cn56xxp1;
+ struct cvmx_gmxx_smacx_s cn58xx;
+ struct cvmx_gmxx_smacx_s cn58xxp1;
+};
+
+union cvmx_gmxx_stat_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_stat_bp_s {
+ uint64_t reserved_17_63:47;
+ uint64_t bp:1;
+ uint64_t cnt:16;
+ } s;
+ struct cvmx_gmxx_stat_bp_s cn30xx;
+ struct cvmx_gmxx_stat_bp_s cn31xx;
+ struct cvmx_gmxx_stat_bp_s cn38xx;
+ struct cvmx_gmxx_stat_bp_s cn38xxp2;
+ struct cvmx_gmxx_stat_bp_s cn50xx;
+ struct cvmx_gmxx_stat_bp_s cn52xx;
+ struct cvmx_gmxx_stat_bp_s cn52xxp1;
+ struct cvmx_gmxx_stat_bp_s cn56xx;
+ struct cvmx_gmxx_stat_bp_s cn56xxp1;
+ struct cvmx_gmxx_stat_bp_s cn58xx;
+ struct cvmx_gmxx_stat_bp_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_append {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_append_s {
+ uint64_t reserved_4_63:60;
+ uint64_t force_fcs:1;
+ uint64_t fcs:1;
+ uint64_t pad:1;
+ uint64_t preamble:1;
+ } s;
+ struct cvmx_gmxx_txx_append_s cn30xx;
+ struct cvmx_gmxx_txx_append_s cn31xx;
+ struct cvmx_gmxx_txx_append_s cn38xx;
+ struct cvmx_gmxx_txx_append_s cn38xxp2;
+ struct cvmx_gmxx_txx_append_s cn50xx;
+ struct cvmx_gmxx_txx_append_s cn52xx;
+ struct cvmx_gmxx_txx_append_s cn52xxp1;
+ struct cvmx_gmxx_txx_append_s cn56xx;
+ struct cvmx_gmxx_txx_append_s cn56xxp1;
+ struct cvmx_gmxx_txx_append_s cn58xx;
+ struct cvmx_gmxx_txx_append_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_burst {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_burst_s {
+ uint64_t reserved_16_63:48;
+ uint64_t burst:16;
+ } s;
+ struct cvmx_gmxx_txx_burst_s cn30xx;
+ struct cvmx_gmxx_txx_burst_s cn31xx;
+ struct cvmx_gmxx_txx_burst_s cn38xx;
+ struct cvmx_gmxx_txx_burst_s cn38xxp2;
+ struct cvmx_gmxx_txx_burst_s cn50xx;
+ struct cvmx_gmxx_txx_burst_s cn52xx;
+ struct cvmx_gmxx_txx_burst_s cn52xxp1;
+ struct cvmx_gmxx_txx_burst_s cn56xx;
+ struct cvmx_gmxx_txx_burst_s cn56xxp1;
+ struct cvmx_gmxx_txx_burst_s cn58xx;
+ struct cvmx_gmxx_txx_burst_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_cbfc_xoff {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_cbfc_xoff_s {
+ uint64_t reserved_16_63:48;
+ uint64_t xoff:16;
+ } s;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
+ struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
+};
+
+union cvmx_gmxx_txx_cbfc_xon {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_cbfc_xon_s {
+ uint64_t reserved_16_63:48;
+ uint64_t xon:16;
+ } s;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
+ struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
+};
+
+union cvmx_gmxx_txx_clk {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_clk_s {
+ uint64_t reserved_6_63:58;
+ uint64_t clk_cnt:6;
+ } s;
+ struct cvmx_gmxx_txx_clk_s cn30xx;
+ struct cvmx_gmxx_txx_clk_s cn31xx;
+ struct cvmx_gmxx_txx_clk_s cn38xx;
+ struct cvmx_gmxx_txx_clk_s cn38xxp2;
+ struct cvmx_gmxx_txx_clk_s cn50xx;
+ struct cvmx_gmxx_txx_clk_s cn58xx;
+ struct cvmx_gmxx_txx_clk_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_ctl_s {
+ uint64_t reserved_2_63:62;
+ uint64_t xsdef_en:1;
+ uint64_t xscol_en:1;
+ } s;
+ struct cvmx_gmxx_txx_ctl_s cn30xx;
+ struct cvmx_gmxx_txx_ctl_s cn31xx;
+ struct cvmx_gmxx_txx_ctl_s cn38xx;
+ struct cvmx_gmxx_txx_ctl_s cn38xxp2;
+ struct cvmx_gmxx_txx_ctl_s cn50xx;
+ struct cvmx_gmxx_txx_ctl_s cn52xx;
+ struct cvmx_gmxx_txx_ctl_s cn52xxp1;
+ struct cvmx_gmxx_txx_ctl_s cn56xx;
+ struct cvmx_gmxx_txx_ctl_s cn56xxp1;
+ struct cvmx_gmxx_txx_ctl_s cn58xx;
+ struct cvmx_gmxx_txx_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_min_pkt {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_min_pkt_s {
+ uint64_t reserved_8_63:56;
+ uint64_t min_size:8;
+ } s;
+ struct cvmx_gmxx_txx_min_pkt_s cn30xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn31xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn38xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
+ struct cvmx_gmxx_txx_min_pkt_s cn50xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn52xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cn56xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
+ struct cvmx_gmxx_txx_min_pkt_s cn58xx;
+ struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_pause_pkt_interval {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s {
+ uint64_t reserved_16_63:48;
+ uint64_t interval:16;
+ } s;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
+ struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_pause_pkt_time {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_pkt_time_s {
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+ } s;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
+ struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_pause_togo {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_togo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t msg_time:16;
+ uint64_t time:16;
+ } s;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx {
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+ } cn30xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn52xx;
+ struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_togo_s cn56xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
+ struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
+};
+
+union cvmx_gmxx_txx_pause_zero {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_pause_zero_s {
+ uint64_t reserved_1_63:63;
+ uint64_t send:1;
+ } s;
+ struct cvmx_gmxx_txx_pause_zero_s cn30xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn31xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn38xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
+ struct cvmx_gmxx_txx_pause_zero_s cn50xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn52xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cn56xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
+ struct cvmx_gmxx_txx_pause_zero_s cn58xx;
+ struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_sgmii_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_sgmii_ctl_s {
+ uint64_t reserved_1_63:63;
+ uint64_t align:1;
+ } s;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
+ struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
+};
+
+union cvmx_gmxx_txx_slot {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_slot_s {
+ uint64_t reserved_10_63:54;
+ uint64_t slot:10;
+ } s;
+ struct cvmx_gmxx_txx_slot_s cn30xx;
+ struct cvmx_gmxx_txx_slot_s cn31xx;
+ struct cvmx_gmxx_txx_slot_s cn38xx;
+ struct cvmx_gmxx_txx_slot_s cn38xxp2;
+ struct cvmx_gmxx_txx_slot_s cn50xx;
+ struct cvmx_gmxx_txx_slot_s cn52xx;
+ struct cvmx_gmxx_txx_slot_s cn52xxp1;
+ struct cvmx_gmxx_txx_slot_s cn56xx;
+ struct cvmx_gmxx_txx_slot_s cn56xxp1;
+ struct cvmx_gmxx_txx_slot_s cn58xx;
+ struct cvmx_gmxx_txx_slot_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_soft_pause {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_soft_pause_s {
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+ } s;
+ struct cvmx_gmxx_txx_soft_pause_s cn30xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn31xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn38xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
+ struct cvmx_gmxx_txx_soft_pause_s cn50xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn52xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cn56xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
+ struct cvmx_gmxx_txx_soft_pause_s cn58xx;
+ struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat0 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat0_s {
+ uint64_t xsdef:32;
+ uint64_t xscol:32;
+ } s;
+ struct cvmx_gmxx_txx_stat0_s cn30xx;
+ struct cvmx_gmxx_txx_stat0_s cn31xx;
+ struct cvmx_gmxx_txx_stat0_s cn38xx;
+ struct cvmx_gmxx_txx_stat0_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat0_s cn50xx;
+ struct cvmx_gmxx_txx_stat0_s cn52xx;
+ struct cvmx_gmxx_txx_stat0_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat0_s cn56xx;
+ struct cvmx_gmxx_txx_stat0_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat0_s cn58xx;
+ struct cvmx_gmxx_txx_stat0_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat1 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat1_s {
+ uint64_t scol:32;
+ uint64_t mcol:32;
+ } s;
+ struct cvmx_gmxx_txx_stat1_s cn30xx;
+ struct cvmx_gmxx_txx_stat1_s cn31xx;
+ struct cvmx_gmxx_txx_stat1_s cn38xx;
+ struct cvmx_gmxx_txx_stat1_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat1_s cn50xx;
+ struct cvmx_gmxx_txx_stat1_s cn52xx;
+ struct cvmx_gmxx_txx_stat1_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat1_s cn56xx;
+ struct cvmx_gmxx_txx_stat1_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat1_s cn58xx;
+ struct cvmx_gmxx_txx_stat1_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat2 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat2_s {
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+ } s;
+ struct cvmx_gmxx_txx_stat2_s cn30xx;
+ struct cvmx_gmxx_txx_stat2_s cn31xx;
+ struct cvmx_gmxx_txx_stat2_s cn38xx;
+ struct cvmx_gmxx_txx_stat2_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat2_s cn50xx;
+ struct cvmx_gmxx_txx_stat2_s cn52xx;
+ struct cvmx_gmxx_txx_stat2_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat2_s cn56xx;
+ struct cvmx_gmxx_txx_stat2_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat2_s cn58xx;
+ struct cvmx_gmxx_txx_stat2_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat3 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat3_s {
+ uint64_t reserved_32_63:32;
+ uint64_t pkts:32;
+ } s;
+ struct cvmx_gmxx_txx_stat3_s cn30xx;
+ struct cvmx_gmxx_txx_stat3_s cn31xx;
+ struct cvmx_gmxx_txx_stat3_s cn38xx;
+ struct cvmx_gmxx_txx_stat3_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat3_s cn50xx;
+ struct cvmx_gmxx_txx_stat3_s cn52xx;
+ struct cvmx_gmxx_txx_stat3_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat3_s cn56xx;
+ struct cvmx_gmxx_txx_stat3_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat3_s cn58xx;
+ struct cvmx_gmxx_txx_stat3_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat4 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat4_s {
+ uint64_t hist1:32;
+ uint64_t hist0:32;
+ } s;
+ struct cvmx_gmxx_txx_stat4_s cn30xx;
+ struct cvmx_gmxx_txx_stat4_s cn31xx;
+ struct cvmx_gmxx_txx_stat4_s cn38xx;
+ struct cvmx_gmxx_txx_stat4_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat4_s cn50xx;
+ struct cvmx_gmxx_txx_stat4_s cn52xx;
+ struct cvmx_gmxx_txx_stat4_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat4_s cn56xx;
+ struct cvmx_gmxx_txx_stat4_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat4_s cn58xx;
+ struct cvmx_gmxx_txx_stat4_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat5 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat5_s {
+ uint64_t hist3:32;
+ uint64_t hist2:32;
+ } s;
+ struct cvmx_gmxx_txx_stat5_s cn30xx;
+ struct cvmx_gmxx_txx_stat5_s cn31xx;
+ struct cvmx_gmxx_txx_stat5_s cn38xx;
+ struct cvmx_gmxx_txx_stat5_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat5_s cn50xx;
+ struct cvmx_gmxx_txx_stat5_s cn52xx;
+ struct cvmx_gmxx_txx_stat5_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat5_s cn56xx;
+ struct cvmx_gmxx_txx_stat5_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat5_s cn58xx;
+ struct cvmx_gmxx_txx_stat5_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat6 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat6_s {
+ uint64_t hist5:32;
+ uint64_t hist4:32;
+ } s;
+ struct cvmx_gmxx_txx_stat6_s cn30xx;
+ struct cvmx_gmxx_txx_stat6_s cn31xx;
+ struct cvmx_gmxx_txx_stat6_s cn38xx;
+ struct cvmx_gmxx_txx_stat6_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat6_s cn50xx;
+ struct cvmx_gmxx_txx_stat6_s cn52xx;
+ struct cvmx_gmxx_txx_stat6_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat6_s cn56xx;
+ struct cvmx_gmxx_txx_stat6_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat6_s cn58xx;
+ struct cvmx_gmxx_txx_stat6_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat7 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat7_s {
+ uint64_t hist7:32;
+ uint64_t hist6:32;
+ } s;
+ struct cvmx_gmxx_txx_stat7_s cn30xx;
+ struct cvmx_gmxx_txx_stat7_s cn31xx;
+ struct cvmx_gmxx_txx_stat7_s cn38xx;
+ struct cvmx_gmxx_txx_stat7_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat7_s cn50xx;
+ struct cvmx_gmxx_txx_stat7_s cn52xx;
+ struct cvmx_gmxx_txx_stat7_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat7_s cn56xx;
+ struct cvmx_gmxx_txx_stat7_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat7_s cn58xx;
+ struct cvmx_gmxx_txx_stat7_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat8 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat8_s {
+ uint64_t mcst:32;
+ uint64_t bcst:32;
+ } s;
+ struct cvmx_gmxx_txx_stat8_s cn30xx;
+ struct cvmx_gmxx_txx_stat8_s cn31xx;
+ struct cvmx_gmxx_txx_stat8_s cn38xx;
+ struct cvmx_gmxx_txx_stat8_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat8_s cn50xx;
+ struct cvmx_gmxx_txx_stat8_s cn52xx;
+ struct cvmx_gmxx_txx_stat8_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat8_s cn56xx;
+ struct cvmx_gmxx_txx_stat8_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat8_s cn58xx;
+ struct cvmx_gmxx_txx_stat8_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stat9 {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stat9_s {
+ uint64_t undflw:32;
+ uint64_t ctl:32;
+ } s;
+ struct cvmx_gmxx_txx_stat9_s cn30xx;
+ struct cvmx_gmxx_txx_stat9_s cn31xx;
+ struct cvmx_gmxx_txx_stat9_s cn38xx;
+ struct cvmx_gmxx_txx_stat9_s cn38xxp2;
+ struct cvmx_gmxx_txx_stat9_s cn50xx;
+ struct cvmx_gmxx_txx_stat9_s cn52xx;
+ struct cvmx_gmxx_txx_stat9_s cn52xxp1;
+ struct cvmx_gmxx_txx_stat9_s cn56xx;
+ struct cvmx_gmxx_txx_stat9_s cn56xxp1;
+ struct cvmx_gmxx_txx_stat9_s cn58xx;
+ struct cvmx_gmxx_txx_stat9_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_stats_ctl_s {
+ uint64_t reserved_1_63:63;
+ uint64_t rd_clr:1;
+ } s;
+ struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
+ struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
+ struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
+ struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_txx_thresh {
+ uint64_t u64;
+ struct cvmx_gmxx_txx_thresh_s {
+ uint64_t reserved_9_63:55;
+ uint64_t cnt:9;
+ } s;
+ struct cvmx_gmxx_txx_thresh_cn30xx {
+ uint64_t reserved_7_63:57;
+ uint64_t cnt:7;
+ } cn30xx;
+ struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
+ struct cvmx_gmxx_txx_thresh_s cn38xx;
+ struct cvmx_gmxx_txx_thresh_s cn38xxp2;
+ struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
+ struct cvmx_gmxx_txx_thresh_s cn52xx;
+ struct cvmx_gmxx_txx_thresh_s cn52xxp1;
+ struct cvmx_gmxx_txx_thresh_s cn56xx;
+ struct cvmx_gmxx_txx_thresh_s cn56xxp1;
+ struct cvmx_gmxx_txx_thresh_s cn58xx;
+ struct cvmx_gmxx_txx_thresh_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_bp_s {
+ uint64_t reserved_4_63:60;
+ uint64_t bp:4;
+ } s;
+ struct cvmx_gmxx_tx_bp_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t bp:3;
+ } cn30xx;
+ struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
+ struct cvmx_gmxx_tx_bp_s cn38xx;
+ struct cvmx_gmxx_tx_bp_s cn38xxp2;
+ struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_bp_s cn52xx;
+ struct cvmx_gmxx_tx_bp_s cn52xxp1;
+ struct cvmx_gmxx_tx_bp_s cn56xx;
+ struct cvmx_gmxx_tx_bp_s cn56xxp1;
+ struct cvmx_gmxx_tx_bp_s cn58xx;
+ struct cvmx_gmxx_tx_bp_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_clk_mskx {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_clk_mskx_s {
+ uint64_t reserved_1_63:63;
+ uint64_t msk:1;
+ } s;
+ struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
+ struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
+};
+
+union cvmx_gmxx_tx_col_attempt {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_col_attempt_s {
+ uint64_t reserved_5_63:59;
+ uint64_t limit:5;
+ } s;
+ struct cvmx_gmxx_tx_col_attempt_s cn30xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn31xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn38xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
+ struct cvmx_gmxx_tx_col_attempt_s cn50xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn52xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cn56xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
+ struct cvmx_gmxx_tx_col_attempt_s cn58xx;
+ struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_corrupt {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_corrupt_s {
+ uint64_t reserved_4_63:60;
+ uint64_t corrupt:4;
+ } s;
+ struct cvmx_gmxx_tx_corrupt_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t corrupt:3;
+ } cn30xx;
+ struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
+ struct cvmx_gmxx_tx_corrupt_s cn38xx;
+ struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
+ struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_corrupt_s cn52xx;
+ struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
+ struct cvmx_gmxx_tx_corrupt_s cn56xx;
+ struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
+ struct cvmx_gmxx_tx_corrupt_s cn58xx;
+ struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_hg2_reg1 {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_hg2_reg1_s {
+ uint64_t reserved_16_63:48;
+ uint64_t tx_xof:16;
+ } s;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
+ struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
+};
+
+union cvmx_gmxx_tx_hg2_reg2 {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_hg2_reg2_s {
+ uint64_t reserved_16_63:48;
+ uint64_t tx_xon:16;
+ } s;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
+ struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
+};
+
+union cvmx_gmxx_tx_ifg {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_ifg_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ifg2:4;
+ uint64_t ifg1:4;
+ } s;
+ struct cvmx_gmxx_tx_ifg_s cn30xx;
+ struct cvmx_gmxx_tx_ifg_s cn31xx;
+ struct cvmx_gmxx_tx_ifg_s cn38xx;
+ struct cvmx_gmxx_tx_ifg_s cn38xxp2;
+ struct cvmx_gmxx_tx_ifg_s cn50xx;
+ struct cvmx_gmxx_tx_ifg_s cn52xx;
+ struct cvmx_gmxx_tx_ifg_s cn52xxp1;
+ struct cvmx_gmxx_tx_ifg_s cn56xx;
+ struct cvmx_gmxx_tx_ifg_s cn56xxp1;
+ struct cvmx_gmxx_tx_ifg_s cn58xx;
+ struct cvmx_gmxx_tx_ifg_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_int_en {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_int_en_s {
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+ } s;
+ struct cvmx_gmxx_tx_int_en_cn30xx {
+ uint64_t reserved_19_63:45;
+ uint64_t late_col:3;
+ uint64_t reserved_15_15:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn30xx;
+ struct cvmx_gmxx_tx_int_en_cn31xx {
+ uint64_t reserved_15_63:49;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn31xx;
+ struct cvmx_gmxx_tx_int_en_s cn38xx;
+ struct cvmx_gmxx_tx_int_en_cn38xxp2 {
+ uint64_t reserved_16_63:48;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+ } cn38xxp2;
+ struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_int_en_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn52xx;
+ struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
+ struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
+ struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
+ struct cvmx_gmxx_tx_int_en_s cn58xx;
+ struct cvmx_gmxx_tx_int_en_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_int_reg_s {
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+ } s;
+ struct cvmx_gmxx_tx_int_reg_cn30xx {
+ uint64_t reserved_19_63:45;
+ uint64_t late_col:3;
+ uint64_t reserved_15_15:1;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn30xx;
+ struct cvmx_gmxx_tx_int_reg_cn31xx {
+ uint64_t reserved_15_63:49;
+ uint64_t xsdef:3;
+ uint64_t reserved_11_11:1;
+ uint64_t xscol:3;
+ uint64_t reserved_5_7:3;
+ uint64_t undflw:3;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn31xx;
+ struct cvmx_gmxx_tx_int_reg_s cn38xx;
+ struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
+ uint64_t reserved_16_63:48;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t ncb_nxa:1;
+ uint64_t pko_nxa:1;
+ } cn38xxp2;
+ struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_int_reg_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t late_col:4;
+ uint64_t xsdef:4;
+ uint64_t xscol:4;
+ uint64_t reserved_6_7:2;
+ uint64_t undflw:4;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn52xx;
+ struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
+ struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
+ struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
+ struct cvmx_gmxx_tx_int_reg_s cn58xx;
+ struct cvmx_gmxx_tx_int_reg_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_jam {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_jam_s {
+ uint64_t reserved_8_63:56;
+ uint64_t jam:8;
+ } s;
+ struct cvmx_gmxx_tx_jam_s cn30xx;
+ struct cvmx_gmxx_tx_jam_s cn31xx;
+ struct cvmx_gmxx_tx_jam_s cn38xx;
+ struct cvmx_gmxx_tx_jam_s cn38xxp2;
+ struct cvmx_gmxx_tx_jam_s cn50xx;
+ struct cvmx_gmxx_tx_jam_s cn52xx;
+ struct cvmx_gmxx_tx_jam_s cn52xxp1;
+ struct cvmx_gmxx_tx_jam_s cn56xx;
+ struct cvmx_gmxx_tx_jam_s cn56xxp1;
+ struct cvmx_gmxx_tx_jam_s cn58xx;
+ struct cvmx_gmxx_tx_jam_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_lfsr {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_lfsr_s {
+ uint64_t reserved_16_63:48;
+ uint64_t lfsr:16;
+ } s;
+ struct cvmx_gmxx_tx_lfsr_s cn30xx;
+ struct cvmx_gmxx_tx_lfsr_s cn31xx;
+ struct cvmx_gmxx_tx_lfsr_s cn38xx;
+ struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
+ struct cvmx_gmxx_tx_lfsr_s cn50xx;
+ struct cvmx_gmxx_tx_lfsr_s cn52xx;
+ struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cn56xx;
+ struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
+ struct cvmx_gmxx_tx_lfsr_s cn58xx;
+ struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_ovr_bp {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_ovr_bp_s {
+ uint64_t reserved_48_63:16;
+ uint64_t tx_prt_bp:16;
+ uint64_t reserved_12_31:20;
+ uint64_t en:4;
+ uint64_t bp:4;
+ uint64_t ign_full:4;
+ } s;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx {
+ uint64_t reserved_11_63:53;
+ uint64_t en:3;
+ uint64_t reserved_7_7:1;
+ uint64_t bp:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ign_full:3;
+ } cn30xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx {
+ uint64_t reserved_12_63:52;
+ uint64_t en:4;
+ uint64_t bp:4;
+ uint64_t ign_full:4;
+ } cn38xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
+ struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
+ struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
+ struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
+};
+
+union cvmx_gmxx_tx_pause_pkt_dmac {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s {
+ uint64_t reserved_48_63:16;
+ uint64_t dmac:48;
+ } s;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
+ struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_pause_pkt_type {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_pause_pkt_type_s {
+ uint64_t reserved_16_63:48;
+ uint64_t type:16;
+ } s;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
+ struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_prts {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_prts_s {
+ uint64_t reserved_5_63:59;
+ uint64_t prts:5;
+ } s;
+ struct cvmx_gmxx_tx_prts_s cn30xx;
+ struct cvmx_gmxx_tx_prts_s cn31xx;
+ struct cvmx_gmxx_tx_prts_s cn38xx;
+ struct cvmx_gmxx_tx_prts_s cn38xxp2;
+ struct cvmx_gmxx_tx_prts_s cn50xx;
+ struct cvmx_gmxx_tx_prts_s cn52xx;
+ struct cvmx_gmxx_tx_prts_s cn52xxp1;
+ struct cvmx_gmxx_tx_prts_s cn56xx;
+ struct cvmx_gmxx_tx_prts_s cn56xxp1;
+ struct cvmx_gmxx_tx_prts_s cn58xx;
+ struct cvmx_gmxx_tx_prts_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_spi_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_ctl_s {
+ uint64_t reserved_2_63:62;
+ uint64_t tpa_clr:1;
+ uint64_t cont_pkt:1;
+ } s;
+ struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
+ struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
+ struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
+ struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_spi_drain {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_drain_s {
+ uint64_t reserved_16_63:48;
+ uint64_t drain:16;
+ } s;
+ struct cvmx_gmxx_tx_spi_drain_s cn38xx;
+ struct cvmx_gmxx_tx_spi_drain_s cn58xx;
+ struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_spi_max {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_max_s {
+ uint64_t reserved_23_63:41;
+ uint64_t slice:7;
+ uint64_t max2:8;
+ uint64_t max1:8;
+ } s;
+ struct cvmx_gmxx_tx_spi_max_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t max2:8;
+ uint64_t max1:8;
+ } cn38xx;
+ struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
+ struct cvmx_gmxx_tx_spi_max_s cn58xx;
+ struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_spi_roundx {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_roundx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t round:16;
+ } s;
+ struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
+ struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_spi_thresh {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_spi_thresh_s {
+ uint64_t reserved_6_63:58;
+ uint64_t thresh:6;
+ } s;
+ struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
+ struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
+ struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
+ struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
+};
+
+union cvmx_gmxx_tx_xaui_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_tx_xaui_ctl_s {
+ uint64_t reserved_11_63:53;
+ uint64_t hg_pause_hgi:2;
+ uint64_t hg_en:1;
+ uint64_t reserved_7_7:1;
+ uint64_t ls_byp:1;
+ uint64_t ls:2;
+ uint64_t reserved_2_3:2;
+ uint64_t uni_en:1;
+ uint64_t dic_en:1;
+ } s;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
+ struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
+};
+
+union cvmx_gmxx_xaui_ext_loopback {
+ uint64_t u64;
+ struct cvmx_gmxx_xaui_ext_loopback_s {
+ uint64_t reserved_5_63:59;
+ uint64_t en:1;
+ uint64_t thresh:4;
+ } s;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
+ struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper-board.c b/drivers/staging/octeon/cvmx-helper-board.c
new file mode 100644
index 000000000000..3085e38a6f99
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-board.c
@@ -0,0 +1,706 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ */
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-bootinfo.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-mdio.h"
+
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-helper-board.h"
+
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-asxx-defs.h"
+
+/**
+ * cvmx_override_board_link_get(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the process of
+ * talking to a PHY to determine link speed. It is called every
+ * time a PHY must be polled for link status. Users should set
+ * this pointer to a function before calling any cvmx-helper
+ * operations.
+ */
+cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) =
+ NULL;
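+
+/*
+ * Minimal usage sketch for the override hook above, kept under #if 0
+ * because it is illustrative only. The names my_board_link_get() and
+ * my_board_early_init() are hypothetical; a real board support file
+ * would choose its own. The sketch simply reports a fixed 1Gbps full
+ * duplex link on IPD port 0 and leaves every other port down.
+ */
+#if 0
+static cvmx_helper_link_info_t my_board_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+
+ result.u64 = 0;
+ if (ipd_port == 0) {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ }
+ return result;
+}
+
+static void my_board_early_init(void)
+{
+ /* Must be installed before any cvmx-helper operations run */
+ cvmx_override_board_link_get = my_board_link_get;
+}
+#endif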
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: Octeon IPD port to get the MII address for.
+ *
+ * Returns MII PHY address and bus number or -1.
+ */
+int cvmx_helper_board_get_mii_address(int ipd_port)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_SIM:
+ /* Simulator doesn't have MII */
+ return -1;
+ case CVMX_BOARD_TYPE_EBT3000:
+ case CVMX_BOARD_TYPE_EBT5800:
+ case CVMX_BOARD_TYPE_THUNDER:
+ case CVMX_BOARD_TYPE_NICPRO2:
+ /* Interface 0 is SPI4, interface 1 is RGMII */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_KODAMA:
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_HIKARI:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+ /*
+ * Port 0 is WAN connected to a PHY, Port 1 is GMII
+ * connected to a switch
+ */
+ if (ipd_port == 0)
+ return 4;
+ else if (ipd_port == 1)
+ return 9;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NAC38:
+ /* Board has 8 RGMII ports; PHYs are 0-7 */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port;
+ else if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16 + 4;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBH3000:
+ /* Board has dual SPI4 and no PHYs */
+ return -1;
+ case CVMX_BOARD_TYPE_EBH5200:
+ case CVMX_BOARD_TYPE_EBH5201:
+ case CVMX_BOARD_TYPE_EBT5200:
+ /*
+ * Board has 4 SGMII ports. The PHYs start right after the MII
+ * ports: MII0 = 0, MII1 = 1, SGMII = 2-5.
+ */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port + 2;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_EBH5600:
+ case CVMX_BOARD_TYPE_EBH5601:
+ case CVMX_BOARD_TYPE_EBH5610:
+ /*
+ * Board has 8 SGMII ports. 4 connect out, two connect
+ * to a switch, and 2 loop to each other
+ */
+ if ((ipd_port >= 0) && (ipd_port < 4))
+ return ipd_port + 1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ if (ipd_port == 2)
+ return 4;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ /* Board has 4 SGMII ports connected to QLM3 (interface 1) */
+ if ((ipd_port >= 16) && (ipd_port < 20))
+ return ipd_port - 16 + 1;
+ else
+ return -1;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ /*
+ * No PHYs are connected to Octeon, everything is
+ * through switch.
+ */
+ return -1;
+ }
+
+ /* Some unknown board. Somebody forgot to update this function... */
+ cvmx_dprintf
+ ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
+ cvmx_sysinfo_get()->board_type);
+ return -1;
+}
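+
+/*
+ * Illustrative sketch (under #if 0) of how the encoded return value
+ * above is meant to be consumed: bits <15:8> carry the MDIO bus number
+ * and bits <7:0> the PHY address, which is exactly how the
+ * cvmx_mdio_read() calls elsewhere in this file split it.
+ * example_read_phy_id() is a hypothetical helper.
+ */
+#if 0
+static int example_read_phy_id(int ipd_port)
+{
+ int phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
+
+ if (phy_addr == -1)
+ return -1; /* No MII capable PHY on this port */
+ /* Register 2 holds the upper PHY identifier bits */
+ return cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 0x2);
+}
+#endif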
+
+/**
+ * This function is the board specific method of determining an
+ * ethernet port's link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * Returns The port's link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int phy_addr;
+ int is_broadcom_phy = 0;
+
+ /* Give the user a chance to override the processing of this function */
+ if (cvmx_override_board_link_get)
+ return cvmx_override_board_link_get(ipd_port);
+
+ /* Unless we fix it later, all links are defaulted to down */
+ result.u64 = 0;
+
+ /*
+ * This switch statement should handle all ports that either don't use
+ * Marvell PHYs or don't support in-band status.
+ */
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_SIM:
+ /* The simulator gives you a simulated 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ case CVMX_BOARD_TYPE_EBH3100:
+ case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
+ /* Port 1 on these boards is always Gigabit */
+ if (ipd_port == 1) {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ /* Fall through to the generic code below */
+ break;
+ case CVMX_BOARD_TYPE_CUST_NB5:
+ /* Port 1 on these boards is always Gigabit */
+ if (ipd_port == 1) {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ } else /* The other port uses a Broadcom PHY */
+ is_broadcom_phy = 1;
+ break;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ /* Ports 0 and 1 connect to the switch; port 2 is not hooked up */
+ if (ipd_port == 2) {
+ /* Port 2 is not hooked up */
+ result.u64 = 0;
+ return result;
+ } else {
+ /* Ports 0 and 1 connect to the switch */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+ break;
+ }
+
+ phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
+ if (phy_addr != -1) {
+ if (is_broadcom_phy) {
+ /*
+ * Below we are going to read SMI/MDIO
+ * register 0x19 which works on Broadcom
+ * parts
+ */
+ int phy_status =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ 0x19);
+ switch ((phy_status >> 8) & 0x7) {
+ case 0:
+ result.u64 = 0;
+ break;
+ case 1:
+ result.s.link_up = 1;
+ result.s.full_duplex = 0;
+ result.s.speed = 10;
+ break;
+ case 2:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10;
+ break;
+ case 3:
+ result.s.link_up = 1;
+ result.s.full_duplex = 0;
+ result.s.speed = 100;
+ break;
+ case 4:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 100;
+ break;
+ case 5:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 100;
+ break;
+ case 6:
+ result.s.link_up = 1;
+ result.s.full_duplex = 0;
+ result.s.speed = 1000;
+ break;
+ case 7:
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ break;
+ }
+ } else {
+ /*
+ * This code assumes we are using a Marvell
+ * Gigabit PHY. All the speed information can
+ * be read from register 17 in one
+ * go. Somebody using a different PHY will
+ * need to handle it above in the board
+ * specific area.
+ */
+ int phy_status =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
+
+ /*
+ * If the resolve bit 11 isn't set, see if
+ * autoneg is turned off (bit 12, reg 0). The
+ * resolve bit doesn't get set properly when
+ * autoneg is off, so force it.
+ */
+ if ((phy_status & (1 << 11)) == 0) {
+ int auto_status =
+ cvmx_mdio_read(phy_addr >> 8,
+ phy_addr & 0xff, 0);
+ if ((auto_status & (1 << 12)) == 0)
+ phy_status |= 1 << 11;
+ }
+
+ /*
+ * Only return a link if the PHY has finished
+ * auto negotiation and set the resolved bit
+ * (bit 11)
+ */
+ if (phy_status & (1 << 11)) {
+ result.s.link_up = 1;
+ result.s.full_duplex = ((phy_status >> 13) & 1);
+ switch ((phy_status >> 14) & 3) {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.u64 = 0;
+ break;
+ }
+ }
+ }
+ } else if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /*
+ * We don't have a PHY address, so attempt to use
+ * in-band status. It is really important that boards
+ * not supporting in-band status never get
+ * here. Reading broken in-band status tends to do bad
+ * things
+ */
+ union cvmx_gmxx_rxx_rx_inbnd inband_status;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ inband_status.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
+
+ result.s.link_up = inband_status.s.status;
+ result.s.full_duplex = inband_status.s.duplex;
+ switch (inband_status.s.speed) {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.u64 = 0;
+ break;
+ }
+ } else {
+ /*
+ * We don't have a PHY address and we don't have
+ * in-band status. There is no way to determine the
+ * link speed. Return down assuming this port isn't
+ * wired
+ */
+ result.u64 = 0;
+ }
+
+ /* If link is down, return all fields as zero. */
+ if (!result.s.link_up)
+ result.u64 = 0;
+
+ return result;
+}
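+
+/*
+ * Minimal caller sketch (under #if 0), assuming the polling model
+ * described above: the board link routine is queried and the result
+ * is only trusted when link_up is set. example_print_link() is a
+ * hypothetical helper used purely for illustration.
+ */
+#if 0
+static void example_print_link(int ipd_port)
+{
+ cvmx_helper_link_info_t link = __cvmx_helper_board_link_get(ipd_port);
+
+ if (link.s.link_up)
+ cvmx_dprintf("Port %d: %d Mbps, %s duplex\n", ipd_port,
+ link.s.speed,
+ link.s.full_duplex ? "full" : "half");
+ else
+ cvmx_dprintf("Port %d: link down\n", ipd_port);
+}
+#endif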
+
+/**
+ * This function is the board specific method of changing the PHY
+ * speed, duplex, and auto-negotiation. This programs the PHY and
+ * not Octeon. This can be used to force Octeon's links to
+ * specific settings.
+ *
+ * @phy_addr: The address of the PHY to program
+ * @enable_autoneg:
+ * Non-zero if you want to enable auto-negotiation.
+ * @link_info: Link speed to program. If the speed is zero and auto-negotiation
+ * is enabled, all possible negotiation speeds are advertised.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_board_link_set_phy(int phy_addr,
+ cvmx_helper_board_set_phy_link_flags_types_t
+ link_flags,
+ cvmx_helper_link_info_t link_info)
+{
+
+ /* Set the flow control settings based on link_flags */
+ if ((link_flags & set_phy_link_flags_flow_control_mask) !=
+ set_phy_link_flags_flow_control_dont_touch) {
+ cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
+ reg_autoneg_adver.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
+ reg_autoneg_adver.s.asymmetric_pause =
+ (link_flags & set_phy_link_flags_flow_control_mask) ==
+ set_phy_link_flags_flow_control_enable;
+ reg_autoneg_adver.s.pause =
+ (link_flags & set_phy_link_flags_flow_control_mask) ==
+ set_phy_link_flags_flow_control_enable;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
+ reg_autoneg_adver.u16);
+ }
+
+ /* If speed isn't set and autoneg is on, advertise all supported modes */
+ if ((link_flags & set_phy_link_flags_autoneg)
+ && (link_info.s.speed == 0)) {
+ cvmx_mdio_phy_reg_control_t reg_control;
+ cvmx_mdio_phy_reg_status_t reg_status;
+ cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
+ cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
+ cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
+
+ reg_status.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_STATUS);
+ reg_autoneg_adver.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
+ reg_autoneg_adver.s.advert_100base_t4 =
+ reg_status.s.capable_100base_t4;
+ reg_autoneg_adver.s.advert_10base_tx_full =
+ reg_status.s.capable_10_full;
+ reg_autoneg_adver.s.advert_10base_tx_half =
+ reg_status.s.capable_10_half;
+ reg_autoneg_adver.s.advert_100base_tx_full =
+ reg_status.s.capable_100base_x_full;
+ reg_autoneg_adver.s.advert_100base_tx_half =
+ reg_status.s.capable_100base_x_half;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
+ reg_autoneg_adver.u16);
+ if (reg_status.s.capable_extended_status) {
+ reg_extended_status.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
+ reg_control_1000.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL_1000);
+ reg_control_1000.s.advert_1000base_t_full =
+ reg_extended_status.s.capable_1000base_t_full;
+ reg_control_1000.s.advert_1000base_t_half =
+ reg_extended_status.s.capable_1000base_t_half;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL_1000,
+ reg_control_1000.u16);
+ }
+ reg_control.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL);
+ reg_control.s.autoneg_enable = 1;
+ reg_control.s.restart_autoneg = 1;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
+ } else if ((link_flags & set_phy_link_flags_autoneg)) {
+ cvmx_mdio_phy_reg_control_t reg_control;
+ cvmx_mdio_phy_reg_status_t reg_status;
+ cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
+ cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
+ cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
+
+ reg_status.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_STATUS);
+ reg_autoneg_adver.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
+ reg_autoneg_adver.s.advert_100base_t4 = 0;
+ reg_autoneg_adver.s.advert_10base_tx_full = 0;
+ reg_autoneg_adver.s.advert_10base_tx_half = 0;
+ reg_autoneg_adver.s.advert_100base_tx_full = 0;
+ reg_autoneg_adver.s.advert_100base_tx_half = 0;
+ if (reg_status.s.capable_extended_status) {
+ reg_extended_status.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
+ reg_control_1000.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL_1000);
+ reg_control_1000.s.advert_1000base_t_full = 0;
+ reg_control_1000.s.advert_1000base_t_half = 0;
+ }
+ switch (link_info.s.speed) {
+ case 10:
+ reg_autoneg_adver.s.advert_10base_tx_full =
+ link_info.s.full_duplex;
+ reg_autoneg_adver.s.advert_10base_tx_half =
+ !link_info.s.full_duplex;
+ break;
+ case 100:
+ reg_autoneg_adver.s.advert_100base_tx_full =
+ link_info.s.full_duplex;
+ reg_autoneg_adver.s.advert_100base_tx_half =
+ !link_info.s.full_duplex;
+ break;
+ case 1000:
+ reg_control_1000.s.advert_1000base_t_full =
+ link_info.s.full_duplex;
+ reg_control_1000.s.advert_1000base_t_half =
+ !link_info.s.full_duplex;
+ break;
+ }
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
+ reg_autoneg_adver.u16);
+ if (reg_status.s.capable_extended_status)
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL_1000,
+ reg_control_1000.u16);
+ reg_control.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL);
+ reg_control.s.autoneg_enable = 1;
+ reg_control.s.restart_autoneg = 1;
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
+ } else {
+ cvmx_mdio_phy_reg_control_t reg_control;
+ reg_control.u16 =
+ cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL);
+ reg_control.s.autoneg_enable = 0;
+ reg_control.s.restart_autoneg = 1;
+ reg_control.s.duplex = link_info.s.full_duplex;
+ if (link_info.s.speed == 1000) {
+ reg_control.s.speed_msb = 1;
+ reg_control.s.speed_lsb = 0;
+ } else if (link_info.s.speed == 100) {
+ reg_control.s.speed_msb = 0;
+ reg_control.s.speed_lsb = 1;
+ } else if (link_info.s.speed == 10) {
+ reg_control.s.speed_msb = 0;
+ reg_control.s.speed_lsb = 0;
+ }
+ cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
+ CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
+ }
+ return 0;
+}
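For illustration only (an editorial sketch, not part of the patch): one way a caller could use this helper to force a PHY to 100 Mbit full duplex. Passing zero for link_flags leaves the autonegotiation bit clear, so the final branch above disables autonegotiation and programs the speed bits directly. The wrapper name force_phy_100_full is hypothetical.

    /* Hypothetical wrapper, for illustration only. */
    int force_phy_100_full(int ipd_port)
    {
            cvmx_helper_link_info_t link;
            int phy_addr = cvmx_helper_board_get_mii_address(ipd_port);

            if (phy_addr == -1)
                    return -1;      /* no MII-capable PHY on this port */

            link.u64 = 0;
            link.s.speed = 100;
            link.s.full_duplex = 1;
            /* link_flags == 0: no autonegotiation, force the settings */
            return cvmx_helper_board_link_set_phy(phy_addr, 0, link);
    }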
+
+/**
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per board location to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @interface: Interface to probe
+ * @supported_ports:
+ * Number of ports Octeon supports.
+ *
+ * Returns Number of ports the actual board supports. Many times this will
+ * simply be "supported_ports".
+ */
+int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
+ if (interface == 0)
+ return 2;
+ break;
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ if (interface == 0)
+ return 2;
+ break;
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ if (interface == 0)
+ return 0;
+ break;
+ /* The 2nd interface on the EBH5600 is connected to the Marvell switch,
+ which we don't support. Disable the ports connected to it */
+ case CVMX_BOARD_TYPE_EBH5600:
+ if (interface == 1)
+ return 0;
+ break;
+ }
+ return supported_ports;
+}
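An editorial sketch (not part of the patch) of the usual per-board addition to the switch above; CVMX_BOARD_TYPE_MY_NEW_BOARD is a made-up enumerator used purely for illustration.

    case CVMX_BOARD_TYPE_MY_NEW_BOARD:      /* hypothetical board type */
            if (interface == 0)
                    return 1;       /* only one port wired up on interface 0 */
            break;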
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called by cvmx_helper_packet_hardware_enable() to
+ * perform board specific initialization. For most boards
+ * nothing is needed.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_board_hardware_enable(int interface)
+{
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
+ if (interface == 0) {
+ /* Different config for switch port */
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
+ /*
+ * Boards with gigabit WAN ports need a
+ * different setting that is compatible with
+ * 100 Mbit settings
+ */
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
+ 0xc);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
+ 0xc);
+ }
+ } else if (cvmx_sysinfo_get()->board_type ==
+ CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
+ /*
+ * Broadcom PHYs require different ASX
+ * clocks. Unfortunately many boards don't define a
+ * new board ID and simply mangle the
+ * CN3010_EVB_HS5 one.
+ */
+ if (interface == 0) {
+ /*
+ * Some boards use a hacked up bootloader that
+ * identifies them as CN3010_EVB_HS5
+ * evaluation boards. This leads to all kinds
+ * of configuration problems. Detect one
+ * case, print a warning, and try to do
+ * the right thing.
+ */
+ int phy_addr = cvmx_helper_board_get_mii_address(0);
+ if (phy_addr != -1) {
+ int phy_identifier =
+ cvmx_mdio_read(phy_addr >> 8,
+ phy_addr & 0xff, 0x2);
+ /* Is it a Broadcom PHY? */
+ if (phy_identifier == 0x0143) {
+ cvmx_dprintf("\n");
+ cvmx_dprintf("ERROR:\n");
+ cvmx_dprintf
+ ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
+ cvmx_dprintf
+ ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
+ cvmx_dprintf
+ ("ERROR: All boards require a unique board type to identify them.\n");
+ cvmx_dprintf("ERROR:\n");
+ cvmx_dprintf("\n");
+ cvmx_wait(1000000000);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX
+ (0, interface), 5);
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX
+ (0, interface), 5);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ }
+ return USB_CLOCK_TYPE_REF_48;
+}
+
+int __cvmx_helper_board_usb_get_num_ports(int supported_ports)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_NIC_XLE_4G:
+ return 0;
+ }
+
+ return supported_ports;
+}
diff --git a/drivers/staging/octeon/cvmx-helper-board.h b/drivers/staging/octeon/cvmx-helper-board.h
new file mode 100644
index 000000000000..dc20b01247c4
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-board.h
@@ -0,0 +1,180 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Helper functions to abstract board specific data about
+ * network ports from the rest of the cvmx-helper files.
+ *
+ */
+#ifndef __CVMX_HELPER_BOARD_H__
+#define __CVMX_HELPER_BOARD_H__
+
+#include "cvmx-helper.h"
+
+typedef enum {
+ USB_CLOCK_TYPE_REF_12,
+ USB_CLOCK_TYPE_REF_24,
+ USB_CLOCK_TYPE_REF_48,
+ USB_CLOCK_TYPE_CRYSTAL_12,
+} cvmx_helper_board_usb_clock_types_t;
+
+typedef enum {
+ set_phy_link_flags_autoneg = 0x1,
+ set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
+ set_phy_link_flags_flow_control_enable = 0x1 << 1,
+ set_phy_link_flags_flow_control_disable = 0x2 << 1,
+ set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
+} cvmx_helper_board_set_phy_link_flags_types_t;
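Editorial note (not part of the patch): bit 0 carries the autonegotiation enable while bits <2:1> form the two-bit flow control field, so the values are presumably meant to be OR'd together. A hypothetical combination:

    cvmx_helper_board_set_phy_link_flags_types_t flags =
            set_phy_link_flags_autoneg |
            set_phy_link_flags_flow_control_enable;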
+
+/**
+ * cvmx_override_board_link_get(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the process of
+ * talking to a PHY to determine link speed. It is called every
+ * time a PHY must be polled for link status. Users should set
+ * this pointer to a function before calling any cvmx-helper
+ * operations.
+ */
+extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
+
+/**
+ * Return the MII PHY address associated with the given IPD
+ * port. A result of -1 means there isn't a MII capable PHY
+ * connected to this port. On chips supporting multiple MII
+ * busses the bus number is encoded in bits <15:8>.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: Octeon IPD port to get the MII address for.
+ *
+ * Returns MII PHY address and bus number or -1.
+ */
+extern int cvmx_helper_board_get_mii_address(int ipd_port);
+
+/**
+ * This function is a board specific method of changing the PHY
+ * speed, duplex, and autonegotiation. This programs the PHY and
+ * not Octeon. This can be used to force Octeon's links to
+ * specific settings.
+ *
+ * @phy_addr: The address of the PHY to program
+ * @link_flags:
+ * Flags to control autonegotiation. Bit 0 is autonegotiation
+ * enable/disable to maintain backward compatibility.
+ * @link_info: Link speed to program. If the speed is zero and autonegotiation
+ * is enabled, all possible negotiation speeds are advertised.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_board_link_set_phy(int phy_addr,
+ cvmx_helper_board_set_phy_link_flags_types_t
+ link_flags,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * This function is the board specific method of determining an
+ * ethernet ports link speed. Most Octeon boards have Marvell PHYs
+ * and are handled by the fall through case. This function must be
+ * updated for boards that don't have the normal Marvell PHYs.
+ *
+ * This function must be modifed for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relys on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @ipd_port: IPD input port associated with the port we want to get link
+ * status for.
+ *
+ * Returns The port's link status. If the link isn't fully resolved, this must
+ * return zero.
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
+
+/**
+ * This function is called by cvmx_helper_interface_probe() after it
+ * determines the number of ports Octeon can support on a specific
+ * interface. This function is the per board location to override
+ * this value. It is called with the number of ports Octeon might
+ * support and should return the number of actual ports on the
+ * board.
+ *
+ * This function must be modified for every new Octeon board.
+ * Internally it uses switch statements based on the cvmx_sysinfo
+ * data to determine board types and revisions. It relies on the
+ * fact that every Octeon board receives a unique board type
+ * enumeration from the bootloader.
+ *
+ * @interface: Interface to probe
+ * @supported_ports:
+ * Number of ports Octeon supports.
+ *
+ * Returns Number of ports the actual board supports. Many times this will
+ * simply be "supported_ports".
+ */
+extern int __cvmx_helper_board_interface_probe(int interface,
+ int supported_ports);
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called by cvmx_helper_packet_hardware_enable() to
+ * perform board specific initialization. For most boards
+ * nothing is needed.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_board_hardware_enable(int interface);
+
+/**
+ * Gets the clock type used for the USB block based on board type.
+ * Used by the USB code for auto configuration of clock type.
+ *
+ * Returns USB clock type enumeration
+ */
+cvmx_helper_board_usb_clock_types_t
+__cvmx_helper_board_usb_get_clock_type(void);
+
+/**
+ * Adjusts the number of available USB ports on Octeon based on board
+ * specifics.
+ *
+ * @supported_ports: expected number of ports based on chip type
+ *
+ * Returns number of available USB ports, based on board specifics.
+ * Return value is supported_ports if the function does not
+ * override it.
+ */
+int __cvmx_helper_board_usb_get_num_ports(int supported_ports);
+
+#endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.c b/drivers/staging/octeon/cvmx-helper-fpa.c
new file mode 100644
index 000000000000..c239e5f4ab9a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-fpa.c
@@ -0,0 +1,243 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Helper functions for FPA setup.
+ *
+ */
+#include "executive-config.h"
+#include "cvmx-config.h"
+#include "cvmx.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-fpa.h"
+#include "cvmx-helper-fpa.h"
+
+/**
+ * Allocate memory for and initialize a single FPA pool.
+ *
+ * @pool: Pool to initialize
+ * @buffer_size: Size of buffers to allocate in bytes
+ * @buffers: Number of buffers to put in the pool. Zero is allowed
+ * @name: String name of the pool for debugging purposes
+ * Returns Zero on success, non-zero on failure
+ */
+static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
+ uint64_t buffers, const char *name)
+{
+ uint64_t current_num;
+ void *memory;
+ uint64_t align = CVMX_CACHE_LINE_SIZE;
+
+ /*
+ * Align the allocation so that power of 2 size buffers are
+ * naturally aligned.
+ */
+ while (align < buffer_size)
+ align = align << 1;
+
+ if (buffers == 0)
+ return 0;
+
+ current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
+ if (current_num) {
+ cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. "
+ "Skipping setup.\n",
+ pool, name, (unsigned long long)current_num);
+ return 0;
+ }
+
+ memory = cvmx_bootmem_alloc(buffer_size * buffers, align);
+ if (memory == NULL) {
+ cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n",
+ pool, name);
+ return -1;
+ }
+ cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers);
+ return 0;
+}
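A small editorial aside, not part of the patch: the alignment loop above rounds the allocation alignment up to the smallest power of two that is at least the buffer size, so power-of-2 sized buffers come out naturally aligned. A standalone sketch of the same computation, assuming Octeon's 128-byte cache line:

    #include <stdint.h>

    /* Mirror of the loop above: a 2048-byte buffer yields 2048-byte
       alignment; a 96-byte buffer keeps the 128-byte cache-line
       alignment. */
    static uint64_t fpa_pool_alignment(uint64_t buffer_size)
    {
            uint64_t align = 128;   /* assumed CVMX_CACHE_LINE_SIZE */

            while (align < buffer_size)
                    align <<= 1;
            return align;
    }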
+
+/**
+ * Allocate memory and initialize the FPA pools using memory
+ * from cvmx-bootmem. Specifying zero for the number of
+ * buffers will cause that FPA pool to not be setup. This is
+ * useful if you aren't using some of the hardware and want
+ * to save memory. Use cvmx_helper_initialize_fpa instead of
+ * this function directly.
+ *
+ * @pip_pool: Should always be CVMX_FPA_PACKET_POOL
+ * @pip_size: Should always be CVMX_FPA_PACKET_POOL_SIZE
+ * @pip_buffers:
+ * Number of packet buffers.
+ * @wqe_pool: Should always be CVMX_FPA_WQE_POOL
+ * @wqe_size: Should always be CVMX_FPA_WQE_POOL_SIZE
+ * @wqe_entries:
+ * Number of work queue entries
+ * @pko_pool: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
+ * @pko_size: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+ * @pko_buffers:
+ * PKO Command buffers. You should at minimum have two per
+ * each PKO queue.
+ * @tim_pool: Should always be CVMX_FPA_TIMER_POOL
+ * @tim_size: Should always be CVMX_FPA_TIMER_POOL_SIZE
+ * @tim_buffers:
+ * TIM ring buffer command queues. At least two per timer bucket
+ * is recommended.
+ * @dfa_pool: Should always be CVMX_FPA_DFA_POOL
+ * @dfa_size: Should always be CVMX_FPA_DFA_POOL_SIZE
+ * @dfa_buffers:
+ * DFA command buffer. A relatively small (32 for example)
+ * number should work.
+ * Returns Zero on success, non-zero if out of memory
+ */
+static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size,
+ int pip_buffers, int wqe_pool,
+ int wqe_size, int wqe_entries,
+ int pko_pool, int pko_size,
+ int pko_buffers, int tim_pool,
+ int tim_size, int tim_buffers,
+ int dfa_pool, int dfa_size,
+ int dfa_buffers)
+{
+ int status;
+
+ cvmx_fpa_enable();
+
+ if ((pip_buffers > 0) && (pip_buffers <= 64))
+ cvmx_dprintf
+ ("Warning: %d packet buffers may not be enough for hardware"
+ " prefetch. 65 or more is recommended.\n", pip_buffers);
+
+ if (pip_pool >= 0) {
+ status =
+ __cvmx_helper_initialize_fpa_pool(pip_pool, pip_size,
+ pip_buffers,
+ "Packet Buffers");
+ if (status)
+ return status;
+ }
+
+ if (wqe_pool >= 0) {
+ status =
+ __cvmx_helper_initialize_fpa_pool(wqe_pool, wqe_size,
+ wqe_entries,
+ "Work Queue Entries");
+ if (status)
+ return status;
+ }
+
+ if (pko_pool >= 0) {
+ status =
+ __cvmx_helper_initialize_fpa_pool(pko_pool, pko_size,
+ pko_buffers,
+ "PKO Command Buffers");
+ if (status)
+ return status;
+ }
+
+ if (tim_pool >= 0) {
+ status =
+ __cvmx_helper_initialize_fpa_pool(tim_pool, tim_size,
+ tim_buffers,
+ "TIM Command Buffers");
+ if (status)
+ return status;
+ }
+
+ if (dfa_pool >= 0) {
+ status =
+ __cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_size,
+ dfa_buffers,
+ "DFA Command Buffers");
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * Allocate memory and initialize the FPA pools using memory
+ * from cvmx-bootmem. Sizes of each element in the pools is
+ * controlled by the cvmx-config.h header file. Specifying
+ * zero for any parameter will cause that FPA pool to not be
+ * setup. This is useful if you aren't using some of the
+ * hardware and want to save memory.
+ *
+ * @packet_buffers:
+ * Number of packet buffers to allocate
+ * @work_queue_entries:
+ * Number of work queue entries
+ * @pko_buffers:
+ * PKO Command buffers. You should at minimum have two per
+ * each PKO queue.
+ * @tim_buffers:
+ * TIM ring buffer command queues. At least two per timer bucket
+ * is recommended.
+ * @dfa_buffers:
+ * DFA command buffer. A relatively small (32 for example)
+ * number should work.
+ * Returns Zero on success, non-zero if out of memory
+ */
+int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
+ int pko_buffers, int tim_buffers,
+ int dfa_buffers)
+{
+#ifndef CVMX_FPA_PACKET_POOL
+#define CVMX_FPA_PACKET_POOL -1
+#define CVMX_FPA_PACKET_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_WQE_POOL
+#define CVMX_FPA_WQE_POOL -1
+#define CVMX_FPA_WQE_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
+#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_TIMER_POOL
+#define CVMX_FPA_TIMER_POOL -1
+#define CVMX_FPA_TIMER_POOL_SIZE 0
+#endif
+#ifndef CVMX_FPA_DFA_POOL
+#define CVMX_FPA_DFA_POOL -1
+#define CVMX_FPA_DFA_POOL_SIZE 0
+#endif
+ return __cvmx_helper_initialize_fpa(CVMX_FPA_PACKET_POOL,
+ CVMX_FPA_PACKET_POOL_SIZE,
+ packet_buffers, CVMX_FPA_WQE_POOL,
+ CVMX_FPA_WQE_POOL_SIZE,
+ work_queue_entries,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
+ pko_buffers, CVMX_FPA_TIMER_POOL,
+ CVMX_FPA_TIMER_POOL_SIZE,
+ tim_buffers, CVMX_FPA_DFA_POOL,
+ CVMX_FPA_DFA_POOL_SIZE,
+ dfa_buffers);
+}
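For illustration (editorial, not part of the patch): a boot-time caller might size the pools roughly as below; the counts are made up, and in practice they depend on the number of PKO queues, timer buckets, and expected packet load.

    /* Hypothetical sizing: 1024 packet buffers, 1024 work queue entries,
       64 PKO command buffers, and no TIM or DFA pools. */
    if (cvmx_helper_initialize_fpa(1024, 1024, 64, 0, 0))
            cvmx_dprintf("FPA initialization failed, likely out of bootmem\n");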
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.h b/drivers/staging/octeon/cvmx-helper-fpa.h
new file mode 100644
index 000000000000..5ff8c93198de
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-fpa.h
@@ -0,0 +1,64 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Helper functions for FPA setup.
+ *
+ */
+#ifndef __CVMX_HELPER_H_FPA__
+#define __CVMX_HELPER_H_FPA__
+
+/**
+ * Allocate memory and initialize the FPA pools using memory
+ * from cvmx-bootmem. Sizes of each element in the pools is
+ * controlled by the cvmx-config.h header file. Specifying
+ * zero for any parameter will cause that FPA pool to not be
+ * setup. This is useful if you aren't using some of the
+ * hardware and want to save memory.
+ *
+ * @packet_buffers:
+ * Number of packet buffers to allocate
+ * @work_queue_entries:
+ * Number of work queue entries
+ * @pko_buffers:
+ * PKO Command buffers. You should at minimum have two per
+ * each PKO queue.
+ * @tim_buffers:
+ * TIM ring buffer command queues. At least two per timer bucket
+ * is recommended.
+ * @dfa_buffers:
+ * DFA command buffer. A relatively small (32 for example)
+ * number should work.
+ * Returns Zero on success, non-zero if out of memory
+ */
+extern int cvmx_helper_initialize_fpa(int packet_buffers,
+ int work_queue_entries, int pko_buffers,
+ int tim_buffers, int dfa_buffers);
+
+#endif /* __CVMX_HELPER_H_FPA__ */
diff --git a/drivers/staging/octeon/cvmx-helper-loop.c b/drivers/staging/octeon/cvmx-helper-loop.c
new file mode 100644
index 000000000000..55a571a69529
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-loop.c
@@ -0,0 +1,85 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-helper.h"
+#include "cvmx-pip-defs.h"
+
+/**
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_loop_probe(int interface)
+{
+ union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+ int num_ports = 4;
+ int port;
+
+ /* We need to disable length checking so packets < 64 bytes and jumbo
+ frames don't get errors */
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_pip_prt_cfgx port_cfg;
+ int ipd_port = cvmx_helper_get_ipd_port(interface, port);
+ port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
+ }
+
+ /* Disable FCS stripping for loopback ports */
+ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+ ipd_sub_port_fcs.s.port_bit2 = 0;
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+ return num_ports;
+}
+
+/**
+ * Bringup and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_loop_enable(int interface)
+{
+ /* Do nothing. */
+ return 0;
+}
diff --git a/drivers/staging/octeon/cvmx-helper-loop.h b/drivers/staging/octeon/cvmx-helper-loop.h
new file mode 100644
index 000000000000..e646a6ccce75
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-loop.h
@@ -0,0 +1,59 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as published by
+ * the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for LOOP initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_LOOP_H__
+#define __CVMX_HELPER_LOOP_H__
+
+/**
+ * Probe a LOOP interface and determine the number of ports
+ * connected to it. The LOOP interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_loop_probe(int interface);
+
+/**
+ * Bringup and enable a LOOP interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_loop_enable(int interface);
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper-npi.c b/drivers/staging/octeon/cvmx-helper-npi.c
new file mode 100644
index 000000000000..7388a1e72b38
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-npi.c
@@ -0,0 +1,113 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-helper.h"
+
+#include "cvmx-pip-defs.h"
+
+/**
+ * Probe a NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_npi_probe(int interface)
+{
+#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
+ /* The packet engines didn't exist before pass 2 */
+ return 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
+ /* The packet engines didn't exist before pass 2 */
+ return 4;
+#if 0
+ /*
+ * Technically CN30XX, CN31XX, and CN50XX contain packet
+ * engines, but nobody ever uses them. Since this is the case,
+ * we disable them here.
+ */
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ return 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ return 1;
+#endif
+#endif
+ return 0;
+}
+
+/**
+ * Bringup and enable a NPI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_npi_enable(int interface)
+{
+ /*
+ * On CN50XX, CN52XX, and CN56XX we need to disable length
+ * checking so packets < 64 bytes and jumbo frames don't get
+ * errors.
+ */
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_pip_prt_cfgx port_cfg;
+ int ipd_port =
+ cvmx_helper_get_ipd_port(interface, port);
+ port_cfg.u64 =
+ cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_cfg.s.maxerr_en = 0;
+ port_cfg.s.minerr_en = 0;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
+ port_cfg.u64);
+ }
+ }
+
+ /* Enables are controlled by the remote host, so nothing to do here */
+ return 0;
+}
diff --git a/drivers/staging/octeon/cvmx-helper-npi.h b/drivers/staging/octeon/cvmx-helper-npi.h
new file mode 100644
index 000000000000..908e7b08c214
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-npi.h
@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for NPI initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_NPI_H__
+#define __CVMX_HELPER_NPI_H__
+
+/**
+ * Probe a NPI interface and determine the number of ports
+ * connected to it. The NPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_npi_probe(int interface);
+
+/**
+ * Bringup and enable a NPI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_npi_enable(int interface);
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.c b/drivers/staging/octeon/cvmx-helper-rgmii.c
new file mode 100644
index 000000000000..aa2d5d7fee2b
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-rgmii.c
@@ -0,0 +1,525 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+
+#include "cvmx-mdio.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+
+#include <asm/octeon/cvmx-npi-defs.h>
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-asxx-defs.h"
+#include "cvmx-dbg-defs.h"
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_asxx_enable(int block);
+
+/**
+ * Probe RGMII ports and determine the number present
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of RGMII/GMII/MII ports (0-4).
+ */
+int __cvmx_helper_rgmii_probe(int interface)
+{
+ int num_ports = 0;
+ union cvmx_gmxx_inf_mode mode;
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (mode.s.type) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ cvmx_dprintf("ERROR: RGMII initialize called in "
+ "SPI interface\n");
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /*
+ * On these chips "type" says we're in
+ * GMII/MII mode. This limits us to 2 ports
+ */
+ num_ports = 2;
+ } else {
+ cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
+ __func__);
+ }
+ } else {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ num_ports = 4;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ num_ports = 3;
+ } else {
+ cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
+ __func__);
+ }
+ }
+ return num_ports;
+}
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @port: IPD port number to loop.
+ */
+void cvmx_helper_rgmii_internal_loopback(int port)
+{
+ int interface = (port >> 4) & 1;
+ int index = port & 0xf;
+ uint64_t tmp;
+
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ gmx_cfg.u64 = 0;
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+}
+
+/**
+ * Work around ASX setup errata on CN38XX pass 1
+ *
+ * @interface: Interface to setup
+ * @port: Port to setup (0..3)
+ * @cpu_clock_hz:
+ * Chip frequency in Hertz
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_errata_asx_pass1(int interface, int port,
+ int cpu_clock_hz)
+{
+ /* Set hi water mark as per errata GMX-4 */
+ if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
+ else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
+ else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
+ else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
+ cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
+ else
+ cvmx_dprintf("Illegal clock frequency (%d). "
+ "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
+ return 0;
+}
+
+/**
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @interface: PKO Interface to configure (0 or 1)
+ *
+ * Returns Zero on success
+ */
+int __cvmx_helper_rgmii_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+ struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
+ union cvmx_gmxx_inf_mode mode;
+ union cvmx_asxx_tx_prt_en asx_tx;
+ union cvmx_asxx_rx_prt_en asx_rx;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (mode.s.en == 0)
+ return -1;
+ if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
+ /* Ignore SPI interfaces */
+ return -1;
+
+ /* Configure the ASX registers needed to use the RGMII ports */
+ asx_tx.u64 = 0;
+ asx_tx.s.prt_en = cvmx_build_mask(num_ports);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
+
+ asx_rx.u64 = 0;
+ asx_rx.s.prt_en = cvmx_build_mask(num_ports);
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
+
+ /* Configure the GMX registers needed to use the RGMII ports */
+ for (port = 0; port < num_ports; port++) {
+ /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
+ __cvmx_helper_setup_gmx() */
+
+ if (cvmx_octeon_is_pass1())
+ __cvmx_helper_errata_asx_pass1(interface, port,
+ sys_info_ptr->
+ cpu_clock_hz);
+ else {
+ /*
+ * Configure more flexible RGMII preamble
+ * checking. Pass 1 doesn't support this
+ * feature.
+ */
+ union cvmx_gmxx_rxx_frm_ctl frm_ctl;
+ frm_ctl.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
+ (port, interface));
+ /* New field, so must be compile time */
+ frm_ctl.s.pre_free = 1;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
+ frm_ctl.u64);
+ }
+
+ /*
+ * Each pause frame transmitted will ask for about 10M
+ * bit times before resume. If buffer space comes
+ * available before that time has expired, an XON
+ * pause frame (0 time) will be transmitted to restart
+ * the flow.
+ */
+ cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
+ 20000);
+ cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
+ (port, interface), 19000);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
+ 16);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
+ 16);
+ } else {
+ cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
+ 24);
+ cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
+ 24);
+ }
+ }
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ /* enable the ports now */
+ for (port = 0; port < num_ports; port++) {
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
+ (interface, port));
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
+ gmx_cfg.u64);
+ }
+ __cvmx_interrupt_asxx_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_asxx_prt_loop asxx_prt_loop;
+
+ asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ if (asxx_prt_loop.s.int_loop & (1 << index)) {
+ /* Force 1Gbps full duplex on internal loopback */
+ cvmx_helper_link_info_t result;
+ result.u64 = 0;
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ return result;
+ } else
+ return __cvmx_helper_board_link_get(ipd_port);
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_rgmii_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ int result = 0;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_gmxx_prtx_cfg original_gmx_cfg;
+ union cvmx_gmxx_prtx_cfg new_gmx_cfg;
+ union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
+ union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
+ union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
+ union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
+ int i;
+
+ /* Ignore speed sets in the simulator */
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
+ return 0;
+
+ /* Read the current settings so we know the current enable state */
+ original_gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ new_gmx_cfg = original_gmx_cfg;
+
+ /* Disable the lowest level RX */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
+ ~(1 << index));
+
+ /* Disable all queues so that TX should become idle */
+ for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+ pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
+ pko_mem_queue_qos.s.pid = ipd_port;
+ pko_mem_queue_qos.s.qid = queue;
+ pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
+ pko_mem_queue_qos.s.qos_mask = 0;
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
+ }
+
+ /* Disable backpressure */
+ gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+ gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
+ gmx_tx_ovr_bp.s.bp &= ~(1 << index);
+ gmx_tx_ovr_bp.s.en |= 1 << index;
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
+ cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
+
+ /*
+ * Poll the GMX state machine waiting for it to become
+ * idle. Preferably we should only change speed when it is
+ * idle. If it doesn't become idle we will still do the speed
+ * change, but there is a slight chance that GMX will
+ * lockup.
+ */
+ cvmx_write_csr(CVMX_NPI_DBG_SELECT,
+ interface * 0x800 + index * 0x100 + 0x880);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
+ ==, 0, 10000);
+ CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
+ ==, 0, 10000);
+
+ /* Disable the port before we make any changes */
+ new_gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Set full/half duplex */
+ if (cvmx_octeon_is_pass1())
+ /* Half duplex is broken for 38XX Pass 1 */
+ new_gmx_cfg.s.duplex = 1;
+ else if (!link_info.s.link_up)
+ /* Force full duplex on down links */
+ new_gmx_cfg.s.duplex = 1;
+ else
+ new_gmx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Set the link speed. Anything unknown is set to 1Gbps */
+ if (link_info.s.speed == 10) {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ } else if (link_info.s.speed == 100) {
+ new_gmx_cfg.s.slottime = 0;
+ new_gmx_cfg.s.speed = 0;
+ } else {
+ new_gmx_cfg.s.slottime = 1;
+ new_gmx_cfg.s.speed = 1;
+ }
+
+ /* Adjust the clocks */
+ if (link_info.s.speed == 10) {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ } else if (link_info.s.speed == 100) {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ } else {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
+ union cvmx_gmxx_inf_mode mode;
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ /*
+ * Port .en .type .p0mii Configuration
+ * ---- --- ----- ------ -----------------------------------------
+ * X 0 X X All links are disabled.
+ * 0 1 X 0 Port 0 is RGMII
+ * 0 1 X 1 Port 0 is MII
+ * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
+ * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
+ * MII port is selected by GMX_PRT1_CFG[SPEED].
+ */
+
+ /* In MII mode, CLK_CNT = 1. */
+ if (((index == 0) && (mode.s.p0mii == 1))
+ || ((index != 0) && (mode.s.type == 1))) {
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK
+ (index, interface), 1);
+ }
+ }
+ }
+
+ /* Do a read to make sure all setup stuff is complete */
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Save the new GMX setting without enabling the port */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ /* Enable the lowest level RX */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
+ index));
+
+ /* Re-enable the TX path */
+ for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
+ int queue = cvmx_pko_get_base_queue(ipd_port) + i;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
+ pko_mem_queue_qos_save[i].u64);
+ }
+
+ /* Restore backpressure */
+ cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
+
+ /* Restore the GMX enable state. Port config is complete */
+ new_gmx_cfg.s.en = original_gmx_cfg.s.en;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
+
+ return result;
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
+ int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ int original_enable;
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ union cvmx_asxx_prt_loop asxx_prt_loop;
+
+ /* Read the current enable state and save it */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ original_enable = gmx_cfg.s.en;
+ /* Force port to be disabled */
+ gmx_cfg.s.en = 0;
+ if (enable_internal) {
+ /* Force speed if we're doing internal loopback */
+ gmx_cfg.s.duplex = 1;
+ gmx_cfg.s.slottime = 1;
+ gmx_cfg.s.speed = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
+ }
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ /* Set the loopback bits */
+ asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
+ if (enable_internal)
+ asxx_prt_loop.s.int_loop |= 1 << index;
+ else
+ asxx_prt_loop.s.int_loop &= ~(1 << index);
+ if (enable_external)
+ asxx_prt_loop.s.ext_loop |= 1 << index;
+ else
+ asxx_prt_loop.s.ext_loop &= ~(1 << index);
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
+
+ /* Force enables in internal loopback */
+ if (enable_internal) {
+ uint64_t tmp;
+ tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
+ (1 << index) | tmp);
+ tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
+ (1 << index) | tmp);
+ original_enable = 1;
+ }
+
+ /* Restore the enable state */
+ gmx_cfg.s.en = original_enable;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ return 0;
+}
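Editorial usage sketch (not part of the patch): enabling internal loopback only on a hypothetical IPD port 2, which the code above will force to 1 Gbps full duplex while leaving external loopback off.

    /* Hypothetical call: internal loopback on, external loopback off. */
    if (__cvmx_helper_rgmii_configure_loopback(2, 1, 0))
            cvmx_dprintf("Loopback configuration failed\n");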
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.h b/drivers/staging/octeon/cvmx-helper-rgmii.h
new file mode 100644
index 000000000000..ea2652604a57
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-rgmii.h
@@ -0,0 +1,110 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for RGMII/GMII/MII initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_RGMII_H__
+#define __CVMX_HELPER_RGMII_H__
+
+/**
+ * Probe RGMII ports and determine the number present
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of RGMII/GMII/MII ports (0-4).
+ */
+extern int __cvmx_helper_rgmii_probe(int interface);
+
+/**
+ * Put an RGMII interface in loopback mode. Internal packets sent
+ * out will be received back again on the same port. Externally
+ * received packets will echo back out.
+ *
+ * @port: IPD port number to loop.
+ */
+extern void cvmx_helper_rgmii_internal_loopback(int port);
+
+/**
+ * Configure all of the ASX, GMX, and PKO registers required
+ * to get RGMII to function on the supplied interface.
+ *
+ * @interface: PKO Interface to configure (0 or 1)
+ *
+ * Returns Zero on success
+ */
+extern int __cvmx_helper_rgmii_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_rgmii_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port,
+ int enable_internal,
+ int enable_external);
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.c b/drivers/staging/octeon/cvmx-helper-sgmii.c
new file mode 100644
index 000000000000..6214e3b6d975
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-sgmii.c
@@ -0,0 +1,550 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-mdio.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-pcsx-defs.h"
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+
+/**
+ * Perform initialization required only once for an SGMII port.
+ *
+ * @interface: Interface to init
+ * @index: Index of port on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
+{
+ const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
+ union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
+ union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+
+ /* Disable GMX */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /*
+ * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
+ * appropriate value. 1000BASE-X specifies a 10ms
+ * interval. SGMII specifies a 1.6ms interval.
+ */
+ pcs_misc_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_linkx_timer_count_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
+ if (pcs_misc_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ pcsx_linkx_timer_count_reg.s.count =
+ (10000ull * clock_mhz) >> 10;
+ } else {
+ /* SGMII */
+ pcsx_linkx_timer_count_reg.s.count =
+ (1600ull * clock_mhz) >> 10;
+ }
+ cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
+ pcsx_linkx_timer_count_reg.u64);
+
+ /*
+ * Write the advertisement register to be used as the
+ * tx_Config_Reg<D15:D0> of the autonegotiation. In
+ * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
+ * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
+ * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
+ * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
+ * step can be skipped.
+ */
+ if (pcs_misc_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
+ pcsx_anx_adv_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
+ pcsx_anx_adv_reg.s.rem_flt = 0;
+ pcsx_anx_adv_reg.s.pause = 3;
+ pcsx_anx_adv_reg.s.hfd = 1;
+ pcsx_anx_adv_reg.s.fd = 1;
+ cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
+ pcsx_anx_adv_reg.u64);
+ } else {
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mac_phy) {
+ /* PHY Mode */
+ union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
+ pcsx_sgmx_an_adv_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
+ (index, interface));
+ pcsx_sgmx_an_adv_reg.s.link = 1;
+ pcsx_sgmx_an_adv_reg.s.dup = 1;
+ pcsx_sgmx_an_adv_reg.s.speed = 2;
+ cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
+ (index, interface),
+ pcsx_sgmx_an_adv_reg.u64);
+ } else {
+ /* MAC Mode - Nothing to do */
+ }
+ }
+ return 0;
+}
+
+/**
+ * Initialize the SERDES link for the first time or after a loss
+ * of link.
+ *
+ * @interface: Interface to init
+ * @index: Index of port on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
+{
+ union cvmx_pcsx_mrx_control_reg control_reg;
+
+ /*
+ * Take PCS through a reset sequence.
+ * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
+ * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
+ * value of the other PCS*_MR*_CONTROL_REG bits). Read
+ * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
+ * zero.
+ */
+ control_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
+ control_reg.s.reset = 1;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ control_reg.u64);
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
+ cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
+ "to finish reset\n",
+ interface, index);
+ return -1;
+ }
+ }
+
+ /*
+ * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
+ * sgmii negotiation starts.
+ */
+ control_reg.s.rst_an = 1;
+ control_reg.s.an_en = 1;
+ control_reg.s.pwr_dn = 0;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ control_reg.u64);
+
+ /*
+ * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
+ * that sgmii autonegotiation is complete. In MAC mode this
+ * isn't an ethernet link, but a link between Octeon and the
+ * PHY.
+ */
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
+ union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
+ 10000)) {
+ /* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Configure an SGMII link to the specified speed after the SERDES
+ * link is up.
+ *
+ * @interface: Interface to init
+ * @index: Index of port on the interface
+ * @link_info: Link state to configure
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
+ int index,
+ cvmx_helper_link_info_t
+ link_info)
+{
+ int is_enabled;
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+ /* Disable GMX before we make any changes. Remember the enable state */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ is_enabled = gmxx_prtx_cfg.s.en;
+ gmxx_prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Wait for GMX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
+ rx_idle, ==, 1, 10000)
+ || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
+ union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
+ 10000)) {
+ cvmx_dprintf
+ ("SGMII%d: Timeout waiting for port %d to be idle\n",
+ interface, index);
+ return -1;
+ }
+
+ /* Read GMX CFG again to make sure the disable completed */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /*
+ * Get the misc control for PCS. We will need to set the
+ * duplication amount.
+ */
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+
+ /*
+ * Use GMXENO to force the link down if the status we get says
+ * it should be down.
+ */
+ pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
+
+ /* Only change the duplex setting if the link is up */
+ if (link_info.s.link_up)
+ gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
+
+ /* Do speed based setting for GMX */
+ switch (link_info.s.speed) {
+ case 10:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 1;
+ gmxx_prtx_cfg.s.slottime = 0;
+ /* Setting from GMX-603 */
+ pcsx_miscx_ctl_reg.s.samp_pt = 25;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 100:
+ gmxx_prtx_cfg.s.speed = 0;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 0;
+ pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
+ break;
+ case 1000:
+ gmxx_prtx_cfg.s.speed = 1;
+ gmxx_prtx_cfg.s.speed_msb = 0;
+ gmxx_prtx_cfg.s.slottime = 1;
+ pcsx_miscx_ctl_reg.s.samp_pt = 1;
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
+ break;
+ default:
+ break;
+ }
+
+ /* Write the new misc control for PCS */
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ /* Write the new GMX settings with the port still disabled */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config completed */
+ gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+
+ /* Restore the enabled / disabled state */
+ gmxx_prtx_cfg.s.en = is_enabled;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
+
+ return 0;
+}
+
+/**
+ * Bring up the SGMII interface to be ready for packet I/O but
+ * leave I/O disabled using the GMX override. This function
+ * follows the bringup documented in 10.6.3 of the manual.
+ *
+ * @interface: Interface to bringup
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
+{
+ int index;
+
+ __cvmx_helper_setup_gmx(interface, num_ports);
+
+ for (index = 0; index < num_ports; index++) {
+ int ipd_port = cvmx_helper_get_ipd_port(interface, index);
+ __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
+ __cvmx_helper_sgmii_link_set(ipd_port,
+ __cvmx_helper_sgmii_link_get
+ (ipd_port));
+
+ }
+
+ return 0;
+}
+
+/**
+ * Probe a SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_sgmii_probe(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+
+ /*
+ * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+ * interface needs to be enabled before IPD otherwise per port
+ * backpressure may not work properly
+ */
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+ return 4;
+}
+
+/**
+ * Bringup and enable a SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_enable(int interface)
+{
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int index;
+
+ __cvmx_helper_sgmii_hardware_init(interface, num_ports);
+
+ for (index = 0; index < num_ports; index++) {
+ union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
+ gmxx_prtx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmxx_prtx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmxx_prtx_cfg.u64);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
+ }
+ __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+
+ result.u64 = 0;
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
+ /* The simulator gives you a simulated 1Gbps full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+
+ pcsx_mrx_control_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ if (pcsx_mrx_control_reg.s.loopbck1) {
+ /* Force 1Gbps full duplex link for internal loopback */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 1000;
+ return result;
+ }
+
+ pcs_misc_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcs_misc_ctl_reg.s.mode) {
+ /* 1000BASE-X */
+ /* FIXME */
+ } else {
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ if (pcsx_miscx_ctl_reg.s.mac_phy) {
+ /* PHY Mode */
+ union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
+ union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;
+
+ /*
+ * Don't bother continuing if the SERDES
+ * low-level link is down
+ */
+ pcsx_mrx_status_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
+ (index, interface));
+ if (pcsx_mrx_status_reg.s.lnk_st == 0) {
+ if (__cvmx_helper_sgmii_hardware_init_link
+ (interface, index) != 0)
+ return result;
+ }
+
+ /* Read the autoneg results */
+ pcsx_anx_results_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
+ (index, interface));
+ if (pcsx_anx_results_reg.s.an_cpt) {
+ /*
+ * Auto negotiation is complete. Set
+ * status accordingly.
+ */
+ result.s.full_duplex =
+ pcsx_anx_results_reg.s.dup;
+ result.s.link_up =
+ pcsx_anx_results_reg.s.link_ok;
+ switch (pcsx_anx_results_reg.s.spd) {
+ case 0:
+ result.s.speed = 10;
+ break;
+ case 1:
+ result.s.speed = 100;
+ break;
+ case 2:
+ result.s.speed = 1000;
+ break;
+ default:
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ } else {
+ /*
+ * Auto negotiation isn't
+ * complete. Return link down.
+ */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ }
+ } else { /* MAC Mode */
+
+ result = __cvmx_helper_board_link_get(ipd_port);
+ }
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_sgmii_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
+ link_info);
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal
+ * loopback causes packets sent by the port to be received by
+ * Octeon. External loopback causes packets received from the wire to
+ * sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
+ int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
+ union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
+
+ pcsx_mrx_control_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
+ pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
+ cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
+ pcsx_mrx_control_reg.u64);
+
+ pcsx_miscx_ctl_reg.u64 =
+ cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
+ pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
+ cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
+ pcsx_miscx_ctl_reg.u64);
+
+ __cvmx_helper_sgmii_hardware_init_link(interface, index);
+ return 0;
+}
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.h b/drivers/staging/octeon/cvmx-helper-sgmii.h
new file mode 100644
index 000000000000..19b48d60857f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-sgmii.h
@@ -0,0 +1,104 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for SGMII initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_SGMII_H__
+#define __CVMX_HELPER_SGMII_H__
+
+/**
+ * Probe a SGMII interface and determine the number of ports
+ * connected to it. The SGMII interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_sgmii_probe(int interface);
+
+/**
+ * Bringup and enable a SGMII interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_sgmii_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_sgmii_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port,
+ int enable_internal,
+ int enable_external);
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper-spi.c b/drivers/staging/octeon/cvmx-helper-spi.c
new file mode 100644
index 000000000000..8ba6c832471e
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-spi.c
@@ -0,0 +1,195 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_spxx_int_msk_enable(int index);
+void __cvmx_interrupt_stxx_int_msk_enable(int index);
+
+/*
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+#include "cvmx-spi.h"
+#include "cvmx-helper.h"
+
+#include "cvmx-pip-defs.h"
+#include "cvmx-pko-defs.h"
+
+/*
+ * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
+ * initialization routines wait for SPI training. You can override the
+ * value using executive-config.h if necessary.
+ */
+#ifndef CVMX_HELPER_SPI_TIMEOUT
+#define CVMX_HELPER_SPI_TIMEOUT 10
+#endif
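+
+/*
+ * For example (illustrative value only), a board that needs longer SPI
+ * training could place the following in executive-config.h:
+ *
+ * #define CVMX_HELPER_SPI_TIMEOUT 20
+ */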
+
+/**
+ * Probe a SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_spi_probe(int interface)
+{
+ int num_ports = 0;
+
+ if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
+ cvmx_spi4000_is_present(interface)) {
+ num_ports = 10;
+ } else {
+ union cvmx_pko_reg_crc_enable enable;
+ num_ports = 16;
+ /*
+ * Unlike the SPI4000, most SPI devices don't
+ * automatically put on the L2 CRC. For everything
+ * except for the SPI4000 have PKO append the L2 CRC
+ * to the packet.
+ */
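+ /*
+ * Illustrative note: each SPI interface owns 16 consecutive
+ * IPD/PKO ports (interface 0 uses ports 0-15, interface 1
+ * uses 16-31), which is why the 16-bit CRC enable mask
+ * below is shifted by interface * 16.
+ */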
+ enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
+ enable.s.enable |= 0xffff << (interface * 16);
+ cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
+ }
+ __cvmx_helper_setup_gmx(interface, num_ports);
+ return num_ports;
+}
+
+/**
+ * Bringup and enable a SPI interface. After this call packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_enable(int interface)
+{
+ /*
+ * Normally the ethernet L2 CRC is checked and stripped in the
+ * GMX block. When you are using SPI, this isn't the case and
+ * IPD needs to check the L2 CRC.
+ */
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int ipd_port;
+ for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
+ ipd_port++) {
+ union cvmx_pip_prt_cfgx port_config;
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ port_config.s.crc_en = 1;
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
+ }
+
+ if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
+ cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
+ CVMX_HELPER_SPI_TIMEOUT, num_ports);
+ if (cvmx_spi4000_is_present(interface))
+ cvmx_spi4000_initialize(interface);
+ }
+ __cvmx_interrupt_spxx_int_msk_enable(interface);
+ __cvmx_interrupt_stxx_int_msk_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+ result.u64 = 0;
+
+ if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
+ /* The simulator gives you a simulated full duplex link */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ } else if (cvmx_spi4000_is_present(interface)) {
+ union cvmx_gmxx_rxx_rx_inbnd inband =
+ cvmx_spi4000_check_speed(interface, index);
+ result.s.link_up = inband.s.status;
+ result.s.full_duplex = inband.s.duplex;
+ switch (inband.s.speed) {
+ case 0: /* 10 Mbps */
+ result.s.speed = 10;
+ break;
+ case 1: /* 100 Mbps */
+ result.s.speed = 100;
+ break;
+ case 2: /* 1 Gbps */
+ result.s.speed = 1000;
+ break;
+ case 3: /* Illegal */
+ result.s.speed = 0;
+ result.s.link_up = 0;
+ break;
+ }
+ } else {
+ /* For generic SPI we can't determine the link, just return some
+ sane results */
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ /* Nothing to do. If we have a SPI4000 then the setup was already performed
+ by cvmx_spi4000_check_speed(). If not then there isn't any link
+ info */
+ return 0;
+}
diff --git a/drivers/staging/octeon/cvmx-helper-spi.h b/drivers/staging/octeon/cvmx-helper-spi.h
new file mode 100644
index 000000000000..69bac036d10e
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-spi.h
@@ -0,0 +1,84 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for SPI initialization, configuration,
+ * and monitoring.
+ */
+#ifndef __CVMX_HELPER_SPI_H__
+#define __CVMX_HELPER_SPI_H__
+
+/**
+ * Probe a SPI interface and determine the number of ports
+ * connected to it. The SPI interface should still be down after
+ * this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_spi_probe(int interface);
+
+/**
+ * Bringup and enable a SPI interface. After this call packet I/O
+ * should be fully functional. This is called with IPD enabled but
+ * PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_spi_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_spi_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info);
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper-util.c b/drivers/staging/octeon/cvmx-helper-util.c
new file mode 100644
index 000000000000..41ef8a40bb03
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-util.c
@@ -0,0 +1,433 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Small helper utilities.
+ */
+#include <linux/kernel.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-fpa.h"
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-ipd.h"
+#include "cvmx-spi.h"
+
+#include "cvmx-helper.h"
+#include "cvmx-helper-util.h"
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+/**
+ * Convert an interface mode into a human readable string
+ *
+ * @mode: Mode to convert
+ *
+ * Returns String
+ */
+const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
+ mode)
+{
+ switch (mode) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ return "DISABLED";
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ return "RGMII";
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ return "GMII";
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ return "SPI";
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ return "PCIE";
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ return "XAUI";
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ return "SGMII";
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ return "PICMG";
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ return "NPI";
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ return "LOOP";
+ }
+ return "UNKNOWN";
+}
+
+/**
+ * Debug routine to dump the packet structure to the console
+ *
+ * @work: Work queue entry containing the packet to dump
+ * Returns Zero
+ */
+int cvmx_helper_dump_packet(cvmx_wqe_t *work)
+{
+ uint64_t count;
+ uint64_t remaining_bytes;
+ union cvmx_buf_ptr buffer_ptr;
+ uint64_t start_of_buffer;
+ uint8_t *data_address;
+ uint8_t *end_of_data;
+
+ cvmx_dprintf("Packet Length: %u\n", work->len);
+ cvmx_dprintf(" Input Port: %u\n", work->ipprt);
+ cvmx_dprintf(" QoS: %u\n", work->qos);
+ cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
+
+ if (work->word2.s.bufs == 0) {
+ union cvmx_ipd_wqe_fpa_queue wqe_pool;
+ wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
+ buffer_ptr.u64 = 0;
+ buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
+ buffer_ptr.s.size = 128;
+ buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
+ if (likely(!work->word2.s.not_IP)) {
+ union cvmx_pip_ip_offset pip_ip_offset;
+ pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
+ buffer_ptr.s.addr +=
+ (pip_ip_offset.s.offset << 3) -
+ work->word2.s.ip_offset;
+ buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
+ } else {
+ /*
+ * WARNING: This code assumes that the packet
+ * is not RAW. If it was, we would use
+ * PIP_GBL_CFG[RAW_SHF] instead of
+ * PIP_GBL_CFG[NIP_SHF].
+ */
+ union cvmx_pip_gbl_cfg pip_gbl_cfg;
+ pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
+ buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
+ }
+ } else
+ buffer_ptr = work->packet_ptr;
+ remaining_bytes = work->len;
+
+ while (remaining_bytes) {
+ start_of_buffer =
+ ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ cvmx_dprintf(" Buffer Start:%llx\n",
+ (unsigned long long)start_of_buffer);
+ cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
+ cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
+ cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
+ cvmx_dprintf(" Buffer Data: %llx\n",
+ (unsigned long long)buffer_ptr.s.addr);
+ cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
+
+ cvmx_dprintf("\t\t");
+ data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
+ end_of_data = data_address + buffer_ptr.s.size;
+ count = 0;
+ while (data_address < end_of_data) {
+ if (remaining_bytes == 0)
+ break;
+ else
+ remaining_bytes--;
+ cvmx_dprintf("%02x", (unsigned int)*data_address);
+ data_address++;
+ if (remaining_bytes && (count == 7)) {
+ cvmx_dprintf("\n\t\t");
+ count = 0;
+ } else
+ count++;
+ }
+ cvmx_dprintf("\n");
+
+ if (remaining_bytes)
+ buffer_ptr = *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ }
+ return 0;
+}
+
+/**
+ * Setup Random Early Drop on a specific input queue
+ *
+ * @queue: Input queue to setup RED on (0-7)
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
+{
+ union cvmx_ipd_qosx_red_marks red_marks;
+ union cvmx_ipd_red_quex_param red_param;
+
+ /* Set RED to begin dropping packets when there are pass_thresh buffers
+ left. It will linearly drop more packets until reaching drop_thresh
+ buffers */
+ red_marks.u64 = 0;
+ red_marks.s.drop = drop_thresh;
+ red_marks.s.pass = pass_thresh;
+ cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
+
+ /* Use the actual queue 0 counter, not the average */
+ red_param.u64 = 0;
+ red_param.s.prb_con =
+ (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
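+ /*
+ * Worked example (illustrative thresholds only): pass_thresh = 1000
+ * and drop_thresh = 500 give a 500-buffer ramp, so prb_con becomes
+ * (255 << 24) / 500 = 8556380 and the drop probability rises
+ * linearly between the two marks, as described above.
+ */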
+ red_param.s.avg_con = 1;
+ red_param.s.new_con = 255;
+ red_param.s.use_pcnt = 1;
+ cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
+ return 0;
+}
+
+/**
+ * Setup Random Early Drop to automatically begin dropping packets.
+ *
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+ union cvmx_ipd_portx_bp_page_cnt page_cnt;
+ union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
+ union cvmx_ipd_red_port_enable red_port_enable;
+ int queue;
+ int interface;
+ int port;
+
+ /* Disable backpressure based on queued buffers. It needs SW support */
+ page_cnt.u64 = 0;
+ page_cnt.s.bp_enb = 0;
+ page_cnt.s.page_cnt = 100;
+ for (interface = 0; interface < 2; interface++) {
+ for (port = cvmx_helper_get_first_ipd_port(interface);
+ port < cvmx_helper_get_last_ipd_port(interface); port++)
+ cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
+ page_cnt.u64);
+ }
+
+ for (queue = 0; queue < 8; queue++)
+ cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
+
+ /* Shut off the dropping based on the per port page count. SW isn't
+ decrementing it right now */
+ ipd_bp_prt_red_end.u64 = 0;
+ ipd_bp_prt_red_end.s.prt_enb = 0;
+ cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
+
+ red_port_enable.u64 = 0;
+ red_port_enable.s.prt_enb = 0xfffffffffull;
+ red_port_enable.s.avg_dly = 10000;
+ red_port_enable.s.prb_dly = 10000;
+ cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
+
+ return 0;
+}
+
+/**
+ * Setup the common GMX settings that determine the number of
+ * ports. These settings apply to almost all configurations of all
+ * chips.
+ *
+ * @interface: Interface to configure
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_setup_gmx(int interface, int num_ports)
+{
+ union cvmx_gmxx_tx_prts gmx_tx_prts;
+ union cvmx_gmxx_rx_prts gmx_rx_prts;
+ union cvmx_pko_reg_gmx_port_mode pko_mode;
+ union cvmx_gmxx_txx_thresh gmx_tx_thresh;
+ int index;
+
+ /* Tell GMX the number of TX ports on this interface */
+ gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
+ gmx_tx_prts.s.prts = num_ports;
+ cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
+
+ /* Tell GMX the number of RX ports on this interface. This only
+ ** applies to *GMII and XAUI ports */
+ if (cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_RGMII
+ || cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_SGMII
+ || cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_GMII
+ || cvmx_helper_interface_get_mode(interface) ==
+ CVMX_HELPER_INTERFACE_MODE_XAUI) {
+ if (num_ports > 4) {
+ cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
+ "num_ports\n");
+ return -1;
+ }
+
+ gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
+ gmx_rx_prts.s.prts = num_ports;
+ cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
+ }
+
+ /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* Tell PKO the number of ports on this interface */
+ pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
+ if (interface == 0) {
+ if (num_ports == 1)
+ pko_mode.s.mode0 = 4;
+ else if (num_ports == 2)
+ pko_mode.s.mode0 = 3;
+ else if (num_ports <= 4)
+ pko_mode.s.mode0 = 2;
+ else if (num_ports <= 8)
+ pko_mode.s.mode0 = 1;
+ else
+ pko_mode.s.mode0 = 0;
+ } else {
+ if (num_ports == 1)
+ pko_mode.s.mode1 = 4;
+ else if (num_ports == 2)
+ pko_mode.s.mode1 = 3;
+ else if (num_ports <= 4)
+ pko_mode.s.mode1 = 2;
+ else if (num_ports <= 8)
+ pko_mode.s.mode1 = 1;
+ else
+ pko_mode.s.mode1 = 0;
+ }
+ cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
+ }
+
+ /*
+ * Set GMX to buffer as much data as possible before starting
+ * transmit. This reduces the chances that we have a TX under
+ * run due to memory contention. Any packet that fits entirely
+ * in the GMX FIFO can never have an under run regardless of
+ * memory load.
+ */
+ gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* These chips have a fixed max threshold of 0x40 */
+ gmx_tx_thresh.s.cnt = 0x40;
+ } else {
+ /* Choose the max value for the number of ports */
+ if (num_ports <= 1)
+ gmx_tx_thresh.s.cnt = 0x100 / 1;
+ else if (num_ports == 2)
+ gmx_tx_thresh.s.cnt = 0x100 / 2;
+ else
+ gmx_tx_thresh.s.cnt = 0x100 / 4;
+ }
+ /*
+ * SPI and XAUI can have lots of ports but the GMX hardware
+ * only ever has a max of 4.
+ */
+ if (num_ports > 4)
+ num_ports = 4;
+ for (index = 0; index < num_ports; index++)
+ cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
+ gmx_tx_thresh.u64);
+
+ return 0;
+}
+
+/**
+ * Returns the IPD/PKO port number for a port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ * @port: Port on the interface
+ *
+ * Returns IPD/PKO port number
+ */
+int cvmx_helper_get_ipd_port(int interface, int port)
+{
+ switch (interface) {
+ case 0:
+ return port;
+ case 1:
+ return port + 16;
+ case 2:
+ return port + 32;
+ case 3:
+ return port + 36;
+ }
+ return -1;
+}
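+
+/*
+ * Example (illustrative): cvmx_helper_get_ipd_port(1, 3) returns 19,
+ * cvmx_helper_get_interface_num(19) maps it back to interface 1, and
+ * cvmx_helper_get_interface_index_num(19) returns index 3.
+ */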
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface number
+ */
+int cvmx_helper_get_interface_num(int ipd_port)
+{
+ if (ipd_port < 16)
+ return 0;
+ else if (ipd_port < 32)
+ return 1;
+ else if (ipd_port < 36)
+ return 2;
+ else if (ipd_port < 40)
+ return 3;
+ else
+ cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
+ "port number\n");
+
+ return -1;
+}
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface index number
+ */
+int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+ if (ipd_port < 32)
+ return ipd_port & 15;
+ else if (ipd_port < 36)
+ return ipd_port & 3;
+ else if (ipd_port < 40)
+ return ipd_port & 3;
+ else
+ cvmx_dprintf("cvmx_helper_get_interface_index_num: "
+ "Illegal IPD port number\n");
+
+ return -1;
+}
diff --git a/drivers/staging/octeon/cvmx-helper-util.h b/drivers/staging/octeon/cvmx-helper-util.h
new file mode 100644
index 000000000000..6a6e52fc22c1
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-util.h
@@ -0,0 +1,215 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Small helper utilities.
+ *
+ */
+
+#ifndef __CVMX_HELPER_UTIL_H__
+#define __CVMX_HELPER_UTIL_H__
+
+/**
+ * Convert an interface mode into a human readable string
+ *
+ * @mode: Mode to convert
+ *
+ * Returns String
+ */
+extern const char
+ *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
+
+/**
+ * Debug routine to dump the packet structure to the console
+ *
+ * @work: Work queue entry containing the packet to dump
+ * Returns Zero
+ */
+extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
+
+/**
+ * Setup Random Early Drop on a specific input queue
+ *
+ * @queue: Input queue to setup RED on (0-7)
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
+ int drop_thresh);
+
+/**
+ * Setup Random Early Drop to automatically begin dropping packets.
+ *
+ * @pass_thresh:
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
+ * @drop_thresh:
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
+ * Returns Zero on success. Negative on failure
+ */
+extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
+
+/**
+ * Get the version of the CVMX libraries.
+ *
+ * Returns Version string. Note this buffer is allocated statically
+ * and will be shared by all callers.
+ */
+extern const char *cvmx_helper_get_version(void);
+
+/**
+ * Setup the common GMX settings that determine the number of
+ * ports. These settings apply to almost all configurations of all
+ * chips.
+ *
+ * @interface: Interface to configure
+ * @num_ports: Number of ports on the interface
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
+
+/**
+ * Returns the IPD/PKO port number for a port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ * @port: Port on the interface
+ *
+ * Returns IPD/PKO port number
+ */
+extern int cvmx_helper_get_ipd_port(int interface, int port);
+
+/**
+ * Returns the IPD/PKO port number for the first port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ *
+ * Returns IPD/PKO port number
+ */
+static inline int cvmx_helper_get_first_ipd_port(int interface)
+{
+ return cvmx_helper_get_ipd_port(interface, 0);
+}
+
+/**
+ * Returns the IPD/PKO port number for the last port on the given
+ * interface.
+ *
+ * @interface: Interface to use
+ *
+ * Returns IPD/PKO port number
+ */
+static inline int cvmx_helper_get_last_ipd_port(int interface)
+{
+ extern int cvmx_helper_ports_on_interface(int interface);
+
+ return cvmx_helper_get_first_ipd_port(interface) +
+ cvmx_helper_ports_on_interface(interface) - 1;
+}
+
+/**
+ * Free the packet buffers contained in a work queue entry.
+ * The work queue entry is not freed.
+ *
+ * @work: Work queue entry with packet to free
+ */
+static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
+{
+ uint64_t number_buffers;
+ union cvmx_buf_ptr buffer_ptr;
+ union cvmx_buf_ptr next_buffer_ptr;
+ uint64_t start_of_buffer;
+
+ number_buffers = work->word2.s.bufs;
+ if (number_buffers == 0)
+ return;
+ buffer_ptr = work->packet_ptr;
+
+ /*
+ * Since the number of buffers is not zero, we know this is
+ * not a dynamic short packet. We need to check if it is a
+ * packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
+ * true, we need to free all buffers except for the first
+ * one. The caller doesn't expect their WQE pointer to be
+ * freed
+ */
+ start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
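+ /*
+ * Note (added for clarity): s.back is measured in 128-byte cache
+ * lines, so the line above rounds addr down to a cache line and
+ * steps back s.back lines to find the start of the buffer.
+ */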
+ if (cvmx_ptr_to_phys(work) == start_of_buffer) {
+ next_buffer_ptr =
+ *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ buffer_ptr = next_buffer_ptr;
+ number_buffers--;
+ }
+
+ while (number_buffers--) {
+ /*
+ * Remember the back pointer is in cache lines, not
+ * 64bit words
+ */
+ start_of_buffer =
+ ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
+ /*
+ * Read pointer to next buffer before we free the
+ * current buffer.
+ */
+ next_buffer_ptr =
+ *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
+ cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
+ buffer_ptr.s.pool, 0);
+ buffer_ptr = next_buffer_ptr;
+ }
+}
+
+/**
+ * Returns the interface number for an IPD/PKO port number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface number
+ */
+extern int cvmx_helper_get_interface_num(int ipd_port);
+
+/**
+ * Returns the interface index number for an IPD/PKO port
+ * number.
+ *
+ * @ipd_port: IPD/PKO port number
+ *
+ * Returns Interface index number
+ */
+extern int cvmx_helper_get_interface_index_num(int ipd_port);
+
+#endif /* __CVMX_HELPER_UTIL_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.c b/drivers/staging/octeon/cvmx-helper-xaui.c
new file mode 100644
index 000000000000..a11e6769e234
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-xaui.c
@@ -0,0 +1,348 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-helper.h"
+
+#include "cvmx-pko-defs.h"
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-pcsxx-defs.h"
+
+void __cvmx_interrupt_gmxx_enable(int interface);
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
+/**
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+int __cvmx_helper_xaui_probe(int interface)
+{
+ int i;
+ union cvmx_gmxx_hg2_control gmx_hg2_control;
+ union cvmx_gmxx_inf_mode mode;
+
+ /*
+ * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
+ * interface needs to be enabled before IPD otherwise per port
+ * backpressure may not work properly.
+ */
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ mode.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
+
+ __cvmx_helper_setup_gmx(interface, 1);
+
+ /*
+ * Setup PKO to support 16 ports for HiGig2 virtual
+ * ports. We're pointing all of the PKO packet ports for this
+ * interface to the XAUI. This allows us to use HiGig2
+ * backpressure per port.
+ */
+ for (i = 0; i < 16; i++) {
+ union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
+ pko_mem_port_ptrs.u64 = 0;
+ /*
+ * We set each PKO port to have equal priority in a
+ * round robin fashion.
+ */
+ pko_mem_port_ptrs.s.static_p = 0;
+ pko_mem_port_ptrs.s.qos_mask = 0xff;
+ /* All PKO ports map to the same XAUI hardware port */
+ pko_mem_port_ptrs.s.eid = interface * 4;
+ pko_mem_port_ptrs.s.pid = interface * 16 + i;
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
+ }
+
+ /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
+ gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
+ if (gmx_hg2_control.s.hg2tx_en)
+ return 16;
+ else
+ return 1;
+}
+
+/**
+ * Bringup and enable a XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_enable(int interface)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ union cvmx_pcsxx_control1_reg xauiCtl;
+ union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
+ union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+ union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+
+ /* (1) Interface has already been enabled. */
+
+ /* (2) Disable GMX. */
+ xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
+ xauiMiscCtl.s.gmxeno = 1;
+ cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+ /* (3) Disable GMX and PCSX interrupts. */
+ gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+ gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+ pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+
+ /* (4) Bring up the PCSX and GMX reconciliation layer. */
+ /* (4)a Set polarity and lane swapping. */
+ /* (4)b */
+ gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ /* Enable better IFG packing, which improves performance */
+ gmxXauiTxCtl.s.dic_en = 1;
+ gmxXauiTxCtl.s.uni_en = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
+
+ /* (4)c Apply reset sequence */
+ xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+ xauiCtl.s.lo_pwr = 0;
+ xauiCtl.s.reset = 1;
+ cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
+
+ /* Wait for PCS to come out of reset */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
+ reset, ==, 0, 10000))
+ return -1;
+ /* Wait for PCS to be aligned */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_10GBX_STATUS_REG(interface),
+ union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
+ return -1;
+ /* Wait for RX to be ready */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
+ status, ==, 0, 10000))
+ return -1;
+
+ /* (6) Configure GMX */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ /* Wait for GMX RX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+ rx_idle, ==, 1, 10000))
+ return -1;
+ /* Wait for GMX TX to be idle */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
+ tx_idle, ==, 1, 10000))
+ return -1;
+
+ /* GMX configure */
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.speed = 1;
+ gmx_cfg.s.speed_msb = 0;
+ gmx_cfg.s.slottime = 1;
+ cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
+ cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
+ cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ /* (7) Clear out any error state */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
+ cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
+ cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
+ cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
+
+ /* Wait for receive link */
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
+ rcv_lnk, ==, 1, 10000))
+ return -1;
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+ xmtflt, ==, 0, 10000))
+ return -1;
+ if (CVMX_WAIT_FOR_FIELD64
+ (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
+ rcvflt, ==, 0, 10000))
+ return -1;
+
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
+
+ cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));
+
+ /* (8) Enable packet reception */
+ xauiMiscCtl.s.gmxeno = 0;
+ cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
+ __cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
+ __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
+ __cvmx_interrupt_gmxx_enable(interface);
+
+ return 0;
+}
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+ union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+ union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
+ cvmx_helper_link_info_t result;
+
+ gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+ pcsxx_status1_reg.u64 =
+ cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
+ result.u64 = 0;
+
+ /* Only return a link if both RX and TX are happy */
+ if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
+ (pcsxx_status1_reg.s.rcv_lnk == 1)) {
+ result.s.link_up = 1;
+ result.s.full_duplex = 1;
+ result.s.speed = 10000;
+ } else {
+ /* Disable GMX and PCSX interrupts. */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
+ union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
+
+ gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
+ gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
+
+ /* If the link shouldn't be up, then just return */
+ if (!link_info.s.link_up)
+ return 0;
+
+ /* Do nothing if both RX and TX are happy */
+ if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
+ return 0;
+
+ /* Bring the link up */
+ return __cvmx_helper_xaui_enable(interface);
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
+ int enable_internal,
+ int enable_external)
+{
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
+ union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
+
+ /* Set the internal loop */
+ pcsxx_control1_reg.u64 =
+ cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
+ pcsxx_control1_reg.s.loopbck1 = enable_internal;
+ cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface),
+ pcsxx_control1_reg.u64);
+
+ /* Set the external loop */
+ gmxx_xaui_ext_loopback.u64 =
+ cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
+ gmxx_xaui_ext_loopback.s.en = enable_external;
+ cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
+ gmxx_xaui_ext_loopback.u64);
+
+ /* Take the link through a reset */
+ return __cvmx_helper_xaui_enable(interface);
+}
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.h b/drivers/staging/octeon/cvmx-helper-xaui.h
new file mode 100644
index 000000000000..4b4db2f93cd4
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-xaui.h
@@ -0,0 +1,103 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for XAUI initialization, configuration,
+ * and monitoring.
+ *
+ */
+#ifndef __CVMX_HELPER_XAUI_H__
+#define __CVMX_HELPER_XAUI_H__
+
+/**
+ * Probe a XAUI interface and determine the number of ports
+ * connected to it. The XAUI interface should still be down
+ * after this call.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Number of ports on the interface. Zero to disable.
+ */
+extern int __cvmx_helper_xaui_probe(int interface);
+
+/**
+ * Bring up and enable a XAUI interface. After this call packet
+ * I/O should be fully functional. This is called with IPD
+ * enabled but PKO disabled.
+ *
+ * @interface: Interface to bring up
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_enable(int interface);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int __cvmx_helper_xaui_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
+ int enable_internal,
+ int enable_external);
+#endif
diff --git a/drivers/staging/octeon/cvmx-helper.c b/drivers/staging/octeon/cvmx-helper.c
new file mode 100644
index 000000000000..591506643d02
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper.c
@@ -0,0 +1,1058 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-fpa.h"
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-ipd.h"
+#include "cvmx-spi.h"
+#include "cvmx-helper.h"
+#include "cvmx-helper-board.h"
+
+#include "cvmx-pip-defs.h"
+#include "cvmx-smix-defs.h"
+#include "cvmx-asxx-defs.h"
+
+/**
+ * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+void (*cvmx_override_pko_queue_priority) (int pko_port,
+ uint64_t priorities[16]);
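+
+/*
+ * Illustrative sketch of how a user might install this hook (the
+ * function name below is hypothetical, not part of the SDK): give
+ * every queue the same priority so no queue is favoured, then let the
+ * helpers pick the assignment up during global initialization.
+ *
+ *	static void my_flat_queue_priority(int pko_port,
+ *					   uint64_t priorities[16])
+ *	{
+ *		int i;
+ *		for (i = 0; i < 16; i++)
+ *			priorities[i] = 8;
+ *	}
+ *
+ *	cvmx_override_pko_queue_priority = my_flat_queue_priority;
+ *	cvmx_helper_initialize_packet_io_global();
+ */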
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+void (*cvmx_override_ipd_port_setup) (int ipd_port);
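+
+/*
+ * Illustrative sketch (hypothetical function name): a hook that moves
+ * a port's work to POW group 1 after the default IPD setup has run.
+ * It reuses the same PIP tag CSR that __cvmx_helper_port_setup_ipd()
+ * programs below.
+ *
+ *	static void my_ipd_port_setup(int ipd_port)
+ *	{
+ *		union cvmx_pip_prt_tagx tag;
+ *
+ *		tag.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
+ *		tag.s.grp = 1;
+ *		cvmx_write_csr(CVMX_PIP_PRT_TAGX(ipd_port), tag.u64);
+ *	}
+ *
+ *	cvmx_override_ipd_port_setup = my_ipd_port_setup;
+ */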
+
+/* Port count per interface */
+static int interface_port_count[4] = { 0, 0, 0, 0 };
+
+/* Last configured link info for each port, indexed by IPD/PKO port */
+static cvmx_helper_link_info_t
+ port_link_info[CVMX_PIP_NUM_INPUT_PORTS];
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * Returns Number of interfaces on chip
+ */
+int cvmx_helper_get_number_of_interfaces(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return 4;
+ else
+ return 3;
+}
+
+/**
+ * Return the number of ports on an interface. Depending on the
+ * chip and configuration, this can be 1-16. A value of 0
+ * specifies that the interface doesn't exist or isn't usable.
+ *
+ * @interface: Interface to get the port count for
+ *
+ * Returns Number of ports on interface. Can be Zero.
+ */
+int cvmx_helper_ports_on_interface(int interface)
+{
+ return interface_port_count[interface];
+}
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Mode of the interface. Unknown or unsupported interfaces return
+ * DISABLED.
+ */
+cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+ if (interface == 2)
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+
+ if (interface == 3) {
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+
+ if (interface == 0
+ && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
+ && cvmx_sysinfo_get()->board_rev_major == 1) {
+ /*
+ * Lie about interface type of CN3005 board. This
+ * board has a switch on port 1 like the other
+ * evaluation boards, but it is connected over RGMII
+ * instead of GMII. Report GMII mode so that the
+ * speed is forced to 1 Gbit full duplex. Other than
+ * some initial configuration (which does not use the
+ * output of this function) there is no difference in
+ * setup between GMII and RGMII modes.
+ */
+ return CVMX_HELPER_INTERFACE_MODE_GMII;
+ }
+
+ /* Interface 1 is always disabled on CN30XX, CN31XX, CN50XX and CN52XX */
+ if ((interface == 1)
+ && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)))
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ switch (mode.cn56xx.mode) {
+ case 0:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 1:
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 3:
+ return CVMX_HELPER_INTERFACE_MODE_PICMG;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+ } else {
+ if (!mode.s.en)
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+
+ if (mode.s.type) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return CVMX_HELPER_INTERFACE_MODE_SPI;
+ else
+ return CVMX_HELPER_INTERFACE_MODE_GMII;
+ } else
+ return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ }
+}
+
+/**
+ * Configure the IPD/PIP tagging and QoS options for a specific
+ * port. This function determines the POW work queue entry
+ * contents for a port. The setup performed here is controlled by
+ * the defines in executive-config.h.
+ *
+ * @ipd_port: Port to configure. This follows the IPD numbering, not the
+ * per interface numbering
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_port_setup_ipd(int ipd_port)
+{
+ union cvmx_pip_prt_cfgx port_config;
+ union cvmx_pip_prt_tagx tag_config;
+
+ port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
+ tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
+
+ /* Have each port go to a different POW queue */
+ port_config.s.qos = ipd_port & 0x7;
+
+ /* Process the headers and place the IP header in the work queue */
+ port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
+
+ tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
+ tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
+ tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
+ tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
+ tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
+ tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
+ tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
+ tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
+ tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
+ tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
+ tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
+ tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ /* Put all packets in group 0. Other groups can be used by the app */
+ tag_config.s.grp = 0;
+
+ cvmx_pip_config_port(ipd_port, port_config, tag_config);
+
+ /* Give the user a chance to override our setting for each port */
+ if (cvmx_override_ipd_port_setup)
+ cvmx_override_ipd_port_setup(ipd_port);
+
+ return 0;
+}
+
+/**
+ * This function probes an interface to determine the actual
+ * number of hardware ports connected to it. It doesn't set up the
+ * ports or enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Hardware setup of the
+ * ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_interface_probe(int interface)
+{
+ /*
+ * At this stage in the game we don't want packets to be moving
+ * yet. The following probe calls should perform hardware setup
+ * needed to determine port counts. Receive must still be disabled.
+ */
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ interface_port_count[interface] = 0;
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ interface_port_count[interface] =
+ __cvmx_helper_xaui_probe(interface);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ interface_port_count[interface] =
+ __cvmx_helper_rgmii_probe(interface);
+ break;
+ /*
+ * SPI4 can have 1-16 ports depending on the device at
+ * the other end.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ interface_port_count[interface] =
+ __cvmx_helper_spi_probe(interface);
+ break;
+ /*
+ * SGMII can have 1-4 ports depending on how many are
+ * hooked up.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ interface_port_count[interface] =
+ __cvmx_helper_sgmii_probe(interface);
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ interface_port_count[interface] =
+ __cvmx_helper_npi_probe(interface);
+ break;
+ /*
+ * Special loopback only ports. These are not the same
+ * as other ports in loopback mode.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ interface_port_count[interface] =
+ __cvmx_helper_loop_probe(interface);
+ break;
+ }
+
+ interface_port_count[interface] =
+ __cvmx_helper_board_interface_probe(interface,
+ interface_port_count
+ [interface]);
+
+ /* Make sure all global variables propagate to other cores */
+ CVMX_SYNCWS;
+
+ return 0;
+}
+
+/**
+ * Set up the IPD/PIP for the ports on an interface. Packet
+ * classification and tagging are set for every port on the
+ * interface. The number of ports on the interface must already
+ * have been probed.
+ *
+ * @interface: Interface to setup IPD/PIP for
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_ipd(int interface)
+{
+ int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ int num_ports = interface_port_count[interface];
+
+ while (num_ports--) {
+ __cvmx_helper_port_setup_ipd(ipd_port);
+ ipd_port++;
+ }
+ return 0;
+}
+
+/**
+ * Set up global settings for IPD/PIP not related to a specific
+ * interface or port. This must be called before IPD is enabled.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_ipd(void)
+{
+ /* Set up the global packet input options */
+ cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
+ CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
+ CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
+ /* The +8 is to account for the next ptr */
+ (CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
+ /* The +8 is to account for the next ptr */
+ (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
+ CVMX_FPA_WQE_POOL,
+ CVMX_IPD_OPC_MODE_STT,
+ CVMX_HELPER_ENABLE_BACK_PRESSURE);
+ return 0;
+}
+
+/**
+ * Set up the PKO for the ports on an interface. The number of
+ * queues per port and the priority of each PKO output queue
+ * are set here. PKO must be disabled when this function is called.
+ *
+ * @interface: Interface to setup PKO for
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_interface_setup_pko(int interface)
+{
+ /*
+ * Each packet output queue has an associated priority. The
+ * higher the priority, the more often it can send a packet. A
+ * priority of 8 means it can send in all 8 rounds of
+ * contention. We're going to make each queue one less than
+ * the last. The vector of priorities has been extended to
+ * support CN5xxx CPUs, where up to 16 queues can be
+ * associated to a port. To keep backward compatibility we
+ * associated with a port. To keep backward compatibility we
+ * the second half. With per-core PKO queues (PKO lockless
+ * operation) all queues have the same priority.
+ */
+ uint64_t priorities[16] =
+ { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
+
+ /*
+ * Set up the IPD/PIP and PKO for the ports discovered
+ * above. Here packet classification, tagging and output
+ * priorities are set.
+ */
+ int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
+ int num_ports = interface_port_count[interface];
+ while (num_ports--) {
+ /*
+ * Give the user a chance to override the per queue
+ * priorities.
+ */
+ if (cvmx_override_pko_queue_priority)
+ cvmx_override_pko_queue_priority(ipd_port, priorities);
+
+ cvmx_pko_config_port(ipd_port,
+ cvmx_pko_get_base_queue_per_core(ipd_port,
+ 0),
+ cvmx_pko_get_num_queues(ipd_port),
+ priorities);
+ ipd_port++;
+ }
+ return 0;
+}
+
+/**
+ * Set up global settings for PKO not related to a specific
+ * interface or port. This must be called before PKO is enabled.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static int __cvmx_helper_global_setup_pko(void)
+{
+ /*
+ * Disable tagwait FAU timeout. This needs to be done before
+ * anyone might start packet output using tags.
+ */
+ union cvmx_iob_fau_timeout fau_to;
+ fau_to.u64 = 0;
+ fau_to.s.tout_val = 0xfff;
+ fau_to.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+ return 0;
+}
+
+/**
+ * Set up the global backpressure setting.
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_global_setup_backpressure(void)
+{
+#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
+ /* Disable backpressure if configured to do so */
+ /* Disable backpressure (pause frame) generation */
+ int num_interfaces = cvmx_helper_get_number_of_interfaces();
+ int interface;
+ for (interface = 0; interface < num_interfaces; interface++) {
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ cvmx_gmx_set_backpressure_override(interface, 0xf);
+ break;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+/**
+ * Enable packet input/output from the hardware. This function is
+ * called after all internal setup is complete and IPD is enabled.
+ * After this function completes, packets will be accepted from the
+ * hardware ports. PKO should still be disabled to make sure packets
+ * aren't sent out of partially set up hardware.
+ *
+ * @interface: Interface to enable
+ *
+ * Returns Zero on success, negative on failure
+ */
+static int __cvmx_helper_packet_hardware_enable(int interface)
+{
+ int result = 0;
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ /* Nothing to do */
+ break;
+ /* XAUI is a single high speed port */
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result = __cvmx_helper_xaui_enable(interface);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_enable(interface);
+ break;
+ /*
+ * SPI4 can have 1-16 ports depending on the device at
+ * the other end
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_enable(interface);
+ break;
+ /*
+ * SGMII can have 1-4 ports depending on how many are
+ * hooked up
+ */
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_enable(interface);
+ break;
+ /* PCI target Network Packet Interface */
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ result = __cvmx_helper_npi_enable(interface);
+ break;
+ /*
+ * Special loopback only ports. These are not the same
+ * as other ports in loopback mode
+ */
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ result = __cvmx_helper_loop_enable(interface);
+ break;
+ }
+ result |= __cvmx_helper_board_hardware_enable(interface);
+ return result;
+}
+
+/**
+ * Function to adjust internal IPD pointer alignments
+ *
+ * Returns 0 on success
+ * !0 on failure
+ */
+int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
+{
+#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
+ (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
+#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
+ (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
+#define FIX_IPD_OUTPORT 0
+ /* Ports 0-15 are interface 0, 16-31 are interface 1 */
+#define INTERFACE(port) (port >> 4)
+#define INDEX(port) (port & 0xf)
+ uint64_t *p64;
+ cvmx_pko_command_word0_t pko_command;
+ union cvmx_buf_ptr g_buffer, pkt_buffer;
+ cvmx_wqe_t *work;
+ int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int retry_cnt;
+ int retry_loop_cnt;
+ int mtu;
+ int i;
+ cvmx_helper_link_info_t link_info;
+
+ /* Save values for restore at end */
+ uint64_t prtx_cfg =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t tx_ptr_en =
+ cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t rx_ptr_en =
+ cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t rxx_jabber =
+ cvmx_read_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+ uint64_t frame_max =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
+
+ /* Configure port to gig FDX as required for loopback mode */
+ cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
+
+ /*
+ * Disable reception on all ports so if traffic is present it
+ * will not interfere.
+ */
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
+
+ cvmx_wait(100000000ull);
+
+ for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
+ retry_cnt = 100000;
+ wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+ pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
+ wqe_pcnt &= 0x7f;
+
+ num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
+
+ if (num_segs == 0)
+ goto fix_ipd_exit;
+
+ num_segs += 1;
+
+ size =
+ FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
+ ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
+ (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
+
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
+ 1 << INDEX(FIX_IPD_OUTPORT));
+ CVMX_SYNC;
+
+ g_buffer.u64 = 0;
+ g_buffer.s.addr =
+ cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
+ if (g_buffer.s.addr == 0) {
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+ "buffer allocation failure.\n");
+ goto fix_ipd_exit;
+ }
+
+ g_buffer.s.pool = CVMX_FPA_WQE_POOL;
+ g_buffer.s.size = num_segs;
+
+ pkt_buffer.u64 = 0;
+ pkt_buffer.s.addr =
+ cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
+ if (pkt_buffer.s.addr == 0) {
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+ "buffer allocation failure.\n");
+ goto fix_ipd_exit;
+ }
+ pkt_buffer.s.i = 1;
+ pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+ pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
+
+ p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
+ p64[0] = 0xffffffffffff0000ull;
+ p64[1] = 0x08004510ull;
+ p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
+ p64[3] = 0x3a5fc0a81073c0a8ull;
+
+ for (i = 0; i < num_segs; i++) {
+ if (i > 0)
+ pkt_buffer.s.size =
+ FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
+
+ if (i == (num_segs - 1))
+ pkt_buffer.s.i = 0;
+
+ *(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
+ 8 * i) = pkt_buffer.u64;
+ }
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+ pko_command.s.segs = num_segs;
+ pko_command.s.total_bytes = size;
+ pko_command.s.dontfree = 0;
+ pko_command.s.gather = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ 1 << INDEX(FIX_IPD_OUTPORT));
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ 1 << INDEX(FIX_IPD_OUTPORT));
+
+ mtu =
+ cvmx_read_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)));
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+ (INDEX(FIX_IPD_OUTPORT),
+ INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
+
+ cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
+ cvmx_pko_get_base_queue
+ (FIX_IPD_OUTPORT),
+ CVMX_PKO_LOCK_CMD_QUEUE);
+ cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
+ cvmx_pko_get_base_queue
+ (FIX_IPD_OUTPORT), pko_command,
+ g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
+
+ CVMX_SYNC;
+
+ do {
+ work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ retry_cnt--;
+ } while ((work == NULL) && (retry_cnt > 0));
+
+ if (!retry_cnt)
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
+ "get_work() timeout occurred.\n");
+
+ /* Free packet */
+ if (work)
+ cvmx_helper_free_packet_data(work);
+ }
+
+fix_ipd_exit:
+
+ /* Return CSR configs to saved values */
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+ prtx_cfg);
+ cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ tx_ptr_en);
+ cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
+ rx_ptr_en);
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+ rxx_jabber);
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
+ (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
+ frame_max);
+ cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
+ /* Set link to down so autonegotiation will set it up again */
+ link_info.u64 = 0;
+ cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
+
+ /*
+ * Bring the link back up as autonegotiation is not done in
+ * user applications.
+ */
+ cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
+
+ CVMX_SYNC;
+ if (num_segs)
+ cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
+
+ return !!num_segs;
+
+}
+
+/**
+ * Called after all internal packet IO paths are setup. This
+ * function enables IPD/PIP and begins packet input and output.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+ int num_interfaces;
+ int interface;
+
+ /* Enable IPD */
+ cvmx_ipd_enable();
+
+ /*
+ * Time to enable hardware ports packet input and output. Note
+ * that at this point IPD/PIP must be fully functional and PKO
+ * must be disabled
+ */
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ if (cvmx_helper_ports_on_interface(interface) > 0)
+ __cvmx_helper_packet_hardware_enable(interface);
+ }
+
+ /* Finally enable PKO now that the entire path is up and running */
+ cvmx_pko_enable();
+
+ if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
+ || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
+ && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
+ __cvmx_helper_errata_fix_ipd_ptr_alignment();
+ return 0;
+}
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_global(void)
+{
+ int result = 0;
+ int interface;
+ union cvmx_l2c_cfg l2c_cfg;
+ union cvmx_smix_en smix_en;
+ const int num_interfaces = cvmx_helper_get_number_of_interfaces();
+
+ /*
+ * CN52XX pass 1: due to a bug, the 2nd order CDR needs to
+ * be disabled.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
+ __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
+
+ /*
+ * Tell L2 to give the IOB statically higher priority compared
+ * to the cores. This avoids conditions where IO blocks might
+ * be starved under very high L2 loads.
+ */
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+ l2c_cfg.s.lrf_arb_mode = 0;
+ l2c_cfg.s.rfb_arb_mode = 0;
+ cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
+
+ /* Make sure SMI/MDIO is enabled so we can query PHYs */
+ smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
+ if (!smix_en.s.en) {
+ smix_en.s.en = 1;
+ cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
+ }
+
+ /* Newer chips actually have two SMI/MDIO interfaces */
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN58XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
+ if (!smix_en.s.en) {
+ smix_en.s.en = 1;
+ cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
+ }
+ }
+
+ cvmx_pko_initialize_global();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ result |= cvmx_helper_interface_probe(interface);
+ if (cvmx_helper_ports_on_interface(interface) > 0)
+ cvmx_dprintf("Interface %d has %d ports (%s)\n",
+ interface,
+ cvmx_helper_ports_on_interface(interface),
+ cvmx_helper_interface_mode_to_string
+ (cvmx_helper_interface_get_mode
+ (interface)));
+ result |= __cvmx_helper_interface_setup_ipd(interface);
+ result |= __cvmx_helper_interface_setup_pko(interface);
+ }
+
+ result |= __cvmx_helper_global_setup_ipd();
+ result |= __cvmx_helper_global_setup_pko();
+
+ /* Enable any flow control and backpressure */
+ result |= __cvmx_helper_global_setup_backpressure();
+
+#if CVMX_HELPER_ENABLE_IPD
+ result |= cvmx_helper_ipd_and_packet_input_enable();
+#endif
+ return result;
+}
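+
+/*
+ * Illustrative bring-up sketch (a minimal sequence, assuming FPA pools
+ * have already been filled and this runs on the boot core):
+ *
+ *	int interface, index;
+ *
+ *	cvmx_helper_initialize_packet_io_global();
+ *	cvmx_helper_initialize_packet_io_local();
+ *
+ *	for (interface = 0;
+ *	     interface < cvmx_helper_get_number_of_interfaces();
+ *	     interface++)
+ *		for (index = 0;
+ *		     index < cvmx_helper_ports_on_interface(interface);
+ *		     index++)
+ *			cvmx_helper_link_autoconf(
+ *				cvmx_helper_get_ipd_port(interface, index));
+ */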
+
+/**
+ * Does core local initialization for packet io
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+int cvmx_helper_initialize_packet_io_local(void)
+{
+ return cvmx_pko_initialize_local();
+}
+
+/**
+ * Auto configure an IPD/PKO port link state and speed. This
+ * function basically does the equivalent of:
+ * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
+ *
+ * @ipd_port: IPD/PKO port to auto configure
+ *
+ * Returns Link state after configure
+ */
+cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
+{
+ cvmx_helper_link_info_t link_info;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface)) {
+ link_info.u64 = 0;
+ return link_info;
+ }
+
+ link_info = cvmx_helper_link_get(ipd_port);
+ if (link_info.u64 == port_link_info[ipd_port].u64)
+ return link_info;
+
+ /* If we fail to set the link speed, port_link_info will not change */
+ cvmx_helper_link_set(ipd_port, link_info);
+
+ /*
+ * port_link_info should be the current value, which will be
+ * different from what was expected if cvmx_helper_link_set() failed.
+ */
+ return port_link_info[ipd_port];
+}
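+
+/*
+ * Illustrative polling sketch: an ethernet driver might call the
+ * function above periodically and mirror the result into the kernel's
+ * carrier state ("dev" is the driver's struct net_device, assumed to
+ * exist in the caller).
+ *
+ *	cvmx_helper_link_info_t li = cvmx_helper_link_autoconf(ipd_port);
+ *
+ *	if (li.s.link_up)
+ *		netif_carrier_on(dev);
+ *	else
+ *		netif_carrier_off(dev);
+ */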
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t result;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ /*
+ * The default result will be a down link unless the code below
+ * changes it.
+ */
+ result.u64 = 0;
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return result;
+
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ /* Network links are not supported */
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result = __cvmx_helper_xaui_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ if (index == 0)
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ else {
+ result.s.full_duplex = 1;
+ result.s.link_up = 1;
+ result.s.speed = 1000;
+ }
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ result = __cvmx_helper_rgmii_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_link_get(ipd_port);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ /* Network links are not supported */
+ break;
+ }
+ return result;
+}
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
+{
+ int result = -1;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return -1;
+
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
+ break;
+ /*
+ * RGMII/GMII/MII are all treated about the same. Most
+ * functions refer to these ports as RGMII.
+ */
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ result = __cvmx_helper_spi_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ break;
+ }
+ /*
+ * Set the port_link_info here so that the link status is updated
+ * no matter how cvmx_helper_link_set is called. We don't change
+ * the value if link_set failed.
+ */
+ if (result == 0)
+ port_link_info[ipd_port].u64 = link_info.u64;
+ return result;
+}
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
+ int enable_external)
+{
+ int result = -1;
+ int interface = cvmx_helper_get_interface_num(ipd_port);
+ int index = cvmx_helper_get_interface_index_num(ipd_port);
+
+ if (index >= cvmx_helper_ports_on_interface(interface))
+ return -1;
+
+ switch (cvmx_helper_interface_get_mode(interface)) {
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ result =
+ __cvmx_helper_xaui_configure_loopback(ipd_port,
+ enable_internal,
+ enable_external);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ result =
+ __cvmx_helper_rgmii_configure_loopback(ipd_port,
+ enable_internal,
+ enable_external);
+ break;
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ result =
+ __cvmx_helper_sgmii_configure_loopback(ipd_port,
+ enable_internal,
+ enable_external);
+ break;
+ }
+ return result;
+}
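+
+/*
+ * Illustrative self-test sketch: put a port into internal loopback so
+ * packets it transmits come straight back to Octeon, run the test
+ * traffic, then restore normal operation. Error handling is omitted.
+ *
+ *	cvmx_helper_configure_loopback(ipd_port, 1, 0);
+ *	... transmit and verify test packets ...
+ *	cvmx_helper_configure_loopback(ipd_port, 0, 0);
+ */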
diff --git a/drivers/staging/octeon/cvmx-helper.h b/drivers/staging/octeon/cvmx-helper.h
new file mode 100644
index 000000000000..51916f3cc40c
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper.h
@@ -0,0 +1,227 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Helper functions for common, but complicated tasks.
+ *
+ */
+
+#ifndef __CVMX_HELPER_H__
+#define __CVMX_HELPER_H__
+
+#include "cvmx-config.h"
+#include "cvmx-fpa.h"
+#include "cvmx-wqe.h"
+
+typedef enum {
+ CVMX_HELPER_INTERFACE_MODE_DISABLED,
+ CVMX_HELPER_INTERFACE_MODE_RGMII,
+ CVMX_HELPER_INTERFACE_MODE_GMII,
+ CVMX_HELPER_INTERFACE_MODE_SPI,
+ CVMX_HELPER_INTERFACE_MODE_PCIE,
+ CVMX_HELPER_INTERFACE_MODE_XAUI,
+ CVMX_HELPER_INTERFACE_MODE_SGMII,
+ CVMX_HELPER_INTERFACE_MODE_PICMG,
+ CVMX_HELPER_INTERFACE_MODE_NPI,
+ CVMX_HELPER_INTERFACE_MODE_LOOP,
+} cvmx_helper_interface_mode_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t reserved_20_63:44;
+ uint64_t link_up:1; /**< Is the physical link up? */
+ uint64_t full_duplex:1; /**< 1 if the link is full duplex */
+ uint64_t speed:18; /**< Speed of the link in Mbps */
+ } s;
+} cvmx_helper_link_info_t;
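+
+/*
+ * Illustrative use of cvmx_helper_link_info_t (a minimal sketch; the
+ * printk format is only an example):
+ *
+ *	cvmx_helper_link_info_t li = cvmx_helper_link_get(ipd_port);
+ *
+ *	if (li.s.link_up)
+ *		printk("port %d: %d Mbps, %s duplex\n", ipd_port,
+ *		       (int)li.s.speed,
+ *		       li.s.full_duplex ? "full" : "half");
+ */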
+
+#include "cvmx-helper-fpa.h"
+
+#include <asm/octeon/cvmx-helper-errata.h>
+#include "cvmx-helper-loop.h"
+#include "cvmx-helper-npi.h"
+#include "cvmx-helper-rgmii.h"
+#include "cvmx-helper-sgmii.h"
+#include "cvmx-helper-spi.h"
+#include "cvmx-helper-util.h"
+#include "cvmx-helper-xaui.h"
+
+/**
+ * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
+ * priorities[16]) is a function pointer. It is meant to allow
+ * customization of the PKO queue priorities based on the port
+ * number. Users should set this pointer to a function before
+ * calling any cvmx-helper operations.
+ */
+extern void (*cvmx_override_pko_queue_priority) (int pko_port,
+ uint64_t priorities[16]);
+
+/**
+ * cvmx_override_ipd_port_setup(int ipd_port) is a function
+ * pointer. It is meant to allow customization of the IPD port
+ * setup before packet input/output comes online. It is called
+ * after cvmx-helper does the default IPD configuration, but
+ * before IPD is enabled. Users should set this pointer to a
+ * function before calling any cvmx-helper operations.
+ */
+extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
+
+/**
+ * This function enables the IPD and also enables the packet interfaces.
+ * The packet interfaces (RGMII and SPI) must be enabled after the
+ * IPD. This should be called by the user program after any additional
+ * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
+ * is not set in the executive-config.h file.
+ *
+ * Returns 0 on success
+ * -1 on failure
+ */
+extern int cvmx_helper_ipd_and_packet_input_enable(void);
+
+/**
+ * Initialize the PIP, IPD, and PKO hardware to support
+ * simple priority based queues for the ethernet ports. Each
+ * port is configured with a number of priority queues based
+ * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
+ * priority than the previous.
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_initialize_packet_io_global(void);
+
+/**
+ * Does core-local initialization for packet I/O
+ *
+ * Returns Zero on success, non-zero on failure
+ */
+extern int cvmx_helper_initialize_packet_io_local(void);
+
+/**
+ * Returns the number of ports on the given interface.
+ * The interface must be initialized before the port count
+ * can be returned.
+ *
+ * @interface: Which interface to return port count for.
+ *
+ * Returns Port count for interface
+ * -1 for uninitialized interface
+ */
+extern int cvmx_helper_ports_on_interface(int interface);
+
+/**
+ * Return the number of interfaces the chip has. Each interface
+ * may have multiple ports. Most chips support two interfaces,
+ * but the CNX0XX and CNX1XX are exceptions. These only support
+ * one interface.
+ *
+ * Returns Number of interfaces on chip
+ */
+extern int cvmx_helper_get_number_of_interfaces(void);
+
+/**
+ * Get the operating mode of an interface. Depending on the Octeon
+ * chip and configuration, this function returns an enumeration
+ * of the type of packet I/O supported by an interface.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Mode of the interface. Unknown or unsupported interfaces return
+ * DISABLED.
+ */
+extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
+ interface);
+
+/**
+ * Auto configure an IPD/PKO port link state and speed. This
+ * function basically does the equivalent of:
+ * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
+ *
+ * @ipd_port: IPD/PKO port to auto configure
+ *
+ * Returns Link state after configure
+ */
+extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
+
+/**
+ * Return the link state of an IPD/PKO port as returned by
+ * auto negotiation. The result of this function may not match
+ * Octeon's link config if auto negotiation has changed since
+ * the last call to cvmx_helper_link_set().
+ *
+ * @ipd_port: IPD/PKO port to query
+ *
+ * Returns Link state
+ */
+extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
+
+/**
+ * Configure an IPD/PKO port for the specified link state. This
+ * function does not influence auto negotiation at the PHY level.
+ * The passed link state must always match the link state returned
+ * by cvmx_helper_link_get(). It is normally best to use
+ * cvmx_helper_link_autoconf() instead.
+ *
+ * @ipd_port: IPD/PKO port to configure
+ * @link_info: The new link state
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_helper_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info);
+
+/**
+ * This function probes an interface to determine the actual
+ * number of hardware ports connected to it. It doesn't set up the
+ * ports or enable them. The main goal here is to set the global
+ * interface_port_count[interface] correctly. Hardware setup of the
+ * ports will be performed later.
+ *
+ * @interface: Interface to probe
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_helper_interface_probe(int interface);
+
+/**
+ * Configure a port for internal and/or external loopback. Internal loopback
+ * causes packets sent by the port to be received by Octeon. External loopback
+ * causes packets received from the wire to be sent out again.
+ *
+ * @ipd_port: IPD/PKO port to loopback.
+ * @enable_internal:
+ * Non zero if you want internal loopback
+ * @enable_external:
+ * Non zero if you want external loopback
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
+ int enable_external);
+
+#endif /* __CVMX_HELPER_H__ */
diff --git a/drivers/staging/octeon/cvmx-interrupt-decodes.c b/drivers/staging/octeon/cvmx-interrupt-decodes.c
new file mode 100644
index 000000000000..a3337e382ee9
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-interrupt-decodes.c
@@ -0,0 +1,371 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2009 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Automatically generated functions useful for enabling
+ * and decoding RSL_INT_BLOCKS interrupts.
+ *
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-gmxx-defs.h"
+#include "cvmx-pcsx-defs.h"
+#include "cvmx-pcsxx-defs.h"
+#include "cvmx-spxx-defs.h"
+#include "cvmx-stxx-defs.h"
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+
+/**
+ * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
+ */
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
+{
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
+ gmx_rx_int_en.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_29_63 */
+ gmx_rx_int_en.s.hg2cc = 1;
+ gmx_rx_int_en.s.hg2fld = 1;
+ gmx_rx_int_en.s.undat = 1;
+ gmx_rx_int_en.s.uneop = 1;
+ gmx_rx_int_en.s.unsop = 1;
+ gmx_rx_int_en.s.bad_term = 1;
+ gmx_rx_int_en.s.bad_seq = 1;
+ gmx_rx_int_en.s.rem_fault = 1;
+ gmx_rx_int_en.s.loc_fault = 1;
+ gmx_rx_int_en.s.pause_drp = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_16_18 */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_9_9 */
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_5_6 */
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_20_63 */
+ gmx_rx_int_en.s.pause_drp = 1;
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_6_6 */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_19_63 */
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_20_63 */
+ gmx_rx_int_en.s.pause_drp = 1;
+ /*gmx_rx_int_en.s.phy_dupx = 1; */
+ /*gmx_rx_int_en.s.phy_spd = 1; */
+ /*gmx_rx_int_en.s.phy_link = 1; */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ gmx_rx_int_en.s.niberr = 1;
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
+ gmx_rx_int_en.s.alnerr = 1;
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ gmx_rx_int_en.s.maxerr = 1;
+ gmx_rx_int_en.s.carext = 1;
+ gmx_rx_int_en.s.minerr = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping gmx_rx_int_en.s.reserved_29_63 */
+ gmx_rx_int_en.s.hg2cc = 1;
+ gmx_rx_int_en.s.hg2fld = 1;
+ gmx_rx_int_en.s.undat = 1;
+ gmx_rx_int_en.s.uneop = 1;
+ gmx_rx_int_en.s.unsop = 1;
+ gmx_rx_int_en.s.bad_term = 1;
+ gmx_rx_int_en.s.bad_seq = 0;
+ gmx_rx_int_en.s.rem_fault = 1;
+ gmx_rx_int_en.s.loc_fault = 0;
+ gmx_rx_int_en.s.pause_drp = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_16_18 */
+ /*gmx_rx_int_en.s.ifgerr = 1; */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
+ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
+ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
+ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
+ gmx_rx_int_en.s.ovrerr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_9_9 */
+ gmx_rx_int_en.s.skperr = 1;
+ gmx_rx_int_en.s.rcverr = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_5_6 */
+ /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
+ gmx_rx_int_en.s.jabber = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_2_2 */
+ gmx_rx_int_en.s.carext = 1;
+ /* Skipping gmx_rx_int_en.s.reserved_0_0 */
+ }
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
+}
+/**
+ * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
+ */
+void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
+{
+ union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
+ cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
+ cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
+ pcs_int_en_reg.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping pcs_int_en_reg.s.reserved_12_63 */
+ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.sync_bad_en = 1;
+ pcs_int_en_reg.s.an_bad_en = 1;
+ pcs_int_en_reg.s.rxlock_en = 1;
+ pcs_int_en_reg.s.rxbad_en = 1;
+ /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.txbad_en = 1;
+ pcs_int_en_reg.s.txfifo_en = 1;
+ pcs_int_en_reg.s.txfifu_en = 1;
+ pcs_int_en_reg.s.an_err_en = 1;
+ /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping pcs_int_en_reg.s.reserved_12_63 */
+ /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.sync_bad_en = 1;
+ pcs_int_en_reg.s.an_bad_en = 1;
+ pcs_int_en_reg.s.rxlock_en = 1;
+ pcs_int_en_reg.s.rxbad_en = 1;
+ /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
+ pcs_int_en_reg.s.txbad_en = 1;
+ pcs_int_en_reg.s.txfifo_en = 1;
+ pcs_int_en_reg.s.txfifu_en = 1;
+ pcs_int_en_reg.s.an_err_en = 1;
+ /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
+ /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
+ }
+ cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
+}
+/**
+ * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
+ */
+void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
+{
+ union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
+ cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
+ cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
+ pcsx_int_en_reg.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+ pcsx_int_en_reg.s.algnlos_en = 1;
+ pcsx_int_en_reg.s.synlos_en = 1;
+ pcsx_int_en_reg.s.bitlckls_en = 1;
+ pcsx_int_en_reg.s.rxsynbad_en = 1;
+ pcsx_int_en_reg.s.rxbad_en = 1;
+ pcsx_int_en_reg.s.txflt_en = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
+ pcsx_int_en_reg.s.algnlos_en = 1;
+ pcsx_int_en_reg.s.synlos_en = 1;
+ pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
+ pcsx_int_en_reg.s.rxsynbad_en = 1;
+ pcsx_int_en_reg.s.rxbad_en = 1;
+ pcsx_int_en_reg.s.txflt_en = 1;
+ }
+ cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
+}
+
+/**
+ * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
+ */
+void __cvmx_interrupt_spxx_int_msk_enable(int index)
+{
+ union cvmx_spxx_int_msk spx_int_msk;
+ cvmx_write_csr(CVMX_SPXX_INT_REG(index),
+ cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
+ spx_int_msk.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping spx_int_msk.s.reserved_12_63 */
+ spx_int_msk.s.calerr = 1;
+ spx_int_msk.s.syncerr = 1;
+ spx_int_msk.s.diperr = 1;
+ spx_int_msk.s.tpaovr = 1;
+ spx_int_msk.s.rsverr = 1;
+ spx_int_msk.s.drwnng = 1;
+ spx_int_msk.s.clserr = 1;
+ spx_int_msk.s.spiovr = 1;
+ /* Skipping spx_int_msk.s.reserved_2_3 */
+ spx_int_msk.s.abnorm = 1;
+ spx_int_msk.s.prtnxa = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping spx_int_msk.s.reserved_12_63 */
+ spx_int_msk.s.calerr = 1;
+ spx_int_msk.s.syncerr = 1;
+ spx_int_msk.s.diperr = 1;
+ spx_int_msk.s.tpaovr = 1;
+ spx_int_msk.s.rsverr = 1;
+ spx_int_msk.s.drwnng = 1;
+ spx_int_msk.s.clserr = 1;
+ spx_int_msk.s.spiovr = 1;
+ /* Skipping spx_int_msk.s.reserved_2_3 */
+ spx_int_msk.s.abnorm = 1;
+ spx_int_msk.s.prtnxa = 1;
+ }
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
+}
+/**
+ * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
+ */
+void __cvmx_interrupt_stxx_int_msk_enable(int index)
+{
+ union cvmx_stxx_int_msk stx_int_msk;
+ cvmx_write_csr(CVMX_STXX_INT_REG(index),
+ cvmx_read_csr(CVMX_STXX_INT_REG(index)));
+ stx_int_msk.u64 = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ /* Skipping stx_int_msk.s.reserved_8_63 */
+ stx_int_msk.s.frmerr = 1;
+ stx_int_msk.s.unxfrm = 1;
+ stx_int_msk.s.nosync = 1;
+ stx_int_msk.s.diperr = 1;
+ stx_int_msk.s.datovr = 1;
+ stx_int_msk.s.ovrbst = 1;
+ stx_int_msk.s.calpar1 = 1;
+ stx_int_msk.s.calpar0 = 1;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Skipping stx_int_msk.s.reserved_8_63 */
+ stx_int_msk.s.frmerr = 1;
+ stx_int_msk.s.unxfrm = 1;
+ stx_int_msk.s.nosync = 1;
+ stx_int_msk.s.diperr = 1;
+ stx_int_msk.s.datovr = 1;
+ stx_int_msk.s.ovrbst = 1;
+ stx_int_msk.s.calpar1 = 1;
+ stx_int_msk.s.calpar0 = 1;
+ }
+ cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
+}
diff --git a/drivers/staging/octeon/cvmx-interrupt-rsl.c b/drivers/staging/octeon/cvmx-interrupt-rsl.c
new file mode 100644
index 000000000000..df50048cfbc0
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-interrupt-rsl.c
@@ -0,0 +1,140 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Utility functions to decode Octeon's RSL_INT_BLOCKS
+ * interrupts into error messages.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-asxx-defs.h"
+#include "cvmx-gmxx-defs.h"
+
+#ifndef PRINT_ERROR
+#define PRINT_ERROR(format, ...)
+#endif
+
+void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
+
+/**
+ * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
+ * CN58XX.
+ *
+ * @block: Interface to enable (0-1)
+ */
+void __cvmx_interrupt_asxx_enable(int block)
+{
+ int mask;
+ union cvmx_asxx_int_en csr;
+ /*
+ * CN38XX and CN58XX have two interfaces with 4 ports per
+ * interface. All other chips have a max of 3 ports on
+ * interface 0
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ mask = 0xf; /* Set enables for 4 ports */
+ else
+ mask = 0x7; /* Set enables for 3 ports */
+
+ /* Enable interface interrupts */
+ csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
+ csr.s.txpsh = mask;
+ csr.s.txpop = mask;
+ csr.s.ovrflw = mask;
+ cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
+}
+/**
+ * Enable GMX error reporting for the supplied interface
+ *
+ * @interface: Interface to enable
+ */
+void __cvmx_interrupt_gmxx_enable(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+ union cvmx_gmxx_tx_int_en gmx_tx_int_en;
+ int num_ports;
+ int index;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ if (mode.s.en) {
+ switch (mode.cn56xx.mode) {
+ case 1: /* XAUI */
+ num_ports = 1;
+ break;
+ case 2: /* SGMII */
+ case 3: /* PICMG */
+ num_ports = 4;
+ break;
+ default: /* Disabled */
+ num_ports = 0;
+ break;
+ }
+ } else
+ num_ports = 0;
+ } else {
+ if (mode.s.en) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /*
+ * SPI on CN38XX and CN58XX reports all
+ * errors through port 0. RGMII needs
+ * to check all 4 ports
+ */
+ if (mode.s.type)
+ num_ports = 1;
+ else
+ num_ports = 4;
+ } else {
+ /*
+ * CN30XX, CN31XX, and CN50XX have two
+ * or three ports. GMII and MII have two,
+ * RGMII has three.
+ */
+ if (mode.s.type)
+ num_ports = 2;
+ else
+ num_ports = 3;
+ }
+ } else
+ num_ports = 0;
+ }
+
+ gmx_tx_int_en.u64 = 0;
+ if (num_ports) {
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ gmx_tx_int_en.s.ncb_nxa = 1;
+ gmx_tx_int_en.s.pko_nxa = 1;
+ }
+ gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
+ cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
+ for (index = 0; index < num_ports; index++)
+ __cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
+}
diff --git a/drivers/staging/octeon/cvmx-ipd.h b/drivers/staging/octeon/cvmx-ipd.h
new file mode 100644
index 000000000000..115a552c5c7f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-ipd.h
@@ -0,0 +1,338 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Interface to the hardware Input Packet Data unit.
+ */
+
+#ifndef __CVMX_IPD_H__
+#define __CVMX_IPD_H__
+
+#include <asm/octeon/octeon-feature.h>
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+enum cvmx_ipd_mode {
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
+ CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
+ CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
+};
+
+#ifndef CVMX_ENABLE_LEN_M8_FIX
+#define CVMX_ENABLE_LEN_M8_FIX 0
+#endif
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
+typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;
+
+typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
+typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
+
+/**
+ * Configure IPD
+ *
+ * @mbuff_size: Packet buffer size in 8 byte words
+ * @first_mbuff_skip:
+ * Number of 8 byte words to skip in the first buffer
+ * @not_first_mbuff_skip:
+ * Number of 8 byte words to skip in each following buffer
+ * @first_back: Must be same as first_mbuff_skip / 128
+ * @second_back:
+ * Must be same as not_first_mbuff_skip / 128
+ * @wqe_fpa_pool:
+ * FPA pool to get work entries from
+ * @cache_mode:
+ * @back_pres_enable_flag:
+ * Enable or disable port back pressure
+ */
+static inline void cvmx_ipd_config(uint64_t mbuff_size,
+ uint64_t first_mbuff_skip,
+ uint64_t not_first_mbuff_skip,
+ uint64_t first_back,
+ uint64_t second_back,
+ uint64_t wqe_fpa_pool,
+ enum cvmx_ipd_mode cache_mode,
+ uint64_t back_pres_enable_flag)
+{
+ cvmx_ipd_mbuff_first_skip_t first_skip;
+ cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
+ union cvmx_ipd_packet_mbuff_size size;
+ cvmx_ipd_first_next_ptr_back_t first_back_struct;
+ cvmx_ipd_second_next_ptr_back_t second_back_struct;
+ union cvmx_ipd_wqe_fpa_queue wqe_pool;
+ union cvmx_ipd_ctl_status ipd_ctl_reg;
+
+ first_skip.u64 = 0;
+ first_skip.s.skip_sz = first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
+
+ not_first_skip.u64 = 0;
+ not_first_skip.s.skip_sz = not_first_mbuff_skip;
+ cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
+
+ size.u64 = 0;
+ size.s.mb_size = mbuff_size;
+ cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
+
+ first_back_struct.u64 = 0;
+ first_back_struct.s.back = first_back;
+ cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
+
+ second_back_struct.u64 = 0;
+ second_back_struct.s.back = second_back;
+ cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
+
+ wqe_pool.u64 = 0;
+ wqe_pool.s.wqe_pool = wqe_fpa_pool;
+ cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
+
+ ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_reg.s.opc_mode = cache_mode;
+ ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
+
+ /* Note: the example RED code that used to be here has been moved to
+ cvmx_helper_setup_red */
+}
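+
+/*
+ * Illustrative call (the values are assumptions for the example, not
+ * requirements of this header): 2048-byte packet buffers, a 32-byte
+ * skip in the first buffer only, work entries from FPA pool 1, first
+ * block cached in L2, backpressure enabled. Skips are in 8-byte
+ * words, so a 32-byte skip is 4 words, and the matching "back"
+ * values (skip / 128) come out to zero here.
+ *
+ *   cvmx_ipd_config(2048 / 8, 4, 0, 0, 0, 1,
+ *                   CVMX_IPD_OPC_MODE_STF1_STT, 1);
+ */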
+
+/**
+ * Enable IPD
+ */
+static inline void cvmx_ipd_enable(void)
+{
+ union cvmx_ipd_ctl_status ipd_reg;
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_reg.s.ipd_en) {
+ cvmx_dprintf("Warning: Enabling IPD when IPD already enabled.\n");
+ }
+ ipd_reg.s.ipd_en = 1;
+#if CVMX_ENABLE_LEN_M8_FIX
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ ipd_reg.s.len_m8 = 1;
+#endif
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+/**
+ * Disable IPD
+ */
+static inline void cvmx_ipd_disable(void)
+{
+ union cvmx_ipd_ctl_status ipd_reg;
+ ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_reg.s.ipd_en = 0;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
+}
+
+/**
+ * Supportive function for cvmx_fpa_shutdown_pool.
+ */
+static inline void cvmx_ipd_free_ptr(void)
+{
+ /* Only CN38XXp{1,2} cannot read pointer out of the IPD */
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
+ && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+ int no_wptr = 0;
+ union cvmx_ipd_ptr_count ipd_ptr_count;
+ ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
+
+ /* Handle Work Queue Entry in cn56xx and cn52xx */
+ if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ if (ipd_ctl_status.s.no_wptr)
+ no_wptr = 1;
+ }
+
+ /* Free the prefetched WQE */
+ if (ipd_ptr_count.s.wqev_cnt) {
+ union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
+ ipd_wqe_ptr_valid.u64 =
+ cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) ipd_wqe_ptr_valid.s.
+ ptr << 7), CVMX_FPA_PACKET_POOL,
+ 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) ipd_wqe_ptr_valid.s.
+ ptr << 7), CVMX_FPA_WQE_POOL, 0);
+ }
+
+ /* Free all WQE in the fifo */
+ if (ipd_ptr_count.s.wqe_pcnt) {
+ int i;
+ union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
+ ipd_pwp_ptr_fifo_ctl.s.cena = 0;
+ ipd_pwp_ptr_fifo_ctl.s.raddr =
+ ipd_pwp_ptr_fifo_ctl.s.max_cnts +
+ (ipd_pwp_ptr_fifo_ctl.s.wraddr +
+ i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ if (no_wptr)
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_pwp_ptr_fifo_ctl.s.
+ ptr << 7),
+ CVMX_FPA_PACKET_POOL, 0);
+ else
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_pwp_ptr_fifo_ctl.s.
+ ptr << 7),
+ CVMX_FPA_WQE_POOL, 0);
+ }
+ ipd_pwp_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ }
+
+ /* Free the prefetched packet */
+ if (ipd_ptr_count.s.pktv_cnt) {
+ union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
+ ipd_pkt_ptr_valid.u64 =
+ cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ (ipd_pkt_ptr_valid.s.ptr << 7),
+ CVMX_FPA_PACKET_POOL, 0);
+ }
+
+ /* Free the per port prefetched packets */
+ if (1) {
+ int i;
+ union cvmx_ipd_prc_port_ptr_fifo_ctl
+ ipd_prc_port_ptr_fifo_ctl;
+ ipd_prc_port_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
+ i++) {
+ ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
+ ipd_prc_port_ptr_fifo_ctl.s.raddr =
+ i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
+ ipd_prc_port_ptr_fifo_ctl.u64);
+ ipd_prc_port_ptr_fifo_ctl.u64 =
+ cvmx_read_csr
+ (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_prc_port_ptr_fifo_ctl.s.
+ ptr << 7), CVMX_FPA_PACKET_POOL,
+ 0);
+ }
+ ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
+ ipd_prc_port_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the holding fifo */
+ if (ipd_ptr_count.s.pfif_cnt) {
+ int i;
+ union cvmx_ipd_prc_hold_ptr_fifo_ctl
+ ipd_prc_hold_ptr_fifo_ctl;
+
+ ipd_prc_hold_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
+ ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
+ ipd_prc_hold_ptr_fifo_ctl.s.raddr =
+ (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
+ i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
+ cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
+ ipd_prc_hold_ptr_fifo_ctl.u64);
+ ipd_prc_hold_ptr_fifo_ctl.u64 =
+ cvmx_read_csr
+ (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t)
+ ipd_prc_hold_ptr_fifo_ctl.s.
+ ptr << 7), CVMX_FPA_PACKET_POOL,
+ 0);
+ }
+ ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
+ ipd_prc_hold_ptr_fifo_ctl.u64);
+ }
+
+ /* Free all packets in the fifo */
+ if (ipd_ptr_count.s.pkt_pcnt) {
+ int i;
+ union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+
+ for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
+ ipd_pwp_ptr_fifo_ctl.s.cena = 0;
+ ipd_pwp_ptr_fifo_ctl.s.raddr =
+ (ipd_pwp_ptr_fifo_ctl.s.praddr +
+ i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ ipd_pwp_ptr_fifo_ctl.u64 =
+ cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
+ cvmx_fpa_free(cvmx_phys_to_ptr
+ ((uint64_t) ipd_pwp_ptr_fifo_ctl.
+ s.ptr << 7),
+ CVMX_FPA_PACKET_POOL, 0);
+ }
+ ipd_pwp_ptr_fifo_ctl.s.cena = 1;
+ cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
+ ipd_pwp_ptr_fifo_ctl.u64);
+ }
+
+ /* Reset the IPD to get all buffers out of it */
+ {
+ union cvmx_ipd_ctl_status ipd_ctl_status;
+ ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
+ ipd_ctl_status.s.reset = 1;
+ cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
+ }
+
+ /* Reset the PIP */
+ {
+ union cvmx_pip_sft_rst pip_sft_rst;
+ pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
+ pip_sft_rst.s.rst = 1;
+ cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
+ }
+ }
+}
+
+#endif /* __CVMX_IPD_H__ */
diff --git a/drivers/staging/octeon/cvmx-mdio.h b/drivers/staging/octeon/cvmx-mdio.h
new file mode 100644
index 000000000000..c987a75a20cf
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-mdio.h
@@ -0,0 +1,506 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
+ * clause 22 and clause 45 operations.
+ *
+ */
+
+#ifndef __CVMX_MDIO_H__
+#define __CVMX_MDIO_H__
+
+#include "cvmx-smix-defs.h"
+
+/**
+ * PHY register 0 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL 0
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t reset:1;
+ uint16_t loopback:1;
+ uint16_t speed_lsb:1;
+ uint16_t autoneg_enable:1;
+ uint16_t power_down:1;
+ uint16_t isolate:1;
+ uint16_t restart_autoneg:1;
+ uint16_t duplex:1;
+ uint16_t collision_test:1;
+ uint16_t speed_msb:1;
+ uint16_t unidirectional_enable:1;
+ uint16_t reserved_0_4:5;
+ } s;
+} cvmx_mdio_phy_reg_control_t;
+
+/**
+ * PHY register 1 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS 1
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t capable_100base_t4:1;
+ uint16_t capable_100base_x_full:1;
+ uint16_t capable_100base_x_half:1;
+ uint16_t capable_10_full:1;
+ uint16_t capable_10_half:1;
+ uint16_t capable_100base_t2_full:1;
+ uint16_t capable_100base_t2_half:1;
+ uint16_t capable_extended_status:1;
+ uint16_t capable_unidirectional:1;
+ uint16_t capable_mf_preamble_suppression:1;
+ uint16_t autoneg_complete:1;
+ uint16_t remote_fault:1;
+ uint16_t capable_autoneg:1;
+ uint16_t link_status:1;
+ uint16_t jabber_detect:1;
+ uint16_t capable_extended_registers:1;
+
+ } s;
+} cvmx_mdio_phy_reg_status_t;
+
+/**
+ * PHY register 2 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID1 2
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t oui_bits_3_18;
+ } s;
+} cvmx_mdio_phy_reg_id1_t;
+
+/**
+ * PHY register 3 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_ID2 3
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t oui_bits_19_24:6;
+ uint16_t model:6;
+ uint16_t revision:4;
+ } s;
+} cvmx_mdio_phy_reg_id2_t;
+
+/**
+ * PHY register 4 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t next_page:1;
+ uint16_t reserved_14:1;
+ uint16_t remote_fault:1;
+ uint16_t reserved_12:1;
+ uint16_t asymmetric_pause:1;
+ uint16_t pause:1;
+ uint16_t advert_100base_t4:1;
+ uint16_t advert_100base_tx_full:1;
+ uint16_t advert_100base_tx_half:1;
+ uint16_t advert_10base_tx_full:1;
+ uint16_t advert_10base_tx_half:1;
+ uint16_t selector:5;
+ } s;
+} cvmx_mdio_phy_reg_autoneg_adver_t;
+
+/**
+ * PHY register 5 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t next_page:1;
+ uint16_t ack:1;
+ uint16_t remote_fault:1;
+ uint16_t reserved_12:1;
+ uint16_t asymmetric_pause:1;
+ uint16_t pause:1;
+ uint16_t advert_100base_t4:1;
+ uint16_t advert_100base_tx_full:1;
+ uint16_t advert_100base_tx_half:1;
+ uint16_t advert_10base_tx_full:1;
+ uint16_t advert_10base_tx_half:1;
+ uint16_t selector:5;
+ } s;
+} cvmx_mdio_phy_reg_link_partner_ability_t;
+
+/**
+ * PHY register 6 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t reserved_5_15:11;
+ uint16_t parallel_detection_fault:1;
+ uint16_t link_partner_next_page_capable:1;
+ uint16_t local_next_page_capable:1;
+ uint16_t page_received:1;
+ uint16_t link_partner_autoneg_capable:1;
+
+ } s;
+} cvmx_mdio_phy_reg_autoneg_expansion_t;
+
+/**
+ * PHY register 9 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t test_mode:3;
+ uint16_t manual_master_slave:1;
+ uint16_t master:1;
+ uint16_t port_type:1;
+ uint16_t advert_1000base_t_full:1;
+ uint16_t advert_1000base_t_half:1;
+ uint16_t reserved_0_7:8;
+ } s;
+} cvmx_mdio_phy_reg_control_1000_t;
+
+/**
+ * PHY register 10 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_STATUS_1000 10
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t master_slave_fault:1;
+ uint16_t is_master:1;
+ uint16_t local_receiver_ok:1;
+ uint16_t remote_receiver_ok:1;
+ uint16_t remote_capable_1000base_t_full:1;
+ uint16_t remote_capable_1000base_t_half:1;
+ uint16_t reserved_8_9:2;
+ uint16_t idle_error_count:8;
+ } s;
+} cvmx_mdio_phy_reg_status_1000_t;
+
+/**
+ * PHY register 15 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t capable_1000base_x_full:1;
+ uint16_t capable_1000base_x_half:1;
+ uint16_t capable_1000base_t_full:1;
+ uint16_t capable_1000base_t_half:1;
+ uint16_t reserved_0_11:12;
+ } s;
+} cvmx_mdio_phy_reg_extended_status_t;
+
+/**
+ * PHY register 13 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t function:2;
+ uint16_t reserved_5_13:9;
+ uint16_t devad:5;
+ } s;
+} cvmx_mdio_phy_reg_mmd_control_t;
+
+/**
+ * PHY register 14 from the 802.3 spec
+ */
+#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
+typedef union {
+ uint16_t u16;
+ struct {
+ uint16_t address_data:16;
+ } s;
+} cvmx_mdio_phy_reg_mmd_address_data_t;
+
+/* Operating request encodings. */
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
+
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_READ_INC 2
+#define MDIO_CLAUSE_45_READ 3
+
+/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_CL22_EXT 29
+#define CVMX_MMD_DEVICE_VENDOR_1 30
+#define CVMX_MMD_DEVICE_VENDOR_2 31
+
+/* Helper function to put MDIO interface into clause 45 mode */
+static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
+{
+ union cvmx_smix_clk smi_clk;
+ /* Put bus into clause 45 mode */
+ smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
+ smi_clk.s.mode = 1;
+ smi_clk.s.preamble = 1;
+ cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
+}
+
+/* Helper function to put MDIO interface into clause 22 mode */
+static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
+{
+ union cvmx_smix_clk smi_clk;
+ /* Put bus into clause 22 mode */
+ smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
+ smi_clk.s.mode = 0;
+ cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
+}
+
+/**
+ * Perform an MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @phy_id: The MII phy id
+ * @location: Register location to read
+ *
+ * Returns Result from the read or -1 on failure
+ */
+static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ int timeout = 1000;
+
+ if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ __cvmx_mdio_set_clause22_mode(bus_id);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
+ } while (smi_rd.s.pending && timeout--);
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -1;
+}
+
+/**
+ * Perform an MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @phy_id: The MII phy id
+ * @location: Register location to write
+ * @val: Value to write
+ *
+ * Returns -1 on error
+ * 0 on success
+ */
+static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ __cvmx_mdio_set_clause22_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0)
+ return -1;
+
+ return 0;
+}
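+
+/*
+ * Illustrative clause 22 usage (the bus number, PHY address, and the
+ * decision to restart autonegotiation are assumptions for the
+ * example, not requirements of this API): read-modify-write of the
+ * 802.3 control register using the typedefs above.
+ *
+ *   cvmx_mdio_phy_reg_control_t control;
+ *   int reg = cvmx_mdio_read(0, 1, CVMX_MDIO_PHY_REG_CONTROL);
+ *   if (reg >= 0) {
+ *           control.u16 = reg;
+ *           control.s.autoneg_enable = 1;
+ *           control.s.restart_autoneg = 1;
+ *           cvmx_mdio_write(0, 1, CVMX_MDIO_PHY_REG_CONTROL,
+ *                           control.u16);
+ *   }
+ */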
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII read. This function is used to
+ * read PHY registers controlling auto negotiation.
+ *
+ * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @phy_id: The MII phy id
+ * @device: MDIO Manageable Device (MMD) id
+ * @location: Register location to read
+ *
+ * Returns Result from the read or -1 on failure
+ */
+
+static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
+ int location)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ return -1;
+
+ __cvmx_mdio_set_clause45_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = location;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0) {
+ cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
+ "device %2d register %2d TIME OUT(address)\n",
+ bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
+ } while (smi_rd.s.pending && timeout--);
+
+ if (timeout <= 0) {
+ cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
+ "device %2d register %2d TIME OUT(data)\n",
+ bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else {
+ cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
+ "device %2d register %2d INVALID READ\n",
+ bus_id, phy_id, device, location);
+ return -1;
+ }
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. This function is used to
+ * write PHY registers controlling auto negotiation.
+ *
+ * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @phy_id: The MII phy id
+ * @device: MDIO Manageable Device (MMD) id
+ * @location: Register location to write
+ * @val: Value to write
+ *
+ * Returns -1 on error
+ * 0 on success
+ */
+static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
+ int location, int val)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
+ return -1;
+
+ __cvmx_mdio_set_clause45_mode(bus_id);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = location;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0)
+ return -1;
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
+
+ do {
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
+ } while (smi_wr.s.pending && --timeout);
+ if (timeout <= 0)
+ return -1;
+
+ return 0;
+}
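+
+/*
+ * Illustrative clause 45 usage (the bus and PHY address are
+ * assumptions for the example): read register 1 of the PMA/PMD
+ * device. A negative return means the address or data phase timed
+ * out, the read was invalid, or the chip has no clause 45 support.
+ *
+ *   int status = cvmx_mdio_45_read(0, 0, CVMX_MMD_DEVICE_PMA_PMD, 1);
+ */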
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-packet.h b/drivers/staging/octeon/cvmx-packet.h
new file mode 100644
index 000000000000..62ffe78a8c81
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-packet.h
@@ -0,0 +1,65 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Packet buffer defines.
+ */
+
+#ifndef __CVMX_PACKET_H__
+#define __CVMX_PACKET_H__
+
+/**
+ * This structure defines a buffer pointer on Octeon
+ */
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+ /*
+ * if set, invert the "free" pick of the overall
+ * packet. HW always sets this bit to 0 on inbound
+ * packet
+ */
+ uint64_t i:1;
+ /*
+ * Indicates the amount to back up to get to the
+ * buffer start in cache lines. In most cases this is
+ * less than one complete cache line, so the value is
+ * zero.
+ */
+ uint64_t back:4;
+ /* The pool that the buffer came from / goes to */
+ uint64_t pool:3;
+ /* The size of the segment pointed to by addr (in bytes) */
+ uint64_t size:16;
+ /* Pointer to the first byte of the data, NOT buffer */
+ uint64_t addr:40;
+ } s;
+};
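+
+/*
+ * Sketch of how a caller can recover the start of the underlying
+ * buffer from a buffer pointer handed back by hardware (assumes the
+ * physical-to-virtual helper from the main cvmx headers): round the
+ * address down to its 128-byte cache line and back up "back" lines.
+ *
+ *   void *start =
+ *           cvmx_phys_to_ptr(((buf.s.addr >> 7) - buf.s.back) << 7);
+ */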
+
+#endif /* __CVMX_PACKET_H__ */
diff --git a/drivers/staging/octeon/cvmx-pcsx-defs.h b/drivers/staging/octeon/cvmx-pcsx-defs.h
new file mode 100644
index 000000000000..d45952df5f5b
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pcsx-defs.h
@@ -0,0 +1,370 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PCSX_DEFS_H__
+#define __CVMX_PCSX_DEFS_H__
+
+#define CVMX_PCSX_ANX_ADV_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001010ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_ANX_EXT_ST_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001028ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_ANX_LP_ABIL_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001018ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_ANX_RESULTS_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001020ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_INTX_EN_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001088ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_INTX_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001080ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_LINKX_TIMER_COUNT_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001040ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_LOG_ANLX_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001090ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_MISCX_CTL_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001078ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_MRX_CONTROL_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001000ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_MRX_STATUS_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001008ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_RXX_STATES_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001058ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_RXX_SYNC_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001050ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_SGMX_AN_ADV_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001068ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_SGMX_LP_ADV_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001070ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_TXX_STATES_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001060ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSX_TX_RXX_POLARITY_REG(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0001048ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_pcsx_anx_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_adv_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t np:1;
+ uint64_t reserved_14_14:1;
+ uint64_t rem_flt:2;
+ uint64_t reserved_9_11:3;
+ uint64_t pause:2;
+ uint64_t hfd:1;
+ uint64_t fd:1;
+ uint64_t reserved_0_4:5;
+ } s;
+ struct cvmx_pcsx_anx_adv_reg_s cn52xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_adv_reg_s cn56xx;
+ struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_anx_ext_st_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_ext_st_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t thou_xfd:1;
+ uint64_t thou_xhd:1;
+ uint64_t thou_tfd:1;
+ uint64_t thou_thd:1;
+ uint64_t reserved_0_11:12;
+ } s;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
+ struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_anx_lp_abil_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_lp_abil_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t np:1;
+ uint64_t ack:1;
+ uint64_t rem_flt:2;
+ uint64_t reserved_9_11:3;
+ uint64_t pause:2;
+ uint64_t hfd:1;
+ uint64_t fd:1;
+ uint64_t reserved_0_4:5;
+ } s;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
+ struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_anx_results_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_anx_results_reg_s {
+ uint64_t reserved_7_63:57;
+ uint64_t pause:2;
+ uint64_t spd:2;
+ uint64_t an_cpt:1;
+ uint64_t dup:1;
+ uint64_t link_ok:1;
+ } s;
+ struct cvmx_pcsx_anx_results_reg_s cn52xx;
+ struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
+ struct cvmx_pcsx_anx_results_reg_s cn56xx;
+ struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_intx_en_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_intx_en_reg_s {
+ uint64_t reserved_12_63:52;
+ uint64_t dup:1;
+ uint64_t sync_bad_en:1;
+ uint64_t an_bad_en:1;
+ uint64_t rxlock_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t rxerr_en:1;
+ uint64_t txbad_en:1;
+ uint64_t txfifo_en:1;
+ uint64_t txfifu_en:1;
+ uint64_t an_err_en:1;
+ uint64_t xmit_en:1;
+ uint64_t lnkspd_en:1;
+ } s;
+ struct cvmx_pcsx_intx_en_reg_s cn52xx;
+ struct cvmx_pcsx_intx_en_reg_s cn52xxp1;
+ struct cvmx_pcsx_intx_en_reg_s cn56xx;
+ struct cvmx_pcsx_intx_en_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_intx_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_intx_reg_s {
+ uint64_t reserved_12_63:52;
+ uint64_t dup:1;
+ uint64_t sync_bad:1;
+ uint64_t an_bad:1;
+ uint64_t rxlock:1;
+ uint64_t rxbad:1;
+ uint64_t rxerr:1;
+ uint64_t txbad:1;
+ uint64_t txfifo:1;
+ uint64_t txfifu:1;
+ uint64_t an_err:1;
+ uint64_t xmit:1;
+ uint64_t lnkspd:1;
+ } s;
+ struct cvmx_pcsx_intx_reg_s cn52xx;
+ struct cvmx_pcsx_intx_reg_s cn52xxp1;
+ struct cvmx_pcsx_intx_reg_s cn56xx;
+ struct cvmx_pcsx_intx_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_linkx_timer_count_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_linkx_timer_count_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t count:16;
+ } s;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
+ struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_log_anlx_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_log_anlx_reg_s {
+ uint64_t reserved_4_63:60;
+ uint64_t lafifovfl:1;
+ uint64_t la_en:1;
+ uint64_t pkt_sz:2;
+ } s;
+ struct cvmx_pcsx_log_anlx_reg_s cn52xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
+ struct cvmx_pcsx_log_anlx_reg_s cn56xx;
+ struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_miscx_ctl_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_miscx_ctl_reg_s {
+ uint64_t reserved_13_63:51;
+ uint64_t sgmii:1;
+ uint64_t gmxeno:1;
+ uint64_t loopbck2:1;
+ uint64_t mac_phy:1;
+ uint64_t mode:1;
+ uint64_t an_ovrd:1;
+ uint64_t samp_pt:7;
+ } s;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
+ struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_mrx_control_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_mrx_control_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t reset:1;
+ uint64_t loopbck1:1;
+ uint64_t spdlsb:1;
+ uint64_t an_en:1;
+ uint64_t pwr_dn:1;
+ uint64_t reserved_10_10:1;
+ uint64_t rst_an:1;
+ uint64_t dup:1;
+ uint64_t coltst:1;
+ uint64_t spdmsb:1;
+ uint64_t uni:1;
+ uint64_t reserved_0_4:5;
+ } s;
+ struct cvmx_pcsx_mrx_control_reg_s cn52xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
+ struct cvmx_pcsx_mrx_control_reg_s cn56xx;
+ struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_mrx_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_mrx_status_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t hun_t4:1;
+ uint64_t hun_xfd:1;
+ uint64_t hun_xhd:1;
+ uint64_t ten_fd:1;
+ uint64_t ten_hd:1;
+ uint64_t hun_t2fd:1;
+ uint64_t hun_t2hd:1;
+ uint64_t ext_st:1;
+ uint64_t reserved_7_7:1;
+ uint64_t prb_sup:1;
+ uint64_t an_cpt:1;
+ uint64_t rm_flt:1;
+ uint64_t an_abil:1;
+ uint64_t lnk_st:1;
+ uint64_t reserved_1_1:1;
+ uint64_t extnd:1;
+ } s;
+ struct cvmx_pcsx_mrx_status_reg_s cn52xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
+ struct cvmx_pcsx_mrx_status_reg_s cn56xx;
+ struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_rxx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_rxx_states_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t rx_bad:1;
+ uint64_t rx_st:5;
+ uint64_t sync_bad:1;
+ uint64_t sync:4;
+ uint64_t an_bad:1;
+ uint64_t an_st:4;
+ } s;
+ struct cvmx_pcsx_rxx_states_reg_s cn52xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
+ struct cvmx_pcsx_rxx_states_reg_s cn56xx;
+ struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_rxx_sync_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_rxx_sync_reg_s {
+ uint64_t reserved_2_63:62;
+ uint64_t sync:1;
+ uint64_t bit_lock:1;
+ } s;
+ struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
+ struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
+ struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_sgmx_an_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t link:1;
+ uint64_t ack:1;
+ uint64_t reserved_13_13:1;
+ uint64_t dup:1;
+ uint64_t speed:2;
+ uint64_t reserved_1_9:9;
+ uint64_t one:1;
+ } s;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
+ struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_sgmx_lp_adv_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t link:1;
+ uint64_t reserved_13_14:2;
+ uint64_t dup:1;
+ uint64_t speed:2;
+ uint64_t reserved_1_9:9;
+ uint64_t one:1;
+ } s;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
+ struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_txx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_txx_states_reg_s {
+ uint64_t reserved_7_63:57;
+ uint64_t xmit:2;
+ uint64_t tx_bad:1;
+ uint64_t ord_st:4;
+ } s;
+ struct cvmx_pcsx_txx_states_reg_s cn52xx;
+ struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
+ struct cvmx_pcsx_txx_states_reg_s cn56xx;
+ struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
+};
+
+union cvmx_pcsx_tx_rxx_polarity_reg {
+ uint64_t u64;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s {
+ uint64_t reserved_4_63:60;
+ uint64_t rxovrd:1;
+ uint64_t autorxpl:1;
+ uint64_t rxplrt:1;
+ uint64_t txplrt:1;
+ } s;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
+ struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-pcsxx-defs.h b/drivers/staging/octeon/cvmx-pcsxx-defs.h
new file mode 100644
index 000000000000..55d120fe8aed
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pcsxx-defs.h
@@ -0,0 +1,316 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PCSXX_DEFS_H__
+#define __CVMX_PCSXX_DEFS_H__
+
+#define CVMX_PCSXX_10GBX_STATUS_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000828ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_BIST_STATUS_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000870ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_BIT_LOCK_STATUS_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000850ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_CONTROL1_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000800ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_CONTROL2_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000818ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_INT_EN_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000860ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_INT_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000858ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_LOG_ANL_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000868ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_MISC_CTL_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000848ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_RX_SYNC_STATES_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000838ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_SPD_ABIL_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000810ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_STATUS1_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000808ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_STATUS2_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000820ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_TX_RX_POLARITY_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000840ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_PCSXX_TX_RX_STATES_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800B0000830ull + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_pcsxx_10gbx_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_10gbx_status_reg_s {
+ uint64_t reserved_13_63:51;
+ uint64_t alignd:1;
+ uint64_t pattst:1;
+ uint64_t reserved_4_10:7;
+ uint64_t l3sync:1;
+ uint64_t l2sync:1;
+ uint64_t l1sync:1;
+ uint64_t l0sync:1;
+ } s;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
+ struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_bist_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_bist_status_reg_s {
+ uint64_t reserved_1_63:63;
+ uint64_t bist_status:1;
+ } s;
+ struct cvmx_pcsxx_bist_status_reg_s cn52xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_bist_status_reg_s cn56xx;
+ struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_bit_lock_status_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_bit_lock_status_reg_s {
+ uint64_t reserved_4_63:60;
+ uint64_t bitlck3:1;
+ uint64_t bitlck2:1;
+ uint64_t bitlck1:1;
+ uint64_t bitlck0:1;
+ } s;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
+ struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_control1_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_control1_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t reset:1;
+ uint64_t loopbck1:1;
+ uint64_t spdsel1:1;
+ uint64_t reserved_12_12:1;
+ uint64_t lo_pwr:1;
+ uint64_t reserved_7_10:4;
+ uint64_t spdsel0:1;
+ uint64_t spd:4;
+ uint64_t reserved_0_1:2;
+ } s;
+ struct cvmx_pcsxx_control1_reg_s cn52xx;
+ struct cvmx_pcsxx_control1_reg_s cn52xxp1;
+ struct cvmx_pcsxx_control1_reg_s cn56xx;
+ struct cvmx_pcsxx_control1_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_control2_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_control2_reg_s {
+ uint64_t reserved_2_63:62;
+ uint64_t type:2;
+ } s;
+ struct cvmx_pcsxx_control2_reg_s cn52xx;
+ struct cvmx_pcsxx_control2_reg_s cn52xxp1;
+ struct cvmx_pcsxx_control2_reg_s cn56xx;
+ struct cvmx_pcsxx_control2_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_int_en_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_int_en_reg_s {
+ uint64_t reserved_6_63:58;
+ uint64_t algnlos_en:1;
+ uint64_t synlos_en:1;
+ uint64_t bitlckls_en:1;
+ uint64_t rxsynbad_en:1;
+ uint64_t rxbad_en:1;
+ uint64_t txflt_en:1;
+ } s;
+ struct cvmx_pcsxx_int_en_reg_s cn52xx;
+ struct cvmx_pcsxx_int_en_reg_s cn52xxp1;
+ struct cvmx_pcsxx_int_en_reg_s cn56xx;
+ struct cvmx_pcsxx_int_en_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_int_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_int_reg_s {
+ uint64_t reserved_6_63:58;
+ uint64_t algnlos:1;
+ uint64_t synlos:1;
+ uint64_t bitlckls:1;
+ uint64_t rxsynbad:1;
+ uint64_t rxbad:1;
+ uint64_t txflt:1;
+ } s;
+ struct cvmx_pcsxx_int_reg_s cn52xx;
+ struct cvmx_pcsxx_int_reg_s cn52xxp1;
+ struct cvmx_pcsxx_int_reg_s cn56xx;
+ struct cvmx_pcsxx_int_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_log_anl_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_log_anl_reg_s {
+ uint64_t reserved_7_63:57;
+ uint64_t enc_mode:1;
+ uint64_t drop_ln:2;
+ uint64_t lafifovfl:1;
+ uint64_t la_en:1;
+ uint64_t pkt_sz:2;
+ } s;
+ struct cvmx_pcsxx_log_anl_reg_s cn52xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
+ struct cvmx_pcsxx_log_anl_reg_s cn56xx;
+ struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_misc_ctl_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_misc_ctl_reg_s {
+ uint64_t reserved_4_63:60;
+ uint64_t tx_swap:1;
+ uint64_t rx_swap:1;
+ uint64_t xaui:1;
+ uint64_t gmxeno:1;
+ } s;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
+ struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_rx_sync_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_rx_sync_states_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t sync3st:4;
+ uint64_t sync2st:4;
+ uint64_t sync1st:4;
+ uint64_t sync0st:4;
+ } s;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
+ struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_spd_abil_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_spd_abil_reg_s {
+ uint64_t reserved_2_63:62;
+ uint64_t tenpasst:1;
+ uint64_t tengb:1;
+ } s;
+ struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
+ struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
+ struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_status1_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_status1_reg_s {
+ uint64_t reserved_8_63:56;
+ uint64_t flt:1;
+ uint64_t reserved_3_6:4;
+ uint64_t rcv_lnk:1;
+ uint64_t lpable:1;
+ uint64_t reserved_0_0:1;
+ } s;
+ struct cvmx_pcsxx_status1_reg_s cn52xx;
+ struct cvmx_pcsxx_status1_reg_s cn52xxp1;
+ struct cvmx_pcsxx_status1_reg_s cn56xx;
+ struct cvmx_pcsxx_status1_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_status2_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_status2_reg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t dev:2;
+ uint64_t reserved_12_13:2;
+ uint64_t xmtflt:1;
+ uint64_t rcvflt:1;
+ uint64_t reserved_3_9:7;
+ uint64_t tengb_w:1;
+ uint64_t tengb_x:1;
+ uint64_t tengb_r:1;
+ } s;
+ struct cvmx_pcsxx_status2_reg_s cn52xx;
+ struct cvmx_pcsxx_status2_reg_s cn52xxp1;
+ struct cvmx_pcsxx_status2_reg_s cn56xx;
+ struct cvmx_pcsxx_status2_reg_s cn56xxp1;
+};
+
+union cvmx_pcsxx_tx_rx_polarity_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s {
+ uint64_t reserved_10_63:54;
+ uint64_t xor_rxplrt:4;
+ uint64_t xor_txplrt:4;
+ uint64_t rxplrt:1;
+ uint64_t txplrt:1;
+ } s;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
+ uint64_t reserved_2_63:62;
+ uint64_t rxplrt:1;
+ uint64_t txplrt:1;
+ } cn52xxp1;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
+ struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
+};
+
+union cvmx_pcsxx_tx_rx_states_reg {
+ uint64_t u64;
+ struct cvmx_pcsxx_tx_rx_states_reg_s {
+ uint64_t reserved_14_63:50;
+ uint64_t term_err:1;
+ uint64_t syn3bad:1;
+ uint64_t syn2bad:1;
+ uint64_t syn1bad:1;
+ uint64_t syn0bad:1;
+ uint64_t rxbad:1;
+ uint64_t algn_st:3;
+ uint64_t rx_st:2;
+ uint64_t tx_st:3;
+ } s;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
+ uint64_t reserved_13_63:51;
+ uint64_t syn3bad:1;
+ uint64_t syn2bad:1;
+ uint64_t syn1bad:1;
+ uint64_t syn0bad:1;
+ uint64_t rxbad:1;
+ uint64_t algn_st:3;
+ uint64_t rx_st:2;
+ uint64_t tx_st:3;
+ } cn52xxp1;
+ struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
+ struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-pip-defs.h b/drivers/staging/octeon/cvmx-pip-defs.h
new file mode 100644
index 000000000000..5a369100ca68
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pip-defs.h
@@ -0,0 +1,1267 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PIP_DEFS_H__
+#define __CVMX_PIP_DEFS_H__
+
+/*
+ * Enumeration representing the amount of packet processing
+ * and validation performed by the input hardware.
+ */
+enum cvmx_pip_port_parse_mode {
+ /*
+ * Packet input doesn't perform any processing of the input
+ * packet.
+ */
+ CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
+ /*
+ * Full packet processing is performed with pointer starting
+ * at the L2 (ethernet MAC) header.
+ */
+ CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
+ /*
+ * Input packets are assumed to be IP. Results from non IP
+ * packets is undefined. Pointers reference the beginning of
+ * the IP header.
+ */
+ CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull
+};
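For orientation only (not part of the patch): this parse mode is what ends up in the MODE field of PIP_PRT_CFGX, whose layout appears further down in this file as union cvmx_pip_prt_cfgx. A minimal sketch, assuming the cvmx_read_csr()/cvmx_write_csr() accessors provided elsewhere in the Octeon HAL:

	/* Illustrative sketch: select full L2 parsing on input port 0. */
	union cvmx_pip_prt_cfgx prt_cfg;

	prt_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(0));
	prt_cfg.s.mode = CVMX_PIP_PORT_CFG_MODE_SKIPL2; /* parse from the L2 header */
	cvmx_write_csr(CVMX_PIP_PRT_CFGX(0), prt_cfg.u64);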
+
+#define CVMX_PIP_BCK_PRS \
+ CVMX_ADD_IO_SEG(0x00011800A0000038ull)
+#define CVMX_PIP_BIST_STATUS \
+ CVMX_ADD_IO_SEG(0x00011800A0000000ull)
+#define CVMX_PIP_CRC_CTLX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000040ull + (((offset) & 1) * 8))
+#define CVMX_PIP_CRC_IVX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000050ull + (((offset) & 1) * 8))
+#define CVMX_PIP_DEC_IPSECX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000080ull + (((offset) & 3) * 8))
+#define CVMX_PIP_DSA_SRC_GRP \
+ CVMX_ADD_IO_SEG(0x00011800A0000190ull)
+#define CVMX_PIP_DSA_VID_GRP \
+ CVMX_ADD_IO_SEG(0x00011800A0000198ull)
+#define CVMX_PIP_FRM_LEN_CHKX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000180ull + (((offset) & 1) * 8))
+#define CVMX_PIP_GBL_CFG \
+ CVMX_ADD_IO_SEG(0x00011800A0000028ull)
+#define CVMX_PIP_GBL_CTL \
+ CVMX_ADD_IO_SEG(0x00011800A0000020ull)
+#define CVMX_PIP_HG_PRI_QOS \
+ CVMX_ADD_IO_SEG(0x00011800A00001A0ull)
+#define CVMX_PIP_INT_EN \
+ CVMX_ADD_IO_SEG(0x00011800A0000010ull)
+#define CVMX_PIP_INT_REG \
+ CVMX_ADD_IO_SEG(0x00011800A0000008ull)
+#define CVMX_PIP_IP_OFFSET \
+ CVMX_ADD_IO_SEG(0x00011800A0000060ull)
+#define CVMX_PIP_PRT_CFGX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000200ull + (((offset) & 63) * 8))
+#define CVMX_PIP_PRT_TAGX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000400ull + (((offset) & 63) * 8))
+#define CVMX_PIP_QOS_DIFFX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000600ull + (((offset) & 63) * 8))
+#define CVMX_PIP_QOS_VLANX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A00000C0ull + (((offset) & 7) * 8))
+#define CVMX_PIP_QOS_WATCHX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000100ull + (((offset) & 7) * 8))
+#define CVMX_PIP_RAW_WORD \
+ CVMX_ADD_IO_SEG(0x00011800A00000B0ull)
+#define CVMX_PIP_SFT_RST \
+ CVMX_ADD_IO_SEG(0x00011800A0000030ull)
+#define CVMX_PIP_STAT0_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000800ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT1_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000808ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT2_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000810ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT3_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000818ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT4_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000820ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT5_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000828ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT6_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000830ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT7_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000838ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT8_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000840ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT9_PRTX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0000848ull + (((offset) & 63) * 80))
+#define CVMX_PIP_STAT_CTL \
+ CVMX_ADD_IO_SEG(0x00011800A0000018ull)
+#define CVMX_PIP_STAT_INB_ERRSX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0001A10ull + (((offset) & 63) * 32))
+#define CVMX_PIP_STAT_INB_OCTSX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0001A08ull + (((offset) & 63) * 32))
+#define CVMX_PIP_STAT_INB_PKTSX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0001A00ull + (((offset) & 63) * 32))
+#define CVMX_PIP_TAG_INCX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800A0001800ull + (((offset) & 63) * 8))
+#define CVMX_PIP_TAG_MASK \
+ CVMX_ADD_IO_SEG(0x00011800A0000070ull)
+#define CVMX_PIP_TAG_SECRET \
+ CVMX_ADD_IO_SEG(0x00011800A0000068ull)
+#define CVMX_PIP_TODO_ENTRY \
+ CVMX_ADD_IO_SEG(0x00011800A0000078ull)
+
+union cvmx_pip_bck_prs {
+ uint64_t u64;
+ struct cvmx_pip_bck_prs_s {
+ uint64_t bckprs:1;
+ uint64_t reserved_13_62:50;
+ uint64_t hiwater:5;
+ uint64_t reserved_5_7:3;
+ uint64_t lowater:5;
+ } s;
+ struct cvmx_pip_bck_prs_s cn38xx;
+ struct cvmx_pip_bck_prs_s cn38xxp2;
+ struct cvmx_pip_bck_prs_s cn56xx;
+ struct cvmx_pip_bck_prs_s cn56xxp1;
+ struct cvmx_pip_bck_prs_s cn58xx;
+ struct cvmx_pip_bck_prs_s cn58xxp1;
+};
+
+union cvmx_pip_bist_status {
+ uint64_t u64;
+ struct cvmx_pip_bist_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t bist:18;
+ } s;
+ struct cvmx_pip_bist_status_s cn30xx;
+ struct cvmx_pip_bist_status_s cn31xx;
+ struct cvmx_pip_bist_status_s cn38xx;
+ struct cvmx_pip_bist_status_s cn38xxp2;
+ struct cvmx_pip_bist_status_cn50xx {
+ uint64_t reserved_17_63:47;
+ uint64_t bist:17;
+ } cn50xx;
+ struct cvmx_pip_bist_status_s cn52xx;
+ struct cvmx_pip_bist_status_s cn52xxp1;
+ struct cvmx_pip_bist_status_s cn56xx;
+ struct cvmx_pip_bist_status_s cn56xxp1;
+ struct cvmx_pip_bist_status_s cn58xx;
+ struct cvmx_pip_bist_status_s cn58xxp1;
+};
+
+union cvmx_pip_crc_ctlx {
+ uint64_t u64;
+ struct cvmx_pip_crc_ctlx_s {
+ uint64_t reserved_2_63:62;
+ uint64_t invres:1;
+ uint64_t reflect:1;
+ } s;
+ struct cvmx_pip_crc_ctlx_s cn38xx;
+ struct cvmx_pip_crc_ctlx_s cn38xxp2;
+ struct cvmx_pip_crc_ctlx_s cn58xx;
+ struct cvmx_pip_crc_ctlx_s cn58xxp1;
+};
+
+union cvmx_pip_crc_ivx {
+ uint64_t u64;
+ struct cvmx_pip_crc_ivx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t iv:32;
+ } s;
+ struct cvmx_pip_crc_ivx_s cn38xx;
+ struct cvmx_pip_crc_ivx_s cn38xxp2;
+ struct cvmx_pip_crc_ivx_s cn58xx;
+ struct cvmx_pip_crc_ivx_s cn58xxp1;
+};
+
+union cvmx_pip_dec_ipsecx {
+ uint64_t u64;
+ struct cvmx_pip_dec_ipsecx_s {
+ uint64_t reserved_18_63:46;
+ uint64_t tcp:1;
+ uint64_t udp:1;
+ uint64_t dprt:16;
+ } s;
+ struct cvmx_pip_dec_ipsecx_s cn30xx;
+ struct cvmx_pip_dec_ipsecx_s cn31xx;
+ struct cvmx_pip_dec_ipsecx_s cn38xx;
+ struct cvmx_pip_dec_ipsecx_s cn38xxp2;
+ struct cvmx_pip_dec_ipsecx_s cn50xx;
+ struct cvmx_pip_dec_ipsecx_s cn52xx;
+ struct cvmx_pip_dec_ipsecx_s cn52xxp1;
+ struct cvmx_pip_dec_ipsecx_s cn56xx;
+ struct cvmx_pip_dec_ipsecx_s cn56xxp1;
+ struct cvmx_pip_dec_ipsecx_s cn58xx;
+ struct cvmx_pip_dec_ipsecx_s cn58xxp1;
+};
+
+union cvmx_pip_dsa_src_grp {
+ uint64_t u64;
+ struct cvmx_pip_dsa_src_grp_s {
+ uint64_t map15:4;
+ uint64_t map14:4;
+ uint64_t map13:4;
+ uint64_t map12:4;
+ uint64_t map11:4;
+ uint64_t map10:4;
+ uint64_t map9:4;
+ uint64_t map8:4;
+ uint64_t map7:4;
+ uint64_t map6:4;
+ uint64_t map5:4;
+ uint64_t map4:4;
+ uint64_t map3:4;
+ uint64_t map2:4;
+ uint64_t map1:4;
+ uint64_t map0:4;
+ } s;
+ struct cvmx_pip_dsa_src_grp_s cn52xx;
+ struct cvmx_pip_dsa_src_grp_s cn52xxp1;
+ struct cvmx_pip_dsa_src_grp_s cn56xx;
+};
+
+union cvmx_pip_dsa_vid_grp {
+ uint64_t u64;
+ struct cvmx_pip_dsa_vid_grp_s {
+ uint64_t map15:4;
+ uint64_t map14:4;
+ uint64_t map13:4;
+ uint64_t map12:4;
+ uint64_t map11:4;
+ uint64_t map10:4;
+ uint64_t map9:4;
+ uint64_t map8:4;
+ uint64_t map7:4;
+ uint64_t map6:4;
+ uint64_t map5:4;
+ uint64_t map4:4;
+ uint64_t map3:4;
+ uint64_t map2:4;
+ uint64_t map1:4;
+ uint64_t map0:4;
+ } s;
+ struct cvmx_pip_dsa_vid_grp_s cn52xx;
+ struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
+ struct cvmx_pip_dsa_vid_grp_s cn56xx;
+};
+
+union cvmx_pip_frm_len_chkx {
+ uint64_t u64;
+ struct cvmx_pip_frm_len_chkx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t maxlen:16;
+ uint64_t minlen:16;
+ } s;
+ struct cvmx_pip_frm_len_chkx_s cn50xx;
+ struct cvmx_pip_frm_len_chkx_s cn52xx;
+ struct cvmx_pip_frm_len_chkx_s cn52xxp1;
+ struct cvmx_pip_frm_len_chkx_s cn56xx;
+ struct cvmx_pip_frm_len_chkx_s cn56xxp1;
+};
+
+union cvmx_pip_gbl_cfg {
+ uint64_t u64;
+ struct cvmx_pip_gbl_cfg_s {
+ uint64_t reserved_19_63:45;
+ uint64_t tag_syn:1;
+ uint64_t ip6_udp:1;
+ uint64_t max_l2:1;
+ uint64_t reserved_11_15:5;
+ uint64_t raw_shf:3;
+ uint64_t reserved_3_7:5;
+ uint64_t nip_shf:3;
+ } s;
+ struct cvmx_pip_gbl_cfg_s cn30xx;
+ struct cvmx_pip_gbl_cfg_s cn31xx;
+ struct cvmx_pip_gbl_cfg_s cn38xx;
+ struct cvmx_pip_gbl_cfg_s cn38xxp2;
+ struct cvmx_pip_gbl_cfg_s cn50xx;
+ struct cvmx_pip_gbl_cfg_s cn52xx;
+ struct cvmx_pip_gbl_cfg_s cn52xxp1;
+ struct cvmx_pip_gbl_cfg_s cn56xx;
+ struct cvmx_pip_gbl_cfg_s cn56xxp1;
+ struct cvmx_pip_gbl_cfg_s cn58xx;
+ struct cvmx_pip_gbl_cfg_s cn58xxp1;
+};
+
+union cvmx_pip_gbl_ctl {
+ uint64_t u64;
+ struct cvmx_pip_gbl_ctl_s {
+ uint64_t reserved_27_63:37;
+ uint64_t dsa_grp_tvid:1;
+ uint64_t dsa_grp_scmd:1;
+ uint64_t dsa_grp_sid:1;
+ uint64_t reserved_21_23:3;
+ uint64_t ring_en:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+ } s;
+ struct cvmx_pip_gbl_ctl_cn30xx {
+ uint64_t reserved_17_63:47;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+ } cn30xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
+ struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
+ struct cvmx_pip_gbl_ctl_s cn52xx;
+ struct cvmx_pip_gbl_ctl_s cn52xxp1;
+ struct cvmx_pip_gbl_ctl_s cn56xx;
+ struct cvmx_pip_gbl_ctl_cn56xxp1 {
+ uint64_t reserved_21_63:43;
+ uint64_t ring_en:1;
+ uint64_t reserved_17_19:3;
+ uint64_t ignrs:1;
+ uint64_t vs_wqe:1;
+ uint64_t vs_qos:1;
+ uint64_t l2_mal:1;
+ uint64_t tcp_flag:1;
+ uint64_t l4_len:1;
+ uint64_t l4_chk:1;
+ uint64_t l4_prt:1;
+ uint64_t l4_mal:1;
+ uint64_t reserved_6_7:2;
+ uint64_t ip6_eext:2;
+ uint64_t ip4_opts:1;
+ uint64_t ip_hop:1;
+ uint64_t ip_mal:1;
+ uint64_t ip_chk:1;
+ } cn56xxp1;
+ struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
+ struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
+};
+
+union cvmx_pip_hg_pri_qos {
+ uint64_t u64;
+ struct cvmx_pip_hg_pri_qos_s {
+ uint64_t reserved_11_63:53;
+ uint64_t qos:3;
+ uint64_t reserved_6_7:2;
+ uint64_t pri:6;
+ } s;
+ struct cvmx_pip_hg_pri_qos_s cn52xx;
+ struct cvmx_pip_hg_pri_qos_s cn52xxp1;
+ struct cvmx_pip_hg_pri_qos_s cn56xx;
+};
+
+union cvmx_pip_int_en {
+ uint64_t u64;
+ struct cvmx_pip_int_en_s {
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } s;
+ struct cvmx_pip_int_en_cn30xx {
+ uint64_t reserved_9_63:55;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } cn30xx;
+ struct cvmx_pip_int_en_cn30xx cn31xx;
+ struct cvmx_pip_int_en_cn30xx cn38xx;
+ struct cvmx_pip_int_en_cn30xx cn38xxp2;
+ struct cvmx_pip_int_en_cn50xx {
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+ } cn50xx;
+ struct cvmx_pip_int_en_cn52xx {
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+ } cn52xx;
+ struct cvmx_pip_int_en_cn52xx cn52xxp1;
+ struct cvmx_pip_int_en_s cn56xx;
+ struct cvmx_pip_int_en_cn56xxp1 {
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } cn56xxp1;
+ struct cvmx_pip_int_en_cn58xx {
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t reserved_9_11:3;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } cn58xx;
+ struct cvmx_pip_int_en_cn30xx cn58xxp1;
+};
+
+union cvmx_pip_int_reg {
+ uint64_t u64;
+ struct cvmx_pip_int_reg_s {
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } s;
+ struct cvmx_pip_int_reg_cn30xx {
+ uint64_t reserved_9_63:55;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } cn30xx;
+ struct cvmx_pip_int_reg_cn30xx cn31xx;
+ struct cvmx_pip_int_reg_cn30xx cn38xx;
+ struct cvmx_pip_int_reg_cn30xx cn38xxp2;
+ struct cvmx_pip_int_reg_cn50xx {
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+ } cn50xx;
+ struct cvmx_pip_int_reg_cn52xx {
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pktdrp:1;
+ } cn52xx;
+ struct cvmx_pip_int_reg_cn52xx cn52xxp1;
+ struct cvmx_pip_int_reg_s cn56xx;
+ struct cvmx_pip_int_reg_cn56xxp1 {
+ uint64_t reserved_12_63:52;
+ uint64_t lenerr:1;
+ uint64_t maxerr:1;
+ uint64_t minerr:1;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } cn56xxp1;
+ struct cvmx_pip_int_reg_cn58xx {
+ uint64_t reserved_13_63:51;
+ uint64_t punyerr:1;
+ uint64_t reserved_9_11:3;
+ uint64_t beperr:1;
+ uint64_t feperr:1;
+ uint64_t todoovr:1;
+ uint64_t skprunt:1;
+ uint64_t badtag:1;
+ uint64_t prtnxa:1;
+ uint64_t bckprs:1;
+ uint64_t crcerr:1;
+ uint64_t pktdrp:1;
+ } cn58xx;
+ struct cvmx_pip_int_reg_cn30xx cn58xxp1;
+};
+
+union cvmx_pip_ip_offset {
+ uint64_t u64;
+ struct cvmx_pip_ip_offset_s {
+ uint64_t reserved_3_63:61;
+ uint64_t offset:3;
+ } s;
+ struct cvmx_pip_ip_offset_s cn30xx;
+ struct cvmx_pip_ip_offset_s cn31xx;
+ struct cvmx_pip_ip_offset_s cn38xx;
+ struct cvmx_pip_ip_offset_s cn38xxp2;
+ struct cvmx_pip_ip_offset_s cn50xx;
+ struct cvmx_pip_ip_offset_s cn52xx;
+ struct cvmx_pip_ip_offset_s cn52xxp1;
+ struct cvmx_pip_ip_offset_s cn56xx;
+ struct cvmx_pip_ip_offset_s cn56xxp1;
+ struct cvmx_pip_ip_offset_s cn58xx;
+ struct cvmx_pip_ip_offset_s cn58xxp1;
+};
+
+union cvmx_pip_prt_cfgx {
+ uint64_t u64;
+ struct cvmx_pip_prt_cfgx_s {
+ uint64_t reserved_53_63:11;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t grp_wat_47:4;
+ uint64_t qos_wat_47:4;
+ uint64_t reserved_37_39:3;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t hg_qos:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t qos_vsel:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t higig_en:1;
+ uint64_t dsa_en:1;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+ } s;
+ struct cvmx_pip_prt_cfgx_cn30xx {
+ uint64_t reserved_37_63:27;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_18_19:2;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_10_15:6;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+ } cn30xx;
+ struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
+ struct cvmx_pip_prt_cfgx_cn38xx {
+ uint64_t reserved_37_63:27;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_18_19:2;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t reserved_10_11:2;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+ } cn38xx;
+ struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
+ struct cvmx_pip_prt_cfgx_cn50xx {
+ uint64_t reserved_53_63:11;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t lenerr_en:1;
+ uint64_t maxerr_en:1;
+ uint64_t minerr_en:1;
+ uint64_t grp_wat_47:4;
+ uint64_t qos_wat_47:4;
+ uint64_t reserved_37_39:3;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t reserved_10_11:2;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+ } cn50xx;
+ struct cvmx_pip_prt_cfgx_s cn52xx;
+ struct cvmx_pip_prt_cfgx_s cn52xxp1;
+ struct cvmx_pip_prt_cfgx_s cn56xx;
+ struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
+ struct cvmx_pip_prt_cfgx_cn58xx {
+ uint64_t reserved_37_63:27;
+ uint64_t rawdrp:1;
+ uint64_t tag_inc:2;
+ uint64_t dyn_rs:1;
+ uint64_t inst_hdr:1;
+ uint64_t grp_wat:4;
+ uint64_t reserved_27_27:1;
+ uint64_t qos:3;
+ uint64_t qos_wat:4;
+ uint64_t reserved_19_19:1;
+ uint64_t qos_vod:1;
+ uint64_t qos_diff:1;
+ uint64_t qos_vlan:1;
+ uint64_t reserved_13_15:3;
+ uint64_t crc_en:1;
+ uint64_t reserved_10_11:2;
+ uint64_t mode:2;
+ uint64_t reserved_7_7:1;
+ uint64_t skip:7;
+ } cn58xx;
+ struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
+};
+
+union cvmx_pip_prt_tagx {
+ uint64_t u64;
+ struct cvmx_pip_prt_tagx_s {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } s;
+ struct cvmx_pip_prt_tagx_cn30xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t reserved_30_30:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn30xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn31xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn38xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
+ struct cvmx_pip_prt_tagx_s cn50xx;
+ struct cvmx_pip_prt_tagx_s cn52xx;
+ struct cvmx_pip_prt_tagx_s cn52xxp1;
+ struct cvmx_pip_prt_tagx_s cn56xx;
+ struct cvmx_pip_prt_tagx_s cn56xxp1;
+ struct cvmx_pip_prt_tagx_cn30xx cn58xx;
+ struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
+};
+
+union cvmx_pip_qos_diffx {
+ uint64_t u64;
+ struct cvmx_pip_qos_diffx_s {
+ uint64_t reserved_3_63:61;
+ uint64_t qos:3;
+ } s;
+ struct cvmx_pip_qos_diffx_s cn30xx;
+ struct cvmx_pip_qos_diffx_s cn31xx;
+ struct cvmx_pip_qos_diffx_s cn38xx;
+ struct cvmx_pip_qos_diffx_s cn38xxp2;
+ struct cvmx_pip_qos_diffx_s cn50xx;
+ struct cvmx_pip_qos_diffx_s cn52xx;
+ struct cvmx_pip_qos_diffx_s cn52xxp1;
+ struct cvmx_pip_qos_diffx_s cn56xx;
+ struct cvmx_pip_qos_diffx_s cn56xxp1;
+ struct cvmx_pip_qos_diffx_s cn58xx;
+ struct cvmx_pip_qos_diffx_s cn58xxp1;
+};
+
+union cvmx_pip_qos_vlanx {
+ uint64_t u64;
+ struct cvmx_pip_qos_vlanx_s {
+ uint64_t reserved_7_63:57;
+ uint64_t qos1:3;
+ uint64_t reserved_3_3:1;
+ uint64_t qos:3;
+ } s;
+ struct cvmx_pip_qos_vlanx_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t qos:3;
+ } cn30xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
+ struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
+ struct cvmx_pip_qos_vlanx_s cn52xx;
+ struct cvmx_pip_qos_vlanx_s cn52xxp1;
+ struct cvmx_pip_qos_vlanx_s cn56xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
+ struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
+ struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
+};
+
+union cvmx_pip_qos_watchx {
+ uint64_t u64;
+ struct cvmx_pip_qos_watchx_s {
+ uint64_t reserved_48_63:16;
+ uint64_t mask:16;
+ uint64_t reserved_28_31:4;
+ uint64_t grp:4;
+ uint64_t reserved_23_23:1;
+ uint64_t qos:3;
+ uint64_t reserved_19_19:1;
+ uint64_t match_type:3;
+ uint64_t match_value:16;
+ } s;
+ struct cvmx_pip_qos_watchx_cn30xx {
+ uint64_t reserved_48_63:16;
+ uint64_t mask:16;
+ uint64_t reserved_28_31:4;
+ uint64_t grp:4;
+ uint64_t reserved_23_23:1;
+ uint64_t qos:3;
+ uint64_t reserved_18_19:2;
+ uint64_t match_type:2;
+ uint64_t match_value:16;
+ } cn30xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn31xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn38xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
+ struct cvmx_pip_qos_watchx_s cn50xx;
+ struct cvmx_pip_qos_watchx_s cn52xx;
+ struct cvmx_pip_qos_watchx_s cn52xxp1;
+ struct cvmx_pip_qos_watchx_s cn56xx;
+ struct cvmx_pip_qos_watchx_s cn56xxp1;
+ struct cvmx_pip_qos_watchx_cn30xx cn58xx;
+ struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
+};
+
+union cvmx_pip_raw_word {
+ uint64_t u64;
+ struct cvmx_pip_raw_word_s {
+ uint64_t reserved_56_63:8;
+ uint64_t word:56;
+ } s;
+ struct cvmx_pip_raw_word_s cn30xx;
+ struct cvmx_pip_raw_word_s cn31xx;
+ struct cvmx_pip_raw_word_s cn38xx;
+ struct cvmx_pip_raw_word_s cn38xxp2;
+ struct cvmx_pip_raw_word_s cn50xx;
+ struct cvmx_pip_raw_word_s cn52xx;
+ struct cvmx_pip_raw_word_s cn52xxp1;
+ struct cvmx_pip_raw_word_s cn56xx;
+ struct cvmx_pip_raw_word_s cn56xxp1;
+ struct cvmx_pip_raw_word_s cn58xx;
+ struct cvmx_pip_raw_word_s cn58xxp1;
+};
+
+union cvmx_pip_sft_rst {
+ uint64_t u64;
+ struct cvmx_pip_sft_rst_s {
+ uint64_t reserved_1_63:63;
+ uint64_t rst:1;
+ } s;
+ struct cvmx_pip_sft_rst_s cn30xx;
+ struct cvmx_pip_sft_rst_s cn31xx;
+ struct cvmx_pip_sft_rst_s cn38xx;
+ struct cvmx_pip_sft_rst_s cn50xx;
+ struct cvmx_pip_sft_rst_s cn52xx;
+ struct cvmx_pip_sft_rst_s cn52xxp1;
+ struct cvmx_pip_sft_rst_s cn56xx;
+ struct cvmx_pip_sft_rst_s cn56xxp1;
+ struct cvmx_pip_sft_rst_s cn58xx;
+ struct cvmx_pip_sft_rst_s cn58xxp1;
+};
+
+union cvmx_pip_stat0_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat0_prtx_s {
+ uint64_t drp_pkts:32;
+ uint64_t drp_octs:32;
+ } s;
+ struct cvmx_pip_stat0_prtx_s cn30xx;
+ struct cvmx_pip_stat0_prtx_s cn31xx;
+ struct cvmx_pip_stat0_prtx_s cn38xx;
+ struct cvmx_pip_stat0_prtx_s cn38xxp2;
+ struct cvmx_pip_stat0_prtx_s cn50xx;
+ struct cvmx_pip_stat0_prtx_s cn52xx;
+ struct cvmx_pip_stat0_prtx_s cn52xxp1;
+ struct cvmx_pip_stat0_prtx_s cn56xx;
+ struct cvmx_pip_stat0_prtx_s cn56xxp1;
+ struct cvmx_pip_stat0_prtx_s cn58xx;
+ struct cvmx_pip_stat0_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat1_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat1_prtx_s {
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+ } s;
+ struct cvmx_pip_stat1_prtx_s cn30xx;
+ struct cvmx_pip_stat1_prtx_s cn31xx;
+ struct cvmx_pip_stat1_prtx_s cn38xx;
+ struct cvmx_pip_stat1_prtx_s cn38xxp2;
+ struct cvmx_pip_stat1_prtx_s cn50xx;
+ struct cvmx_pip_stat1_prtx_s cn52xx;
+ struct cvmx_pip_stat1_prtx_s cn52xxp1;
+ struct cvmx_pip_stat1_prtx_s cn56xx;
+ struct cvmx_pip_stat1_prtx_s cn56xxp1;
+ struct cvmx_pip_stat1_prtx_s cn58xx;
+ struct cvmx_pip_stat1_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat2_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat2_prtx_s {
+ uint64_t pkts:32;
+ uint64_t raw:32;
+ } s;
+ struct cvmx_pip_stat2_prtx_s cn30xx;
+ struct cvmx_pip_stat2_prtx_s cn31xx;
+ struct cvmx_pip_stat2_prtx_s cn38xx;
+ struct cvmx_pip_stat2_prtx_s cn38xxp2;
+ struct cvmx_pip_stat2_prtx_s cn50xx;
+ struct cvmx_pip_stat2_prtx_s cn52xx;
+ struct cvmx_pip_stat2_prtx_s cn52xxp1;
+ struct cvmx_pip_stat2_prtx_s cn56xx;
+ struct cvmx_pip_stat2_prtx_s cn56xxp1;
+ struct cvmx_pip_stat2_prtx_s cn58xx;
+ struct cvmx_pip_stat2_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat3_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat3_prtx_s {
+ uint64_t bcst:32;
+ uint64_t mcst:32;
+ } s;
+ struct cvmx_pip_stat3_prtx_s cn30xx;
+ struct cvmx_pip_stat3_prtx_s cn31xx;
+ struct cvmx_pip_stat3_prtx_s cn38xx;
+ struct cvmx_pip_stat3_prtx_s cn38xxp2;
+ struct cvmx_pip_stat3_prtx_s cn50xx;
+ struct cvmx_pip_stat3_prtx_s cn52xx;
+ struct cvmx_pip_stat3_prtx_s cn52xxp1;
+ struct cvmx_pip_stat3_prtx_s cn56xx;
+ struct cvmx_pip_stat3_prtx_s cn56xxp1;
+ struct cvmx_pip_stat3_prtx_s cn58xx;
+ struct cvmx_pip_stat3_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat4_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat4_prtx_s {
+ uint64_t h65to127:32;
+ uint64_t h64:32;
+ } s;
+ struct cvmx_pip_stat4_prtx_s cn30xx;
+ struct cvmx_pip_stat4_prtx_s cn31xx;
+ struct cvmx_pip_stat4_prtx_s cn38xx;
+ struct cvmx_pip_stat4_prtx_s cn38xxp2;
+ struct cvmx_pip_stat4_prtx_s cn50xx;
+ struct cvmx_pip_stat4_prtx_s cn52xx;
+ struct cvmx_pip_stat4_prtx_s cn52xxp1;
+ struct cvmx_pip_stat4_prtx_s cn56xx;
+ struct cvmx_pip_stat4_prtx_s cn56xxp1;
+ struct cvmx_pip_stat4_prtx_s cn58xx;
+ struct cvmx_pip_stat4_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat5_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat5_prtx_s {
+ uint64_t h256to511:32;
+ uint64_t h128to255:32;
+ } s;
+ struct cvmx_pip_stat5_prtx_s cn30xx;
+ struct cvmx_pip_stat5_prtx_s cn31xx;
+ struct cvmx_pip_stat5_prtx_s cn38xx;
+ struct cvmx_pip_stat5_prtx_s cn38xxp2;
+ struct cvmx_pip_stat5_prtx_s cn50xx;
+ struct cvmx_pip_stat5_prtx_s cn52xx;
+ struct cvmx_pip_stat5_prtx_s cn52xxp1;
+ struct cvmx_pip_stat5_prtx_s cn56xx;
+ struct cvmx_pip_stat5_prtx_s cn56xxp1;
+ struct cvmx_pip_stat5_prtx_s cn58xx;
+ struct cvmx_pip_stat5_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat6_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat6_prtx_s {
+ uint64_t h1024to1518:32;
+ uint64_t h512to1023:32;
+ } s;
+ struct cvmx_pip_stat6_prtx_s cn30xx;
+ struct cvmx_pip_stat6_prtx_s cn31xx;
+ struct cvmx_pip_stat6_prtx_s cn38xx;
+ struct cvmx_pip_stat6_prtx_s cn38xxp2;
+ struct cvmx_pip_stat6_prtx_s cn50xx;
+ struct cvmx_pip_stat6_prtx_s cn52xx;
+ struct cvmx_pip_stat6_prtx_s cn52xxp1;
+ struct cvmx_pip_stat6_prtx_s cn56xx;
+ struct cvmx_pip_stat6_prtx_s cn56xxp1;
+ struct cvmx_pip_stat6_prtx_s cn58xx;
+ struct cvmx_pip_stat6_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat7_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat7_prtx_s {
+ uint64_t fcs:32;
+ uint64_t h1519:32;
+ } s;
+ struct cvmx_pip_stat7_prtx_s cn30xx;
+ struct cvmx_pip_stat7_prtx_s cn31xx;
+ struct cvmx_pip_stat7_prtx_s cn38xx;
+ struct cvmx_pip_stat7_prtx_s cn38xxp2;
+ struct cvmx_pip_stat7_prtx_s cn50xx;
+ struct cvmx_pip_stat7_prtx_s cn52xx;
+ struct cvmx_pip_stat7_prtx_s cn52xxp1;
+ struct cvmx_pip_stat7_prtx_s cn56xx;
+ struct cvmx_pip_stat7_prtx_s cn56xxp1;
+ struct cvmx_pip_stat7_prtx_s cn58xx;
+ struct cvmx_pip_stat7_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat8_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat8_prtx_s {
+ uint64_t frag:32;
+ uint64_t undersz:32;
+ } s;
+ struct cvmx_pip_stat8_prtx_s cn30xx;
+ struct cvmx_pip_stat8_prtx_s cn31xx;
+ struct cvmx_pip_stat8_prtx_s cn38xx;
+ struct cvmx_pip_stat8_prtx_s cn38xxp2;
+ struct cvmx_pip_stat8_prtx_s cn50xx;
+ struct cvmx_pip_stat8_prtx_s cn52xx;
+ struct cvmx_pip_stat8_prtx_s cn52xxp1;
+ struct cvmx_pip_stat8_prtx_s cn56xx;
+ struct cvmx_pip_stat8_prtx_s cn56xxp1;
+ struct cvmx_pip_stat8_prtx_s cn58xx;
+ struct cvmx_pip_stat8_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat9_prtx {
+ uint64_t u64;
+ struct cvmx_pip_stat9_prtx_s {
+ uint64_t jabber:32;
+ uint64_t oversz:32;
+ } s;
+ struct cvmx_pip_stat9_prtx_s cn30xx;
+ struct cvmx_pip_stat9_prtx_s cn31xx;
+ struct cvmx_pip_stat9_prtx_s cn38xx;
+ struct cvmx_pip_stat9_prtx_s cn38xxp2;
+ struct cvmx_pip_stat9_prtx_s cn50xx;
+ struct cvmx_pip_stat9_prtx_s cn52xx;
+ struct cvmx_pip_stat9_prtx_s cn52xxp1;
+ struct cvmx_pip_stat9_prtx_s cn56xx;
+ struct cvmx_pip_stat9_prtx_s cn56xxp1;
+ struct cvmx_pip_stat9_prtx_s cn58xx;
+ struct cvmx_pip_stat9_prtx_s cn58xxp1;
+};
+
+union cvmx_pip_stat_ctl {
+ uint64_t u64;
+ struct cvmx_pip_stat_ctl_s {
+ uint64_t reserved_1_63:63;
+ uint64_t rdclr:1;
+ } s;
+ struct cvmx_pip_stat_ctl_s cn30xx;
+ struct cvmx_pip_stat_ctl_s cn31xx;
+ struct cvmx_pip_stat_ctl_s cn38xx;
+ struct cvmx_pip_stat_ctl_s cn38xxp2;
+ struct cvmx_pip_stat_ctl_s cn50xx;
+ struct cvmx_pip_stat_ctl_s cn52xx;
+ struct cvmx_pip_stat_ctl_s cn52xxp1;
+ struct cvmx_pip_stat_ctl_s cn56xx;
+ struct cvmx_pip_stat_ctl_s cn56xxp1;
+ struct cvmx_pip_stat_ctl_s cn58xx;
+ struct cvmx_pip_stat_ctl_s cn58xxp1;
+};
+
+union cvmx_pip_stat_inb_errsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_errsx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t errs:16;
+ } s;
+ struct cvmx_pip_stat_inb_errsx_s cn30xx;
+ struct cvmx_pip_stat_inb_errsx_s cn31xx;
+ struct cvmx_pip_stat_inb_errsx_s cn38xx;
+ struct cvmx_pip_stat_inb_errsx_s cn38xxp2;
+ struct cvmx_pip_stat_inb_errsx_s cn50xx;
+ struct cvmx_pip_stat_inb_errsx_s cn52xx;
+ struct cvmx_pip_stat_inb_errsx_s cn52xxp1;
+ struct cvmx_pip_stat_inb_errsx_s cn56xx;
+ struct cvmx_pip_stat_inb_errsx_s cn56xxp1;
+ struct cvmx_pip_stat_inb_errsx_s cn58xx;
+ struct cvmx_pip_stat_inb_errsx_s cn58xxp1;
+};
+
+union cvmx_pip_stat_inb_octsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_octsx_s {
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+ } s;
+ struct cvmx_pip_stat_inb_octsx_s cn30xx;
+ struct cvmx_pip_stat_inb_octsx_s cn31xx;
+ struct cvmx_pip_stat_inb_octsx_s cn38xx;
+ struct cvmx_pip_stat_inb_octsx_s cn38xxp2;
+ struct cvmx_pip_stat_inb_octsx_s cn50xx;
+ struct cvmx_pip_stat_inb_octsx_s cn52xx;
+ struct cvmx_pip_stat_inb_octsx_s cn52xxp1;
+ struct cvmx_pip_stat_inb_octsx_s cn56xx;
+ struct cvmx_pip_stat_inb_octsx_s cn56xxp1;
+ struct cvmx_pip_stat_inb_octsx_s cn58xx;
+ struct cvmx_pip_stat_inb_octsx_s cn58xxp1;
+};
+
+union cvmx_pip_stat_inb_pktsx {
+ uint64_t u64;
+ struct cvmx_pip_stat_inb_pktsx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t pkts:32;
+ } s;
+ struct cvmx_pip_stat_inb_pktsx_s cn30xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn31xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn38xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn38xxp2;
+ struct cvmx_pip_stat_inb_pktsx_s cn50xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn52xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn52xxp1;
+ struct cvmx_pip_stat_inb_pktsx_s cn56xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn56xxp1;
+ struct cvmx_pip_stat_inb_pktsx_s cn58xx;
+ struct cvmx_pip_stat_inb_pktsx_s cn58xxp1;
+};
+
+union cvmx_pip_tag_incx {
+ uint64_t u64;
+ struct cvmx_pip_tag_incx_s {
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+ } s;
+ struct cvmx_pip_tag_incx_s cn30xx;
+ struct cvmx_pip_tag_incx_s cn31xx;
+ struct cvmx_pip_tag_incx_s cn38xx;
+ struct cvmx_pip_tag_incx_s cn38xxp2;
+ struct cvmx_pip_tag_incx_s cn50xx;
+ struct cvmx_pip_tag_incx_s cn52xx;
+ struct cvmx_pip_tag_incx_s cn52xxp1;
+ struct cvmx_pip_tag_incx_s cn56xx;
+ struct cvmx_pip_tag_incx_s cn56xxp1;
+ struct cvmx_pip_tag_incx_s cn58xx;
+ struct cvmx_pip_tag_incx_s cn58xxp1;
+};
+
+union cvmx_pip_tag_mask {
+ uint64_t u64;
+ struct cvmx_pip_tag_mask_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mask:16;
+ } s;
+ struct cvmx_pip_tag_mask_s cn30xx;
+ struct cvmx_pip_tag_mask_s cn31xx;
+ struct cvmx_pip_tag_mask_s cn38xx;
+ struct cvmx_pip_tag_mask_s cn38xxp2;
+ struct cvmx_pip_tag_mask_s cn50xx;
+ struct cvmx_pip_tag_mask_s cn52xx;
+ struct cvmx_pip_tag_mask_s cn52xxp1;
+ struct cvmx_pip_tag_mask_s cn56xx;
+ struct cvmx_pip_tag_mask_s cn56xxp1;
+ struct cvmx_pip_tag_mask_s cn58xx;
+ struct cvmx_pip_tag_mask_s cn58xxp1;
+};
+
+union cvmx_pip_tag_secret {
+ uint64_t u64;
+ struct cvmx_pip_tag_secret_s {
+ uint64_t reserved_32_63:32;
+ uint64_t dst:16;
+ uint64_t src:16;
+ } s;
+ struct cvmx_pip_tag_secret_s cn30xx;
+ struct cvmx_pip_tag_secret_s cn31xx;
+ struct cvmx_pip_tag_secret_s cn38xx;
+ struct cvmx_pip_tag_secret_s cn38xxp2;
+ struct cvmx_pip_tag_secret_s cn50xx;
+ struct cvmx_pip_tag_secret_s cn52xx;
+ struct cvmx_pip_tag_secret_s cn52xxp1;
+ struct cvmx_pip_tag_secret_s cn56xx;
+ struct cvmx_pip_tag_secret_s cn56xxp1;
+ struct cvmx_pip_tag_secret_s cn58xx;
+ struct cvmx_pip_tag_secret_s cn58xxp1;
+};
+
+union cvmx_pip_todo_entry {
+ uint64_t u64;
+ struct cvmx_pip_todo_entry_s {
+ uint64_t val:1;
+ uint64_t reserved_62_62:1;
+ uint64_t entry:62;
+ } s;
+ struct cvmx_pip_todo_entry_s cn30xx;
+ struct cvmx_pip_todo_entry_s cn31xx;
+ struct cvmx_pip_todo_entry_s cn38xx;
+ struct cvmx_pip_todo_entry_s cn38xxp2;
+ struct cvmx_pip_todo_entry_s cn50xx;
+ struct cvmx_pip_todo_entry_s cn52xx;
+ struct cvmx_pip_todo_entry_s cn52xxp1;
+ struct cvmx_pip_todo_entry_s cn56xx;
+ struct cvmx_pip_todo_entry_s cn56xxp1;
+ struct cvmx_pip_todo_entry_s cn58xx;
+ struct cvmx_pip_todo_entry_s cn58xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-pip.h b/drivers/staging/octeon/cvmx-pip.h
new file mode 100644
index 000000000000..78dbce8f2c5e
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pip.h
@@ -0,0 +1,524 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Interface to the hardware Packet Input Processing unit.
+ *
+ */
+
+#ifndef __CVMX_PIP_H__
+#define __CVMX_PIP_H__
+
+#include "cvmx-wqe.h"
+#include "cvmx-fpa.h"
+#include "cvmx-pip-defs.h"
+
+#define CVMX_PIP_NUM_INPUT_PORTS 40
+#define CVMX_PIP_NUM_WATCHERS 4
+
+/*
+ * Encodes the different error and exception codes
+ */
+typedef enum {
+ CVMX_PIP_L4_NO_ERR = 0ull,
+ /*
+ * 1 = TCP (UDP) packet not long enough to cover TCP (UDP)
+ * header
+ */
+ CVMX_PIP_L4_MAL_ERR = 1ull,
+ /* 2 = TCP/UDP checksum failure */
+ CVMX_PIP_CHK_ERR = 2ull,
+ /*
+ * 3 = TCP/UDP length check (TCP/UDP length does not match IP
+ * length).
+ */
+ CVMX_PIP_L4_LENGTH_ERR = 3ull,
+ /* 4 = illegal TCP/UDP port (either source or dest port is zero) */
+ CVMX_PIP_BAD_PRT_ERR = 4ull,
+ /* 8 = TCP flags = FIN only */
+ CVMX_PIP_TCP_FLG8_ERR = 8ull,
+ /* 9 = TCP flags = 0 */
+ CVMX_PIP_TCP_FLG9_ERR = 9ull,
+ /* 10 = TCP flags = FIN+RST+* */
+ CVMX_PIP_TCP_FLG10_ERR = 10ull,
+ /* 11 = TCP flags = SYN+URG+* */
+ CVMX_PIP_TCP_FLG11_ERR = 11ull,
+ /* 12 = TCP flags = SYN+RST+* */
+ CVMX_PIP_TCP_FLG12_ERR = 12ull,
+ /* 13 = TCP flags = SYN+FIN+* */
+ CVMX_PIP_TCP_FLG13_ERR = 13ull
+} cvmx_pip_l4_err_t;
+
+typedef enum {
+
+ CVMX_PIP_IP_NO_ERR = 0ull,
+ /* 1 = not IPv4 or IPv6 */
+ CVMX_PIP_NOT_IP = 1ull,
+ /* 2 = IPv4 header checksum violation */
+ CVMX_PIP_IPV4_HDR_CHK = 2ull,
+ /* 3 = malformed (packet not long enough to cover IP hdr) */
+ CVMX_PIP_IP_MAL_HDR = 3ull,
+ /* 4 = malformed (packet not long enough to cover len in IP hdr) */
+ CVMX_PIP_IP_MAL_PKT = 4ull,
+ /* 5 = TTL / hop count equal zero */
+ CVMX_PIP_TTL_HOP = 5ull,
+ /* 6 = IPv4 options / IPv6 early extension headers */
+ CVMX_PIP_OPTS = 6ull
+} cvmx_pip_ip_exc_t;
+
+/**
+ * NOTES
+ * late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as bad FCS
+ * or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ */
+typedef enum {
+ /* No error */
+ CVMX_PIP_RX_NO_ERR = 0ull,
+ /* RGM+SPI 1 = partially received packet (buffering/bandwidth
+ * not adequate) */
+ CVMX_PIP_PARTIAL_ERR = 1ull,
+ /* RGM+SPI 2 = receive packet too large and truncated */
+ CVMX_PIP_JABBER_ERR = 2ull,
+ /*
+ * RGM 3 = max frame error (pkt len > max frame len) (with FCS
+ * error)
+ */
+ CVMX_PIP_OVER_FCS_ERR = 3ull,
+ /* RGM+SPI 4 = max frame error (pkt len > max frame len) */
+ CVMX_PIP_OVER_ERR = 4ull,
+ /*
+ * RGM 5 = nibble error (data not byte multiple - 100M and 10M
+ * only)
+ */
+ CVMX_PIP_ALIGN_ERR = 5ull,
+ /*
+ * RGM 6 = min frame error (pkt len < min frame len) (with FCS
+ * error)
+ */
+ CVMX_PIP_UNDER_FCS_ERR = 6ull,
+ /* RGM 7 = FCS error */
+ CVMX_PIP_GMX_FCS_ERR = 7ull,
+ /* RGM+SPI 8 = min frame error (pkt len < min frame len) */
+ CVMX_PIP_UNDER_ERR = 8ull,
+ /* RGM 9 = Frame carrier extend error */
+ CVMX_PIP_EXTEND_ERR = 9ull,
+ /*
+ * RGM 10 = length mismatch (len did not match len in L2
+ * length/type)
+ */
+ CVMX_PIP_LENGTH_ERR = 10ull,
+ /* RGM 11 = Frame error (some or all data bits marked err) */
+ CVMX_PIP_DAT_ERR = 11ull,
+ /* SPI 11 = DIP4 error */
+ CVMX_PIP_DIP_ERR = 11ull,
+ /*
+ * RGM 12 = packet was not large enough to pass the skipper -
+ * no inspection could occur.
+ */
+ CVMX_PIP_SKIP_ERR = 12ull,
+ /*
+ * RGM 13 = studder error (data not repeated - 100M and 10M
+ * only)
+ */
+ CVMX_PIP_NIBBLE_ERR = 13ull,
+ /* RGM+SPI 16 = FCS error */
+ CVMX_PIP_PIP_FCS = 16L,
+ /*
+ * RGM+SPI+PCI 17 = packet was not large enough to pass the
+ * skipper - no inspection could occur.
+ */
+ CVMX_PIP_PIP_SKIP_ERR = 17L,
+ /*
+ * RGM+SPI+PCI 18 = malformed l2 (packet not long enough to
+ * cover L2 hdr).
+ */
+ CVMX_PIP_PIP_L2_MAL_HDR = 18L
+ /*
+ * NOTES: xx = late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as
+ * bad FCS or carrier extend error which is
+ * CVMX_PIP_EXTEND_ERR
+ */
+} cvmx_pip_rcv_err_t;
+
+/**
+ * This defines the err_code field errors in the work Q entry
+ */
+typedef union {
+ cvmx_pip_l4_err_t l4_err;
+ cvmx_pip_ip_exc_t ip_exc;
+ cvmx_pip_rcv_err_t rcv_err;
+} cvmx_pip_err_t;
+
+/**
+ * Status statistics for a port
+ */
+typedef struct {
+ /* Inbound octets marked to be dropped by the IPD */
+ uint32_t dropped_octets;
+ /* Inbound packets marked to be dropped by the IPD */
+ uint32_t dropped_packets;
+ /* RAW PCI Packets received by PIP per port */
+ uint32_t pci_raw_packets;
+ /* Number of octets processed by PIP */
+ uint32_t octets;
+ /* Number of packets processed by PIP */
+ uint32_t packets;
+ /*
+ * Number of identified L2 multicast packets. Does not
+ * include broadcast packets. Only includes packets whose
+ * parse mode is SKIP_TO_L2
+ */
+ uint32_t multicast_packets;
+ /*
+ * Number of identified L2 broadcast packets. Does not
+ * include multicast packets. Only includes packets whose
+ * parse mode is SKIP_TO_L2
+ */
+ uint32_t broadcast_packets;
+ /* Number of 64B packets */
+ uint32_t len_64_packets;
+ /* Number of 65-127B packets */
+ uint32_t len_65_127_packets;
+ /* Number of 128-255B packets */
+ uint32_t len_128_255_packets;
+ /* Number of 256-511B packets */
+ uint32_t len_256_511_packets;
+ /* Number of 512-1023B packets */
+ uint32_t len_512_1023_packets;
+ /* Number of 1024-1518B packets */
+ uint32_t len_1024_1518_packets;
+ /* Number of 1519-max packets */
+ uint32_t len_1519_max_packets;
+ /* Number of packets with FCS or Align opcode errors */
+ uint32_t fcs_align_err_packets;
+ /* Number of packets with length < min */
+ uint32_t runt_packets;
+ /* Number of packets with length < min and FCS error */
+ uint32_t runt_crc_packets;
+ /* Number of packets with length > max */
+ uint32_t oversize_packets;
+ /* Number of packets with length > max and FCS error */
+ uint32_t oversize_crc_packets;
+ /* Number of packets without GMX/SPX/PCI errors received by PIP */
+ uint32_t inb_packets;
+ /*
+ * Total number of octets from all packets received by PIP,
+ * including CRC
+ */
+ uint64_t inb_octets;
+ /* Number of packets with GMX/SPX/PCI errors received by PIP */
+ uint16_t inb_errors;
+} cvmx_pip_port_status_t;
+
+/**
+ * Definition of the PIP custom header that can be prepended
+ * to a packet by external hardware.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ /*
+ * Documented as R - Set if the Packet is RAWFULL. If
+ * set, this header must be the full 8 bytes.
+ */
+ uint64_t rawfull:1;
+ /* Must be zero */
+ uint64_t reserved0:5;
+ /* PIP parse mode for this packet */
+ uint64_t parse_mode:2;
+ /* Must be zero */
+ uint64_t reserved1:1;
+ /*
+ * Skip amount, including this header, to the
+ * beginning of the packet
+ */
+ uint64_t skip_len:7;
+ /* Must be zero */
+ uint64_t reserved2:6;
+ /* POW input queue for this packet */
+ uint64_t qos:3;
+ /* POW input group for this packet */
+ uint64_t grp:4;
+ /*
+ * Flag to store this packet in the work queue entry,
+ * if possible
+ */
+ uint64_t rs:1;
+ /* POW input tag type */
+ uint64_t tag_type:2;
+ /* POW input tag */
+ uint64_t tag:32;
+ } s;
+} cvmx_pip_pkt_inst_hdr_t;
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Configure an ethernet input port
+ *
+ * @port_num: Port number to configure
+ * @port_cfg: Port hardware configuration
+ * @port_tag_cfg:
+ * Port POW tagging configuration
+ */
+static inline void cvmx_pip_config_port(uint64_t port_num,
+ union cvmx_pip_prt_cfgx port_cfg,
+ union cvmx_pip_prt_tagx port_tag_cfg)
+{
+ cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
+ cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
+}
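A usage sketch (assumed, not taken from the patch): a caller typically reads the current register images, adjusts the fields of interest, and hands both unions to cvmx_pip_config_port(). The particular fields and values below are chosen only for illustration.

	/* Hypothetical example: enable FCS checking on port 2 and send its
	 * packets to QoS queue 1 and POW group 0.
	 */
	union cvmx_pip_prt_cfgx port_cfg;
	union cvmx_pip_prt_tagx tag_cfg;

	port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(2));
	tag_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(2));

	port_cfg.s.crc_en = 1;	/* verify the Ethernet FCS */
	port_cfg.s.qos = 1;	/* default QoS queue for this port */
	tag_cfg.s.grp = 0;	/* POW group that receives the work entries */

	cvmx_pip_config_port(2, port_cfg, tag_cfg);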
+#if 0
+/**
+ * @deprecated This function is a thin wrapper around the Pass1 version
+ * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
+ * setting the group that is incompatible with this function,
+ * the preferred upgrade path is to use the CSR directly.
+ *
+ * Configure the global QoS packet watchers. Each watcher is
+ * capable of matching a field in a packet to determine the
+ * QoS queue for scheduling.
+ *
+ * @watcher: Watcher number to configure (0 - 3).
+ * @match_type: Watcher match type
+ * @match_value:
+ * Value the watcher will match against
+ * @qos: QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_watcher(uint64_t watcher,
+ cvmx_pip_qos_watch_types match_type,
+ uint64_t match_value, uint64_t qos)
+{
+ cvmx_pip_port_watcher_cfg_t watcher_config;
+
+ watcher_config.u64 = 0;
+ watcher_config.s.match_type = match_type;
+ watcher_config.s.match_value = match_value;
+ watcher_config.s.qos = qos;
+
+ cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), watcher_config.u64);
+}
+#endif
+/**
+ * Configure the VLAN priority to QoS queue mapping.
+ *
+ * @vlan_priority:
+ * VLAN priority (0-7)
+ * @qos: QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
+ uint64_t qos)
+{
+ union cvmx_pip_qos_vlanx pip_qos_vlanx;
+ pip_qos_vlanx.u64 = 0;
+ pip_qos_vlanx.s.qos = qos;
+ cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
+}
+
+/**
+ * Configure the Diffserv to QoS queue mapping.
+ *
+ * @diffserv: Diffserv field value (0-63)
+ * @qos: QoS queue for packets matching this watcher
+ */
+static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
+{
+ union cvmx_pip_qos_diffx pip_qos_diffx;
+ pip_qos_diffx.u64 = 0;
+ pip_qos_diffx.s.qos = qos;
+ cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
+}
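As a usage sketch (an assumption, not part of the patch), these two helpers are normally called in a loop at initialization time to populate the full priority-to-queue maps:

	/* Hypothetical init-time mapping: VLAN priority N -> queue N,
	 * DSCP codepoint N -> queue N/8.
	 */
	uint64_t pri, dscp;

	for (pri = 0; pri < 8; pri++)
		cvmx_pip_config_vlan_qos(pri, pri);
	for (dscp = 0; dscp < 64; dscp++)
		cvmx_pip_config_diffserv_qos(dscp, dscp >> 3);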
+
+/**
+ * Get the status counters for a port.
+ *
+ * @port_num: Port number to get statistics for.
+ * @clear: Set to 1 to clear the counters after they are read
+ * @status: Where to put the results.
+ */
+static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pip_port_status_t *status)
+{
+ union cvmx_pip_stat_ctl pip_stat_ctl;
+ union cvmx_pip_stat0_prtx stat0;
+ union cvmx_pip_stat1_prtx stat1;
+ union cvmx_pip_stat2_prtx stat2;
+ union cvmx_pip_stat3_prtx stat3;
+ union cvmx_pip_stat4_prtx stat4;
+ union cvmx_pip_stat5_prtx stat5;
+ union cvmx_pip_stat6_prtx stat6;
+ union cvmx_pip_stat7_prtx stat7;
+ union cvmx_pip_stat8_prtx stat8;
+ union cvmx_pip_stat9_prtx stat9;
+ union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx;
+ union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx;
+ union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx;
+
+ pip_stat_ctl.u64 = 0;
+ pip_stat_ctl.s.rdclr = clear;
+ cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
+
+ stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
+ stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
+ stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
+ stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
+ stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
+ stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
+ stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
+ stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
+ stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
+ stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
+ pip_stat_inb_pktsx.u64 =
+ cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
+ pip_stat_inb_octsx.u64 =
+ cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
+ pip_stat_inb_errsx.u64 =
+ cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
+
+ status->dropped_octets = stat0.s.drp_octs;
+ status->dropped_packets = stat0.s.drp_pkts;
+ status->octets = stat1.s.octs;
+ status->pci_raw_packets = stat2.s.raw;
+ status->packets = stat2.s.pkts;
+ status->multicast_packets = stat3.s.mcst;
+ status->broadcast_packets = stat3.s.bcst;
+ status->len_64_packets = stat4.s.h64;
+ status->len_65_127_packets = stat4.s.h65to127;
+ status->len_128_255_packets = stat5.s.h128to255;
+ status->len_256_511_packets = stat5.s.h256to511;
+ status->len_512_1023_packets = stat6.s.h512to1023;
+ status->len_1024_1518_packets = stat6.s.h1024to1518;
+ status->len_1519_max_packets = stat7.s.h1519;
+ status->fcs_align_err_packets = stat7.s.fcs;
+ status->runt_packets = stat8.s.undersz;
+ status->runt_crc_packets = stat8.s.frag;
+ status->oversize_packets = stat9.s.oversz;
+ status->oversize_crc_packets = stat9.s.jabber;
+ status->inb_packets = pip_stat_inb_pktsx.s.pkts;
+ status->inb_octets = pip_stat_inb_octsx.s.octs;
+ status->inb_errors = pip_stat_inb_errsx.s.errs;
+
+ if (cvmx_octeon_is_pass1()) {
+ /*
+ * Kludge to fix Octeon Pass 1 errata - Drop counts
+ * don't work.
+ */
+ if (status->inb_packets > status->packets)
+ status->dropped_packets =
+ status->inb_packets - status->packets;
+ else
+ status->dropped_packets = 0;
+ if (status->inb_octets - status->inb_packets * 4 >
+ status->octets)
+ status->dropped_octets =
+ status->inb_octets - status->inb_packets * 4 -
+ status->octets;
+ else
+ status->dropped_octets = 0;
+ }
+}
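A minimal caller sketch, assuming a kernel printk() is available; how the Ethernet driver actually consumes these counters is outside this header:

	/* Hypothetical example: dump a few receive counters for port 0
	 * without clearing the hardware statistics.
	 */
	cvmx_pip_port_status_t st;

	cvmx_pip_get_port_status(0, 0, &st);
	printk(KERN_INFO "port 0: %u pkts, %llu octets, %u dropped, %u errors\n",
	       st.packets, (unsigned long long)st.inb_octets,
	       st.dropped_packets, st.inb_errors);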
+
+/**
+ * Configure the hardware CRC engine
+ *
+ * @interface: Interface to configure (0 or 1)
+ * @invert_result:
+ * Invert the result of the CRC
+ * @reflect: Reflect
+ * @initialization_vector:
+ * CRC initialization vector
+ */
+static inline void cvmx_pip_config_crc(uint64_t interface,
+ uint64_t invert_result, uint64_t reflect,
+ uint32_t initialization_vector)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ union cvmx_pip_crc_ctlx config;
+ union cvmx_pip_crc_ivx pip_crc_ivx;
+
+ config.u64 = 0;
+ config.s.invres = invert_result;
+ config.s.reflect = reflect;
+ cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
+
+ pip_crc_ivx.u64 = 0;
+ pip_crc_ivx.s.iv = initialization_vector;
+ cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
+ }
+}
+
+/**
+ * Clear all bits in a tag mask. This should be called on
+ * startup before any calls to cvmx_pip_tag_mask_set. Each bit
+ * set in the final mask represents a byte used in the packet for
+ * tag generation.
+ *
+ * @mask_index: Which tag mask to clear (0..3)
+ */
+static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
+{
+ uint64_t index;
+ union cvmx_pip_tag_incx pip_tag_incx;
+ pip_tag_incx.u64 = 0;
+ pip_tag_incx.s.en = 0;
+ for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
+ cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
+}
+
+/**
+ * Sets a range of bits in the tag mask. The tag mask is used
+ * when the cvmx_pip_port_tag_cfg_t tag_mode is non-zero.
+ * There are four separate masks that can be configured.
+ *
+ * @mask_index: Which tag mask to modify (0..3)
+ * @offset: Offset into the bitmask to set bits at. Use the GCC macro
+ * offsetof() to determine the offsets into packet headers.
+ * For example, offsetof(struct ethhdr, h_proto) returns the offset
+ * of the ethernet protocol field. The bitmask selects which
+ * bytes to include in the tag, with bit offset X selecting
+ * byte at offset X from the beginning of the packet data.
+ * @len: Number of bytes to include. Usually this is the sizeof()
+ * the field.
+ */
+static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
+ uint64_t len)
+{
+ while (len--) {
+ union cvmx_pip_tag_incx pip_tag_incx;
+ uint64_t index = mask_index * 16 + offset / 8;
+ pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
+ pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
+ cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
+ offset++;
+ }
+}
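To make the offset/len convention concrete, a hedged sketch: select the Ethernet type field and the IPv4 protocol byte for tag generation. struct ethhdr, struct iphdr and ETH_HLEN come from <linux/if_ether.h> and <linux/ip.h> and are used here purely for illustration.

	/* Hypothetical tag mask 0: hash on the Ethernet type/length field and
	 * the IPv4 protocol byte.  Offsets are bytes from the start of packet
	 * data, as described above.
	 */
	cvmx_pip_tag_mask_clear(0);
	cvmx_pip_tag_mask_set(0, offsetof(struct ethhdr, h_proto), 2);
	cvmx_pip_tag_mask_set(0, ETH_HLEN + offsetof(struct iphdr, protocol), 1);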
+
+#endif /* __CVMX_PIP_H__ */
diff --git a/drivers/staging/octeon/cvmx-pko-defs.h b/drivers/staging/octeon/cvmx-pko-defs.h
new file mode 100644
index 000000000000..50e779cf1ad8
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko-defs.h
@@ -0,0 +1,1133 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_PKO_DEFS_H__
+#define __CVMX_PKO_DEFS_H__
+
+#define CVMX_PKO_MEM_COUNT0 \
+ CVMX_ADD_IO_SEG(0x0001180050001080ull)
+#define CVMX_PKO_MEM_COUNT1 \
+ CVMX_ADD_IO_SEG(0x0001180050001088ull)
+#define CVMX_PKO_MEM_DEBUG0 \
+ CVMX_ADD_IO_SEG(0x0001180050001100ull)
+#define CVMX_PKO_MEM_DEBUG1 \
+ CVMX_ADD_IO_SEG(0x0001180050001108ull)
+#define CVMX_PKO_MEM_DEBUG10 \
+ CVMX_ADD_IO_SEG(0x0001180050001150ull)
+#define CVMX_PKO_MEM_DEBUG11 \
+ CVMX_ADD_IO_SEG(0x0001180050001158ull)
+#define CVMX_PKO_MEM_DEBUG12 \
+ CVMX_ADD_IO_SEG(0x0001180050001160ull)
+#define CVMX_PKO_MEM_DEBUG13 \
+ CVMX_ADD_IO_SEG(0x0001180050001168ull)
+#define CVMX_PKO_MEM_DEBUG14 \
+ CVMX_ADD_IO_SEG(0x0001180050001170ull)
+#define CVMX_PKO_MEM_DEBUG2 \
+ CVMX_ADD_IO_SEG(0x0001180050001110ull)
+#define CVMX_PKO_MEM_DEBUG3 \
+ CVMX_ADD_IO_SEG(0x0001180050001118ull)
+#define CVMX_PKO_MEM_DEBUG4 \
+ CVMX_ADD_IO_SEG(0x0001180050001120ull)
+#define CVMX_PKO_MEM_DEBUG5 \
+ CVMX_ADD_IO_SEG(0x0001180050001128ull)
+#define CVMX_PKO_MEM_DEBUG6 \
+ CVMX_ADD_IO_SEG(0x0001180050001130ull)
+#define CVMX_PKO_MEM_DEBUG7 \
+ CVMX_ADD_IO_SEG(0x0001180050001138ull)
+#define CVMX_PKO_MEM_DEBUG8 \
+ CVMX_ADD_IO_SEG(0x0001180050001140ull)
+#define CVMX_PKO_MEM_DEBUG9 \
+ CVMX_ADD_IO_SEG(0x0001180050001148ull)
+#define CVMX_PKO_MEM_PORT_PTRS \
+ CVMX_ADD_IO_SEG(0x0001180050001010ull)
+#define CVMX_PKO_MEM_PORT_QOS \
+ CVMX_ADD_IO_SEG(0x0001180050001018ull)
+#define CVMX_PKO_MEM_PORT_RATE0 \
+ CVMX_ADD_IO_SEG(0x0001180050001020ull)
+#define CVMX_PKO_MEM_PORT_RATE1 \
+ CVMX_ADD_IO_SEG(0x0001180050001028ull)
+#define CVMX_PKO_MEM_QUEUE_PTRS \
+ CVMX_ADD_IO_SEG(0x0001180050001000ull)
+#define CVMX_PKO_MEM_QUEUE_QOS \
+ CVMX_ADD_IO_SEG(0x0001180050001008ull)
+#define CVMX_PKO_REG_BIST_RESULT \
+ CVMX_ADD_IO_SEG(0x0001180050000080ull)
+#define CVMX_PKO_REG_CMD_BUF \
+ CVMX_ADD_IO_SEG(0x0001180050000010ull)
+#define CVMX_PKO_REG_CRC_CTLX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180050000028ull + (((offset) & 1) * 8))
+#define CVMX_PKO_REG_CRC_ENABLE \
+ CVMX_ADD_IO_SEG(0x0001180050000020ull)
+#define CVMX_PKO_REG_CRC_IVX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180050000038ull + (((offset) & 1) * 8))
+#define CVMX_PKO_REG_DEBUG0 \
+ CVMX_ADD_IO_SEG(0x0001180050000098ull)
+#define CVMX_PKO_REG_DEBUG1 \
+ CVMX_ADD_IO_SEG(0x00011800500000A0ull)
+#define CVMX_PKO_REG_DEBUG2 \
+ CVMX_ADD_IO_SEG(0x00011800500000A8ull)
+#define CVMX_PKO_REG_DEBUG3 \
+ CVMX_ADD_IO_SEG(0x00011800500000B0ull)
+#define CVMX_PKO_REG_ENGINE_INFLIGHT \
+ CVMX_ADD_IO_SEG(0x0001180050000050ull)
+#define CVMX_PKO_REG_ENGINE_THRESH \
+ CVMX_ADD_IO_SEG(0x0001180050000058ull)
+#define CVMX_PKO_REG_ERROR \
+ CVMX_ADD_IO_SEG(0x0001180050000088ull)
+#define CVMX_PKO_REG_FLAGS \
+ CVMX_ADD_IO_SEG(0x0001180050000000ull)
+#define CVMX_PKO_REG_GMX_PORT_MODE \
+ CVMX_ADD_IO_SEG(0x0001180050000018ull)
+#define CVMX_PKO_REG_INT_MASK \
+ CVMX_ADD_IO_SEG(0x0001180050000090ull)
+#define CVMX_PKO_REG_QUEUE_MODE \
+ CVMX_ADD_IO_SEG(0x0001180050000048ull)
+#define CVMX_PKO_REG_QUEUE_PTRS1 \
+ CVMX_ADD_IO_SEG(0x0001180050000100ull)
+#define CVMX_PKO_REG_READ_IDX \
+ CVMX_ADD_IO_SEG(0x0001180050000008ull)
+
+union cvmx_pko_mem_count0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_count0_s {
+ uint64_t reserved_32_63:32;
+ uint64_t count:32;
+ } s;
+ struct cvmx_pko_mem_count0_s cn30xx;
+ struct cvmx_pko_mem_count0_s cn31xx;
+ struct cvmx_pko_mem_count0_s cn38xx;
+ struct cvmx_pko_mem_count0_s cn38xxp2;
+ struct cvmx_pko_mem_count0_s cn50xx;
+ struct cvmx_pko_mem_count0_s cn52xx;
+ struct cvmx_pko_mem_count0_s cn52xxp1;
+ struct cvmx_pko_mem_count0_s cn56xx;
+ struct cvmx_pko_mem_count0_s cn56xxp1;
+ struct cvmx_pko_mem_count0_s cn58xx;
+ struct cvmx_pko_mem_count0_s cn58xxp1;
+};
+
+union cvmx_pko_mem_count1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_count1_s {
+ uint64_t reserved_48_63:16;
+ uint64_t count:48;
+ } s;
+ struct cvmx_pko_mem_count1_s cn30xx;
+ struct cvmx_pko_mem_count1_s cn31xx;
+ struct cvmx_pko_mem_count1_s cn38xx;
+ struct cvmx_pko_mem_count1_s cn38xxp2;
+ struct cvmx_pko_mem_count1_s cn50xx;
+ struct cvmx_pko_mem_count1_s cn52xx;
+ struct cvmx_pko_mem_count1_s cn52xxp1;
+ struct cvmx_pko_mem_count1_s cn56xx;
+ struct cvmx_pko_mem_count1_s cn56xxp1;
+ struct cvmx_pko_mem_count1_s cn58xx;
+ struct cvmx_pko_mem_count1_s cn58xxp1;
+};
+
+union cvmx_pko_mem_debug0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug0_s {
+ uint64_t fau:28;
+ uint64_t cmd:14;
+ uint64_t segs:6;
+ uint64_t size:16;
+ } s;
+ struct cvmx_pko_mem_debug0_s cn30xx;
+ struct cvmx_pko_mem_debug0_s cn31xx;
+ struct cvmx_pko_mem_debug0_s cn38xx;
+ struct cvmx_pko_mem_debug0_s cn38xxp2;
+ struct cvmx_pko_mem_debug0_s cn50xx;
+ struct cvmx_pko_mem_debug0_s cn52xx;
+ struct cvmx_pko_mem_debug0_s cn52xxp1;
+ struct cvmx_pko_mem_debug0_s cn56xx;
+ struct cvmx_pko_mem_debug0_s cn56xxp1;
+ struct cvmx_pko_mem_debug0_s cn58xx;
+ struct cvmx_pko_mem_debug0_s cn58xxp1;
+};
+
+union cvmx_pko_mem_debug1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug1_s {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+ } s;
+ struct cvmx_pko_mem_debug1_s cn30xx;
+ struct cvmx_pko_mem_debug1_s cn31xx;
+ struct cvmx_pko_mem_debug1_s cn38xx;
+ struct cvmx_pko_mem_debug1_s cn38xxp2;
+ struct cvmx_pko_mem_debug1_s cn50xx;
+ struct cvmx_pko_mem_debug1_s cn52xx;
+ struct cvmx_pko_mem_debug1_s cn52xxp1;
+ struct cvmx_pko_mem_debug1_s cn56xx;
+ struct cvmx_pko_mem_debug1_s cn56xxp1;
+ struct cvmx_pko_mem_debug1_s cn58xx;
+ struct cvmx_pko_mem_debug1_s cn58xxp1;
+};
+
+union cvmx_pko_mem_debug10 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug10_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_mem_debug10_cn30xx {
+ uint64_t fau:28;
+ uint64_t cmd:14;
+ uint64_t segs:6;
+ uint64_t size:16;
+ } cn30xx;
+ struct cvmx_pko_mem_debug10_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug10_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug10_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug10_cn50xx {
+ uint64_t reserved_49_63:15;
+ uint64_t ptrs1:17;
+ uint64_t reserved_17_31:15;
+ uint64_t ptrs2:17;
+ } cn50xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug10_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug10_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug11 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug11_s {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t reserved_0_39:40;
+ } s;
+ struct cvmx_pko_mem_debug11_cn30xx {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+ } cn30xx;
+ struct cvmx_pko_mem_debug11_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug11_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug11_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug11_cn50xx {
+ uint64_t reserved_23_63:41;
+ uint64_t maj:1;
+ uint64_t uid:3;
+ uint64_t sop:1;
+ uint64_t len:1;
+ uint64_t chk:1;
+ uint64_t cnt:13;
+ uint64_t mod:3;
+ } cn50xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug11_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug11_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug12 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug12_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_mem_debug12_cn30xx {
+ uint64_t data:64;
+ } cn30xx;
+ struct cvmx_pko_mem_debug12_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug12_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug12_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug12_cn50xx {
+ uint64_t fau:28;
+ uint64_t cmd:14;
+ uint64_t segs:6;
+ uint64_t size:16;
+ } cn50xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug12_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug12_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug13 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug13_s {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t reserved_0_55:56;
+ } s;
+ struct cvmx_pko_mem_debug13_cn30xx {
+ uint64_t reserved_51_63:13;
+ uint64_t widx:17;
+ uint64_t ridx2:17;
+ uint64_t widx2:17;
+ } cn30xx;
+ struct cvmx_pko_mem_debug13_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug13_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug13_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug13_cn50xx {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+ } cn50xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug13_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug13_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug14 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug14_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_mem_debug14_cn30xx {
+ uint64_t reserved_17_63:47;
+ uint64_t ridx:17;
+ } cn30xx;
+ struct cvmx_pko_mem_debug14_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug14_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug14_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug14_cn52xx {
+ uint64_t data:64;
+ } cn52xx;
+ struct cvmx_pko_mem_debug14_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug14_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug14_cn52xx cn56xxp1;
+};
+
+union cvmx_pko_mem_debug2 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug2_s {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+ } s;
+ struct cvmx_pko_mem_debug2_s cn30xx;
+ struct cvmx_pko_mem_debug2_s cn31xx;
+ struct cvmx_pko_mem_debug2_s cn38xx;
+ struct cvmx_pko_mem_debug2_s cn38xxp2;
+ struct cvmx_pko_mem_debug2_s cn50xx;
+ struct cvmx_pko_mem_debug2_s cn52xx;
+ struct cvmx_pko_mem_debug2_s cn52xxp1;
+ struct cvmx_pko_mem_debug2_s cn56xx;
+ struct cvmx_pko_mem_debug2_s cn56xxp1;
+ struct cvmx_pko_mem_debug2_s cn58xx;
+ struct cvmx_pko_mem_debug2_s cn58xxp1;
+};
+
+union cvmx_pko_mem_debug3 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug3_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_mem_debug3_cn30xx {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t ptr:40;
+ } cn30xx;
+ struct cvmx_pko_mem_debug3_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug3_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug3_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug3_cn50xx {
+ uint64_t data:64;
+ } cn50xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug3_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug3_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug4 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug4_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_mem_debug4_cn30xx {
+ uint64_t data:64;
+ } cn30xx;
+ struct cvmx_pko_mem_debug4_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug4_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug4_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug4_cn50xx {
+ uint64_t cmnd_segs:3;
+ uint64_t cmnd_siz:16;
+ uint64_t cmnd_off:6;
+ uint64_t uid:3;
+ uint64_t dread_sop:1;
+ uint64_t init_dwrite:1;
+ uint64_t chk_once:1;
+ uint64_t chk_mode:1;
+ uint64_t active:1;
+ uint64_t static_p:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_off_max:4;
+ uint64_t qid_off:4;
+ uint64_t qid_base:8;
+ uint64_t wait:1;
+ uint64_t minor:2;
+ uint64_t major:3;
+ } cn50xx;
+ struct cvmx_pko_mem_debug4_cn52xx {
+ uint64_t curr_siz:8;
+ uint64_t curr_off:16;
+ uint64_t cmnd_segs:6;
+ uint64_t cmnd_siz:16;
+ uint64_t cmnd_off:6;
+ uint64_t uid:2;
+ uint64_t dread_sop:1;
+ uint64_t init_dwrite:1;
+ uint64_t chk_once:1;
+ uint64_t chk_mode:1;
+ uint64_t wait:1;
+ uint64_t minor:2;
+ uint64_t major:3;
+ } cn52xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug4_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug4_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug4_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug4_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug5 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug5_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_mem_debug5_cn30xx {
+ uint64_t dwri_mod:1;
+ uint64_t dwri_sop:1;
+ uint64_t dwri_len:1;
+ uint64_t dwri_cnt:13;
+ uint64_t cmnd_siz:16;
+ uint64_t uid:1;
+ uint64_t xfer_wor:1;
+ uint64_t xfer_dwr:1;
+ uint64_t cbuf_fre:1;
+ uint64_t reserved_27_27:1;
+ uint64_t chk_mode:1;
+ uint64_t active:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_off:3;
+ uint64_t qid_base:7;
+ uint64_t wait:1;
+ uint64_t minor:2;
+ uint64_t major:4;
+ } cn30xx;
+ struct cvmx_pko_mem_debug5_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug5_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug5_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug5_cn50xx {
+ uint64_t curr_ptr:29;
+ uint64_t curr_siz:16;
+ uint64_t curr_off:16;
+ uint64_t cmnd_segs:3;
+ } cn50xx;
+ struct cvmx_pko_mem_debug5_cn52xx {
+ uint64_t reserved_54_63:10;
+ uint64_t nxt_inflt:6;
+ uint64_t curr_ptr:40;
+ uint64_t curr_siz:8;
+ } cn52xx;
+ struct cvmx_pko_mem_debug5_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug5_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug5_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug5_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug5_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug6 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug6_s {
+ uint64_t reserved_37_63:27;
+ uint64_t qid_offres:4;
+ uint64_t qid_offths:4;
+ uint64_t preempter:1;
+ uint64_t preemptee:1;
+ uint64_t preempted:1;
+ uint64_t active:1;
+ uint64_t statc:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_offmax:4;
+ uint64_t reserved_0_11:12;
+ } s;
+ struct cvmx_pko_mem_debug6_cn30xx {
+ uint64_t reserved_11_63:53;
+ uint64_t qid_offm:3;
+ uint64_t static_p:1;
+ uint64_t work_min:3;
+ uint64_t dwri_chk:1;
+ uint64_t dwri_uid:1;
+ uint64_t dwri_mod:2;
+ } cn30xx;
+ struct cvmx_pko_mem_debug6_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug6_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug6_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug6_cn50xx {
+ uint64_t reserved_11_63:53;
+ uint64_t curr_ptr:11;
+ } cn50xx;
+ struct cvmx_pko_mem_debug6_cn52xx {
+ uint64_t reserved_37_63:27;
+ uint64_t qid_offres:4;
+ uint64_t qid_offths:4;
+ uint64_t preempter:1;
+ uint64_t preemptee:1;
+ uint64_t preempted:1;
+ uint64_t active:1;
+ uint64_t statc:1;
+ uint64_t qos:3;
+ uint64_t qcb_ridx:5;
+ uint64_t qid_offmax:4;
+ uint64_t qid_off:4;
+ uint64_t qid_base:8;
+ } cn52xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug6_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug6_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug6_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug6_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug7 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug7_s {
+ uint64_t qos:5;
+ uint64_t tail:1;
+ uint64_t reserved_0_57:58;
+ } s;
+ struct cvmx_pko_mem_debug7_cn30xx {
+ uint64_t reserved_58_63:6;
+ uint64_t dwb:9;
+ uint64_t start:33;
+ uint64_t size:16;
+ } cn30xx;
+ struct cvmx_pko_mem_debug7_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug7_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug7_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug7_cn50xx {
+ uint64_t qos:5;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t buf_ptr:33;
+ uint64_t qcb_widx:6;
+ uint64_t qcb_ridx:6;
+ } cn50xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug7_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug7_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug8 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug8_s {
+ uint64_t reserved_59_63:5;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t reserved_0_44:45;
+ } s;
+ struct cvmx_pko_mem_debug8_cn30xx {
+ uint64_t qos:5;
+ uint64_t tail:1;
+ uint64_t buf_siz:13;
+ uint64_t buf_ptr:33;
+ uint64_t qcb_widx:6;
+ uint64_t qcb_ridx:6;
+ } cn30xx;
+ struct cvmx_pko_mem_debug8_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug8_cn30xx cn38xx;
+ struct cvmx_pko_mem_debug8_cn30xx cn38xxp2;
+ struct cvmx_pko_mem_debug8_cn50xx {
+ uint64_t reserved_28_63:36;
+ uint64_t doorbell:20;
+ uint64_t reserved_6_7:2;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+ } cn50xx;
+ struct cvmx_pko_mem_debug8_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t preempter:1;
+ uint64_t doorbell:20;
+ uint64_t reserved_7_7:1;
+ uint64_t preemptee:1;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+ } cn52xx;
+ struct cvmx_pko_mem_debug8_cn52xx cn52xxp1;
+ struct cvmx_pko_mem_debug8_cn52xx cn56xx;
+ struct cvmx_pko_mem_debug8_cn52xx cn56xxp1;
+ struct cvmx_pko_mem_debug8_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug8_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_debug9 {
+ uint64_t u64;
+ struct cvmx_pko_mem_debug9_s {
+ uint64_t reserved_49_63:15;
+ uint64_t ptrs0:17;
+ uint64_t reserved_0_31:32;
+ } s;
+ struct cvmx_pko_mem_debug9_cn30xx {
+ uint64_t reserved_28_63:36;
+ uint64_t doorbell:20;
+ uint64_t reserved_5_7:3;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+ } cn30xx;
+ struct cvmx_pko_mem_debug9_cn30xx cn31xx;
+ struct cvmx_pko_mem_debug9_cn38xx {
+ uint64_t reserved_28_63:36;
+ uint64_t doorbell:20;
+ uint64_t reserved_6_7:2;
+ uint64_t static_p:1;
+ uint64_t s_tail:1;
+ uint64_t static_q:1;
+ uint64_t qos:3;
+ } cn38xx;
+ struct cvmx_pko_mem_debug9_cn38xx cn38xxp2;
+ struct cvmx_pko_mem_debug9_cn50xx {
+ uint64_t reserved_49_63:15;
+ uint64_t ptrs0:17;
+ uint64_t reserved_17_31:15;
+ uint64_t ptrs3:17;
+ } cn50xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn52xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn52xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cn56xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn56xxp1;
+ struct cvmx_pko_mem_debug9_cn50xx cn58xx;
+ struct cvmx_pko_mem_debug9_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_mem_port_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_ptrs_s {
+ uint64_t reserved_62_63:2;
+ uint64_t static_p:1;
+ uint64_t qos_mask:8;
+ uint64_t reserved_16_52:37;
+ uint64_t bp_port:6;
+ uint64_t eid:4;
+ uint64_t pid:6;
+ } s;
+ struct cvmx_pko_mem_port_ptrs_s cn52xx;
+ struct cvmx_pko_mem_port_ptrs_s cn52xxp1;
+ struct cvmx_pko_mem_port_ptrs_s cn56xx;
+ struct cvmx_pko_mem_port_ptrs_s cn56xxp1;
+};
+
+union cvmx_pko_mem_port_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_qos_s {
+ uint64_t reserved_61_63:3;
+ uint64_t qos_mask:8;
+ uint64_t reserved_10_52:43;
+ uint64_t eid:4;
+ uint64_t pid:6;
+ } s;
+ struct cvmx_pko_mem_port_qos_s cn52xx;
+ struct cvmx_pko_mem_port_qos_s cn52xxp1;
+ struct cvmx_pko_mem_port_qos_s cn56xx;
+ struct cvmx_pko_mem_port_qos_s cn56xxp1;
+};
+
+union cvmx_pko_mem_port_rate0 {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_rate0_s {
+ uint64_t reserved_51_63:13;
+ uint64_t rate_word:19;
+ uint64_t rate_pkt:24;
+ uint64_t reserved_6_7:2;
+ uint64_t pid:6;
+ } s;
+ struct cvmx_pko_mem_port_rate0_s cn52xx;
+ struct cvmx_pko_mem_port_rate0_s cn52xxp1;
+ struct cvmx_pko_mem_port_rate0_s cn56xx;
+ struct cvmx_pko_mem_port_rate0_s cn56xxp1;
+};
+
+union cvmx_pko_mem_port_rate1 {
+ uint64_t u64;
+ struct cvmx_pko_mem_port_rate1_s {
+ uint64_t reserved_32_63:32;
+ uint64_t rate_lim:24;
+ uint64_t reserved_6_7:2;
+ uint64_t pid:6;
+ } s;
+ struct cvmx_pko_mem_port_rate1_s cn52xx;
+ struct cvmx_pko_mem_port_rate1_s cn52xxp1;
+ struct cvmx_pko_mem_port_rate1_s cn56xx;
+ struct cvmx_pko_mem_port_rate1_s cn56xxp1;
+};
+
+union cvmx_pko_mem_queue_ptrs {
+ uint64_t u64;
+ struct cvmx_pko_mem_queue_ptrs_s {
+ uint64_t s_tail:1;
+ uint64_t static_p:1;
+ uint64_t static_q:1;
+ uint64_t qos_mask:8;
+ uint64_t buf_ptr:36;
+ uint64_t tail:1;
+ uint64_t index:3;
+ uint64_t port:6;
+ uint64_t queue:7;
+ } s;
+ struct cvmx_pko_mem_queue_ptrs_s cn30xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn31xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn38xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn38xxp2;
+ struct cvmx_pko_mem_queue_ptrs_s cn50xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn52xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn52xxp1;
+ struct cvmx_pko_mem_queue_ptrs_s cn56xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn56xxp1;
+ struct cvmx_pko_mem_queue_ptrs_s cn58xx;
+ struct cvmx_pko_mem_queue_ptrs_s cn58xxp1;
+};
+
+union cvmx_pko_mem_queue_qos {
+ uint64_t u64;
+ struct cvmx_pko_mem_queue_qos_s {
+ uint64_t reserved_61_63:3;
+ uint64_t qos_mask:8;
+ uint64_t reserved_13_52:40;
+ uint64_t pid:6;
+ uint64_t qid:7;
+ } s;
+ struct cvmx_pko_mem_queue_qos_s cn30xx;
+ struct cvmx_pko_mem_queue_qos_s cn31xx;
+ struct cvmx_pko_mem_queue_qos_s cn38xx;
+ struct cvmx_pko_mem_queue_qos_s cn38xxp2;
+ struct cvmx_pko_mem_queue_qos_s cn50xx;
+ struct cvmx_pko_mem_queue_qos_s cn52xx;
+ struct cvmx_pko_mem_queue_qos_s cn52xxp1;
+ struct cvmx_pko_mem_queue_qos_s cn56xx;
+ struct cvmx_pko_mem_queue_qos_s cn56xxp1;
+ struct cvmx_pko_mem_queue_qos_s cn58xx;
+ struct cvmx_pko_mem_queue_qos_s cn58xxp1;
+};
+
+union cvmx_pko_reg_bist_result {
+ uint64_t u64;
+ struct cvmx_pko_reg_bist_result_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_pko_reg_bist_result_cn30xx {
+ uint64_t reserved_27_63:37;
+ uint64_t psb2:5;
+ uint64_t count:1;
+ uint64_t rif:1;
+ uint64_t wif:1;
+ uint64_t ncb:1;
+ uint64_t out:1;
+ uint64_t crc:1;
+ uint64_t chk:1;
+ uint64_t qsb:2;
+ uint64_t qcb:2;
+ uint64_t pdb:4;
+ uint64_t psb:7;
+ } cn30xx;
+ struct cvmx_pko_reg_bist_result_cn30xx cn31xx;
+ struct cvmx_pko_reg_bist_result_cn30xx cn38xx;
+ struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_bist_result_cn50xx {
+ uint64_t reserved_33_63:31;
+ uint64_t csr:1;
+ uint64_t iob:1;
+ uint64_t out_crc:1;
+ uint64_t out_ctl:3;
+ uint64_t out_sta:1;
+ uint64_t out_wif:1;
+ uint64_t prt_chk:3;
+ uint64_t prt_nxt:1;
+ uint64_t prt_psb:6;
+ uint64_t ncb_inb:2;
+ uint64_t prt_qcb:2;
+ uint64_t prt_qsb:3;
+ uint64_t dat_dat:4;
+ uint64_t dat_ptr:4;
+ } cn50xx;
+ struct cvmx_pko_reg_bist_result_cn52xx {
+ uint64_t reserved_35_63:29;
+ uint64_t csr:1;
+ uint64_t iob:1;
+ uint64_t out_dat:1;
+ uint64_t out_ctl:3;
+ uint64_t out_sta:1;
+ uint64_t out_wif:1;
+ uint64_t prt_chk:3;
+ uint64_t prt_nxt:1;
+ uint64_t prt_psb:8;
+ uint64_t ncb_inb:2;
+ uint64_t prt_qcb:2;
+ uint64_t prt_qsb:3;
+ uint64_t prt_ctl:2;
+ uint64_t dat_dat:2;
+ uint64_t dat_ptr:4;
+ } cn52xx;
+ struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1;
+ struct cvmx_pko_reg_bist_result_cn52xx cn56xx;
+ struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1;
+ struct cvmx_pko_reg_bist_result_cn50xx cn58xx;
+ struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1;
+};
+
+union cvmx_pko_reg_cmd_buf {
+ uint64_t u64;
+ struct cvmx_pko_reg_cmd_buf_s {
+ uint64_t reserved_23_63:41;
+ uint64_t pool:3;
+ uint64_t reserved_13_19:7;
+ uint64_t size:13;
+ } s;
+ struct cvmx_pko_reg_cmd_buf_s cn30xx;
+ struct cvmx_pko_reg_cmd_buf_s cn31xx;
+ struct cvmx_pko_reg_cmd_buf_s cn38xx;
+ struct cvmx_pko_reg_cmd_buf_s cn38xxp2;
+ struct cvmx_pko_reg_cmd_buf_s cn50xx;
+ struct cvmx_pko_reg_cmd_buf_s cn52xx;
+ struct cvmx_pko_reg_cmd_buf_s cn52xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cn56xx;
+ struct cvmx_pko_reg_cmd_buf_s cn56xxp1;
+ struct cvmx_pko_reg_cmd_buf_s cn58xx;
+ struct cvmx_pko_reg_cmd_buf_s cn58xxp1;
+};
+
+union cvmx_pko_reg_crc_ctlx {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_ctlx_s {
+ uint64_t reserved_2_63:62;
+ uint64_t invres:1;
+ uint64_t refin:1;
+ } s;
+ struct cvmx_pko_reg_crc_ctlx_s cn38xx;
+ struct cvmx_pko_reg_crc_ctlx_s cn38xxp2;
+ struct cvmx_pko_reg_crc_ctlx_s cn58xx;
+ struct cvmx_pko_reg_crc_ctlx_s cn58xxp1;
+};
+
+union cvmx_pko_reg_crc_enable {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_enable_s {
+ uint64_t reserved_32_63:32;
+ uint64_t enable:32;
+ } s;
+ struct cvmx_pko_reg_crc_enable_s cn38xx;
+ struct cvmx_pko_reg_crc_enable_s cn38xxp2;
+ struct cvmx_pko_reg_crc_enable_s cn58xx;
+ struct cvmx_pko_reg_crc_enable_s cn58xxp1;
+};
+
+union cvmx_pko_reg_crc_ivx {
+ uint64_t u64;
+ struct cvmx_pko_reg_crc_ivx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t iv:32;
+ } s;
+ struct cvmx_pko_reg_crc_ivx_s cn38xx;
+ struct cvmx_pko_reg_crc_ivx_s cn38xxp2;
+ struct cvmx_pko_reg_crc_ivx_s cn58xx;
+ struct cvmx_pko_reg_crc_ivx_s cn58xxp1;
+};
+
+union cvmx_pko_reg_debug0 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug0_s {
+ uint64_t asserts:64;
+ } s;
+ struct cvmx_pko_reg_debug0_cn30xx {
+ uint64_t reserved_17_63:47;
+ uint64_t asserts:17;
+ } cn30xx;
+ struct cvmx_pko_reg_debug0_cn30xx cn31xx;
+ struct cvmx_pko_reg_debug0_cn30xx cn38xx;
+ struct cvmx_pko_reg_debug0_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_debug0_s cn50xx;
+ struct cvmx_pko_reg_debug0_s cn52xx;
+ struct cvmx_pko_reg_debug0_s cn52xxp1;
+ struct cvmx_pko_reg_debug0_s cn56xx;
+ struct cvmx_pko_reg_debug0_s cn56xxp1;
+ struct cvmx_pko_reg_debug0_s cn58xx;
+ struct cvmx_pko_reg_debug0_s cn58xxp1;
+};
+
+union cvmx_pko_reg_debug1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug1_s {
+ uint64_t asserts:64;
+ } s;
+ struct cvmx_pko_reg_debug1_s cn50xx;
+ struct cvmx_pko_reg_debug1_s cn52xx;
+ struct cvmx_pko_reg_debug1_s cn52xxp1;
+ struct cvmx_pko_reg_debug1_s cn56xx;
+ struct cvmx_pko_reg_debug1_s cn56xxp1;
+ struct cvmx_pko_reg_debug1_s cn58xx;
+ struct cvmx_pko_reg_debug1_s cn58xxp1;
+};
+
+union cvmx_pko_reg_debug2 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug2_s {
+ uint64_t asserts:64;
+ } s;
+ struct cvmx_pko_reg_debug2_s cn50xx;
+ struct cvmx_pko_reg_debug2_s cn52xx;
+ struct cvmx_pko_reg_debug2_s cn52xxp1;
+ struct cvmx_pko_reg_debug2_s cn56xx;
+ struct cvmx_pko_reg_debug2_s cn56xxp1;
+ struct cvmx_pko_reg_debug2_s cn58xx;
+ struct cvmx_pko_reg_debug2_s cn58xxp1;
+};
+
+union cvmx_pko_reg_debug3 {
+ uint64_t u64;
+ struct cvmx_pko_reg_debug3_s {
+ uint64_t asserts:64;
+ } s;
+ struct cvmx_pko_reg_debug3_s cn50xx;
+ struct cvmx_pko_reg_debug3_s cn52xx;
+ struct cvmx_pko_reg_debug3_s cn52xxp1;
+ struct cvmx_pko_reg_debug3_s cn56xx;
+ struct cvmx_pko_reg_debug3_s cn56xxp1;
+ struct cvmx_pko_reg_debug3_s cn58xx;
+ struct cvmx_pko_reg_debug3_s cn58xxp1;
+};
+
+union cvmx_pko_reg_engine_inflight {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_inflight_s {
+ uint64_t reserved_40_63:24;
+ uint64_t engine9:4;
+ uint64_t engine8:4;
+ uint64_t engine7:4;
+ uint64_t engine6:4;
+ uint64_t engine5:4;
+ uint64_t engine4:4;
+ uint64_t engine3:4;
+ uint64_t engine2:4;
+ uint64_t engine1:4;
+ uint64_t engine0:4;
+ } s;
+ struct cvmx_pko_reg_engine_inflight_s cn52xx;
+ struct cvmx_pko_reg_engine_inflight_s cn52xxp1;
+ struct cvmx_pko_reg_engine_inflight_s cn56xx;
+ struct cvmx_pko_reg_engine_inflight_s cn56xxp1;
+};
+
+union cvmx_pko_reg_engine_thresh {
+ uint64_t u64;
+ struct cvmx_pko_reg_engine_thresh_s {
+ uint64_t reserved_10_63:54;
+ uint64_t mask:10;
+ } s;
+ struct cvmx_pko_reg_engine_thresh_s cn52xx;
+ struct cvmx_pko_reg_engine_thresh_s cn52xxp1;
+ struct cvmx_pko_reg_engine_thresh_s cn56xx;
+ struct cvmx_pko_reg_engine_thresh_s cn56xxp1;
+};
+
+union cvmx_pko_reg_error {
+ uint64_t u64;
+ struct cvmx_pko_reg_error_s {
+ uint64_t reserved_3_63:61;
+ uint64_t currzero:1;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+ } s;
+ struct cvmx_pko_reg_error_cn30xx {
+ uint64_t reserved_2_63:62;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+ } cn30xx;
+ struct cvmx_pko_reg_error_cn30xx cn31xx;
+ struct cvmx_pko_reg_error_cn30xx cn38xx;
+ struct cvmx_pko_reg_error_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_error_s cn50xx;
+ struct cvmx_pko_reg_error_s cn52xx;
+ struct cvmx_pko_reg_error_s cn52xxp1;
+ struct cvmx_pko_reg_error_s cn56xx;
+ struct cvmx_pko_reg_error_s cn56xxp1;
+ struct cvmx_pko_reg_error_s cn58xx;
+ struct cvmx_pko_reg_error_s cn58xxp1;
+};
+
+union cvmx_pko_reg_flags {
+ uint64_t u64;
+ struct cvmx_pko_reg_flags_s {
+ uint64_t reserved_4_63:60;
+ uint64_t reset:1;
+ uint64_t store_be:1;
+ uint64_t ena_dwb:1;
+ uint64_t ena_pko:1;
+ } s;
+ struct cvmx_pko_reg_flags_s cn30xx;
+ struct cvmx_pko_reg_flags_s cn31xx;
+ struct cvmx_pko_reg_flags_s cn38xx;
+ struct cvmx_pko_reg_flags_s cn38xxp2;
+ struct cvmx_pko_reg_flags_s cn50xx;
+ struct cvmx_pko_reg_flags_s cn52xx;
+ struct cvmx_pko_reg_flags_s cn52xxp1;
+ struct cvmx_pko_reg_flags_s cn56xx;
+ struct cvmx_pko_reg_flags_s cn56xxp1;
+ struct cvmx_pko_reg_flags_s cn58xx;
+ struct cvmx_pko_reg_flags_s cn58xxp1;
+};
+
+union cvmx_pko_reg_gmx_port_mode {
+ uint64_t u64;
+ struct cvmx_pko_reg_gmx_port_mode_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mode1:3;
+ uint64_t mode0:3;
+ } s;
+ struct cvmx_pko_reg_gmx_port_mode_s cn30xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn31xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn38xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2;
+ struct cvmx_pko_reg_gmx_port_mode_s cn50xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn52xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1;
+ struct cvmx_pko_reg_gmx_port_mode_s cn56xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1;
+ struct cvmx_pko_reg_gmx_port_mode_s cn58xx;
+ struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1;
+};
+
+union cvmx_pko_reg_int_mask {
+ uint64_t u64;
+ struct cvmx_pko_reg_int_mask_s {
+ uint64_t reserved_3_63:61;
+ uint64_t currzero:1;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+ } s;
+ struct cvmx_pko_reg_int_mask_cn30xx {
+ uint64_t reserved_2_63:62;
+ uint64_t doorbell:1;
+ uint64_t parity:1;
+ } cn30xx;
+ struct cvmx_pko_reg_int_mask_cn30xx cn31xx;
+ struct cvmx_pko_reg_int_mask_cn30xx cn38xx;
+ struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2;
+ struct cvmx_pko_reg_int_mask_s cn50xx;
+ struct cvmx_pko_reg_int_mask_s cn52xx;
+ struct cvmx_pko_reg_int_mask_s cn52xxp1;
+ struct cvmx_pko_reg_int_mask_s cn56xx;
+ struct cvmx_pko_reg_int_mask_s cn56xxp1;
+ struct cvmx_pko_reg_int_mask_s cn58xx;
+ struct cvmx_pko_reg_int_mask_s cn58xxp1;
+};
+
+union cvmx_pko_reg_queue_mode {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_mode_s {
+ uint64_t reserved_2_63:62;
+ uint64_t mode:2;
+ } s;
+ struct cvmx_pko_reg_queue_mode_s cn30xx;
+ struct cvmx_pko_reg_queue_mode_s cn31xx;
+ struct cvmx_pko_reg_queue_mode_s cn38xx;
+ struct cvmx_pko_reg_queue_mode_s cn38xxp2;
+ struct cvmx_pko_reg_queue_mode_s cn50xx;
+ struct cvmx_pko_reg_queue_mode_s cn52xx;
+ struct cvmx_pko_reg_queue_mode_s cn52xxp1;
+ struct cvmx_pko_reg_queue_mode_s cn56xx;
+ struct cvmx_pko_reg_queue_mode_s cn56xxp1;
+ struct cvmx_pko_reg_queue_mode_s cn58xx;
+ struct cvmx_pko_reg_queue_mode_s cn58xxp1;
+};
+
+union cvmx_pko_reg_queue_ptrs1 {
+ uint64_t u64;
+ struct cvmx_pko_reg_queue_ptrs1_s {
+ uint64_t reserved_2_63:62;
+ uint64_t idx3:1;
+ uint64_t qid7:1;
+ } s;
+ struct cvmx_pko_reg_queue_ptrs1_s cn50xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn52xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1;
+ struct cvmx_pko_reg_queue_ptrs1_s cn56xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1;
+ struct cvmx_pko_reg_queue_ptrs1_s cn58xx;
+ struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1;
+};
+
+union cvmx_pko_reg_read_idx {
+ uint64_t u64;
+ struct cvmx_pko_reg_read_idx_s {
+ uint64_t reserved_16_63:48;
+ uint64_t inc:8;
+ uint64_t index:8;
+ } s;
+ struct cvmx_pko_reg_read_idx_s cn30xx;
+ struct cvmx_pko_reg_read_idx_s cn31xx;
+ struct cvmx_pko_reg_read_idx_s cn38xx;
+ struct cvmx_pko_reg_read_idx_s cn38xxp2;
+ struct cvmx_pko_reg_read_idx_s cn50xx;
+ struct cvmx_pko_reg_read_idx_s cn52xx;
+ struct cvmx_pko_reg_read_idx_s cn52xxp1;
+ struct cvmx_pko_reg_read_idx_s cn56xx;
+ struct cvmx_pko_reg_read_idx_s cn56xxp1;
+ struct cvmx_pko_reg_read_idx_s cn58xx;
+ struct cvmx_pko_reg_read_idx_s cn58xxp1;
+};
+
+#endif
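
Editorial note, not part of the patch: each of the unions above pairs the raw 64-bit CSR value (u64) with per-model bit-field views, so a register is always read and written as a whole word while individual fields are accessed by name. A minimal read-modify-write sketch of that pattern, using cvmx_pko_reg_flags and the cvmx_read_csr()/cvmx_write_csr() accessors that cvmx-pko.c below relies on:

	/* Sketch: clear the PKO enable bit through the union's named field. */
	union cvmx_pko_reg_flags flags;

	flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);	/* whole-word read */
	flags.s.ena_pko = 0;				/* modify one field */
	cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);	/* whole-word write */
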
diff --git a/drivers/staging/octeon/cvmx-pko.c b/drivers/staging/octeon/cvmx-pko.c
new file mode 100644
index 000000000000..00db91529b19
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko.c
@@ -0,0 +1,506 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Support library for the hardware Packet Output unit.
+ */
+
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+
+/**
+ * Internal state of packet output
+ */
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system. This does chip global config, and should only be
+ * done by one core.
+ */
+
+void cvmx_pko_initialize_global(void)
+{
+ int i;
+ uint64_t priority = 8;
+ union cvmx_pko_reg_cmd_buf config;
+
+ /*
+ * Set the size of the PKO command buffers to an odd number of
+ * 64bit words. This allows the normal two word send to stay
+	 * aligned and never span a command word buffer.
+ */
+ config.u64 = 0;
+ config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
+ config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
+
+ cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
+
+ for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
+ cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+ &priority);
+
+ /*
+ * If we aren't using all of the queues optimize PKO's
+ * internal memory.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ int num_interfaces = cvmx_helper_get_number_of_interfaces();
+ int last_port =
+ cvmx_helper_get_last_ipd_port(num_interfaces - 1);
+ int max_queues =
+ cvmx_pko_get_base_queue(last_port) +
+ cvmx_pko_get_num_queues(last_port);
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ if (max_queues <= 32)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+ else if (max_queues <= 64)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+ } else {
+ if (max_queues <= 64)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
+ else if (max_queues <= 128)
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
+ }
+ }
+}
+
+/**
+ * This function does per-core initialization required by the PKO routines.
+ * This must be called on all cores that will do packet output, and must
+ * be called after the FPA has been initialized and filled with pages.
+ *
+ * Returns 0 on success
+ * !0 on failure
+ */
+int cvmx_pko_initialize_local(void)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+void cvmx_pko_enable(void)
+{
+ union cvmx_pko_reg_flags flags;
+
+ flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ if (flags.s.ena_pko)
+ cvmx_dprintf
+ ("Warning: Enabling PKO when PKO already enabled.\n");
+
+ flags.s.ena_dwb = 1;
+ flags.s.ena_pko = 1;
+ /*
+	 * Always enable big endian for 3-word commands; this does
+	 * nothing for 2-word commands.
+ */
+ flags.s.store_be = 1;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
+}
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+void cvmx_pko_disable(void)
+{
+ union cvmx_pko_reg_flags pko_reg_flags;
+ pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ pko_reg_flags.s.ena_pko = 0;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+
+/**
+ * Reset the packet output.
+ */
+static void __cvmx_pko_reset(void)
+{
+ union cvmx_pko_reg_flags pko_reg_flags;
+ pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
+ pko_reg_flags.s.reset = 1;
+ cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
+}
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+void cvmx_pko_shutdown(void)
+{
+ union cvmx_pko_mem_queue_ptrs config;
+ int queue;
+
+ cvmx_pko_disable();
+
+ for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
+ config.u64 = 0;
+ config.s.tail = 1;
+ config.s.index = 0;
+ config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
+ config.s.queue = queue & 0x7f;
+ config.s.qos_mask = 0;
+ config.s.buf_ptr = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pko_reg_queue_ptrs1 config1;
+ config1.u64 = 0;
+ config1.s.qid7 = queue >> 7;
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ }
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
+ }
+ __cvmx_pko_reset();
+}
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port: Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port
+ * @priority: Array of priority levels for each queue. Values are
+ *              allowed to be 0-8. A value of 8 gets 8 times the traffic
+ *              of a value of 1. A value of 0 indicates that the queue
+ *              does not participate in any rounds. These priorities can be changed
+ * on the fly while the pko is enabled. A priority of 9
+ * indicates that static priority should be used. If static
+ * priority is used all queues with static priority must be
+ * contiguous starting at the base_queue, and lower numbered
+ * queues have higher priority than higher numbered queues.
+ * There must be num_queues elements in the array.
+ */
+cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
+ uint64_t num_queues,
+ const uint64_t priority[])
+{
+ cvmx_pko_status_t result_code;
+ uint64_t queue;
+ union cvmx_pko_mem_queue_ptrs config;
+ union cvmx_pko_reg_queue_ptrs1 config1;
+ int static_priority_base = -1;
+ int static_priority_end = -1;
+
+ if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
+ && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
+ (unsigned long long)port);
+ return CVMX_PKO_INVALID_PORT;
+ }
+
+ if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
+ cvmx_dprintf
+ ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
+ (unsigned long long)(base_queue + num_queues));
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ /*
+ * Validate the static queue priority setup and set
+ * static_priority_base and static_priority_end
+ * accordingly.
+ */
+ for (queue = 0; queue < num_queues; queue++) {
+ /* Find first queue of static priority */
+ if (static_priority_base == -1
+ && priority[queue] ==
+ CVMX_PKO_QUEUE_STATIC_PRIORITY)
+ static_priority_base = queue;
+ /* Find last queue of static priority */
+ if (static_priority_base != -1
+ && static_priority_end == -1
+ && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
+ && queue)
+ static_priority_end = queue - 1;
+ else if (static_priority_base != -1
+ && static_priority_end == -1
+ && queue == num_queues - 1)
+ /* all queues are static priority */
+ static_priority_end = queue;
+ /*
+ * Check to make sure all static priority
+ * queues are contiguous. Also catches some
+			 * cases of static priorities not starting at
+			 * queue 0.
+ */
+ if (static_priority_end != -1
+ && (int)queue > static_priority_end
+ && priority[queue] ==
+ CVMX_PKO_QUEUE_STATIC_PRIORITY) {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: "
+ "Static priority queues aren't "
+ "contiguous or don't start at "
+ "base queue. q: %d, eq: %d\n",
+ (int)queue, static_priority_end);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+ }
+ if (static_priority_base > 0) {
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
+ "priority queues don't start at base "
+ "queue. sq: %d\n",
+ static_priority_base);
+ return CVMX_PKO_INVALID_PRIORITY;
+ }
+#if 0
+ cvmx_dprintf("Port %d: Static priority queue base: %d, "
+ "end: %d\n", port,
+ static_priority_base, static_priority_end);
+#endif
+ }
+ /*
+ * At this point, static_priority_base and static_priority_end
+ * are either both -1, or are valid start/end queue
+ * numbers.
+ */
+
+ result_code = CVMX_PKO_SUCCESS;
+
+#ifdef PKO_DEBUG
+ cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
+#endif
+
+ for (queue = 0; queue < num_queues; queue++) {
+ uint64_t *buf_ptr = NULL;
+
+ config1.u64 = 0;
+ config1.s.idx3 = queue >> 3;
+ config1.s.qid7 = (base_queue + queue) >> 7;
+
+ config.u64 = 0;
+ config.s.tail = queue == (num_queues - 1);
+ config.s.index = queue;
+ config.s.port = port;
+ config.s.queue = base_queue + queue;
+
+ if (!cvmx_octeon_is_pass1()) {
+ config.s.static_p = static_priority_base >= 0;
+ config.s.static_q = (int)queue <= static_priority_end;
+ config.s.s_tail = (int)queue == static_priority_end;
+ }
+ /*
+ * Convert the priority into an enable bit field. Try
+		 * to space the bits out evenly so the packets don't
+		 * get grouped up.
+ */
+ switch ((int)priority[queue]) {
+ case 0:
+ config.s.qos_mask = 0x00;
+ break;
+ case 1:
+ config.s.qos_mask = 0x01;
+ break;
+ case 2:
+ config.s.qos_mask = 0x11;
+ break;
+ case 3:
+ config.s.qos_mask = 0x49;
+ break;
+ case 4:
+ config.s.qos_mask = 0x55;
+ break;
+ case 5:
+ config.s.qos_mask = 0x57;
+ break;
+ case 6:
+ config.s.qos_mask = 0x77;
+ break;
+ case 7:
+ config.s.qos_mask = 0x7f;
+ break;
+ case 8:
+ config.s.qos_mask = 0xff;
+ break;
+ case CVMX_PKO_QUEUE_STATIC_PRIORITY:
+ /* Pass 1 will fall through to the error case */
+ if (!cvmx_octeon_is_pass1()) {
+ config.s.qos_mask = 0xff;
+ break;
+ }
+ default:
+ cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
+ "priority %llu\n",
+ (unsigned long long)priority[queue]);
+ config.s.qos_mask = 0xff;
+ result_code = CVMX_PKO_INVALID_PRIORITY;
+ break;
+ }
+
+ if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
+ cvmx_cmd_queue_result_t cmd_res =
+ cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
+ (base_queue + queue),
+ CVMX_PKO_MAX_QUEUE_DEPTH,
+ CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
+ -
+ CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
+ * 8);
+ if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
+ switch (cmd_res) {
+ case CVMX_CMD_QUEUE_NO_MEMORY:
+ cvmx_dprintf("ERROR: "
+ "cvmx_pko_config_port: "
+ "Unable to allocate "
+ "output buffer.\n");
+ return CVMX_PKO_NO_MEMORY;
+ case CVMX_CMD_QUEUE_ALREADY_SETUP:
+ cvmx_dprintf
+ ("ERROR: cvmx_pko_config_port: Port already setup.\n");
+ return CVMX_PKO_PORT_ALREADY_SETUP;
+ case CVMX_CMD_QUEUE_INVALID_PARAM:
+ default:
+ cvmx_dprintf
+ ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
+ return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
+ }
+ }
+
+ buf_ptr =
+ (uint64_t *)
+ cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
+ (base_queue + queue));
+ config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
+ } else
+ config.s.buf_ptr = 0;
+
+ CVMX_SYNCWS;
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
+ }
+
+ return result_code;
+}
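
Editorial note, not part of the patch: a hedged sketch of how a caller might use cvmx_pko_config_port() with a mix of static and weighted round-robin priorities; the port number and queue count are hypothetical:

	/* Sketch: queue 0 gets static priority, queues 1-3 are weighted 8/4/1. */
	uint64_t priorities[4] = { CVMX_PKO_QUEUE_STATIC_PRIORITY, 8, 4, 1 };
	cvmx_pko_status_t status;

	status = cvmx_pko_config_port(2 /* hypothetical port */,
				      cvmx_pko_get_base_queue(2), 4, priorities);
	if (status != CVMX_PKO_SUCCESS)
		cvmx_dprintf("PKO port config failed: %d\n", (int)status);
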
+
+#ifdef PKO_DEBUG
+/**
+ * Show map of ports -> queues for different cores.
+ */
+void cvmx_pko_show_queue_map(void)
+{
+ int core, port;
+ int pko_output_ports = 36;
+
+ cvmx_dprintf("port");
+ for (port = 0; port < pko_output_ports; port++)
+ cvmx_dprintf("%3d ", port);
+ cvmx_dprintf("\n");
+
+ for (core = 0; core < CVMX_MAX_CORES; core++) {
+ cvmx_dprintf("\n%2d: ", core);
+ for (port = 0; port < pko_output_ports; port++) {
+ cvmx_dprintf("%3d ",
+ cvmx_pko_get_base_queue_per_core(port,
+ core));
+ }
+ }
+ cvmx_dprintf("\n");
+}
+#endif
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port: Port to rate limit
+ * @packets_s: Maximum packet/sec
+ * @burst: Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
+{
+ union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+ union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+
+ pko_mem_port_rate0.u64 = 0;
+ pko_mem_port_rate0.s.pid = port;
+ pko_mem_port_rate0.s.rate_pkt =
+ cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
+ /* No cost per word since we are limited by packets/sec, not bits/sec */
+ pko_mem_port_rate0.s.rate_word = 0;
+
+ pko_mem_port_rate1.u64 = 0;
+ pko_mem_port_rate1.s.pid = port;
+ pko_mem_port_rate1.s.rate_lim =
+ ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
+
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+ return 0;
+}
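
Editorial note, not part of the patch: a hedged call capping a hypothetical port at 10,000 packets per second with a 32-packet burst allowance:

	/* Sketch: limit port 1 (hypothetical) to 10k packets/s, burst of 32. */
	if (cvmx_pko_rate_limit_packets(1, 10000, 32) != 0)
		cvmx_dprintf("Packet rate limit setup failed\n");
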
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN51XX and higher, excluding CN58XX.
+ *
+ * @port: Port to rate limit
+ * @bits_s: PKO rate limit in bits/sec
+ * @burst: Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
+{
+ union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
+ union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
+ uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
+ uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
+
+ pko_mem_port_rate0.u64 = 0;
+ pko_mem_port_rate0.s.pid = port;
+ /*
+	 * Each packet has 12 bytes of interframe gap, an 8 byte
+	 * preamble, and a 4 byte CRC. These are not included in the
+	 * per word count. Multiply by 8 to convert to bits and divide
+ * by 256 for limit granularity.
+ */
+ pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
+ /* Each 8 byte word has 64bits */
+ pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
+
+ pko_mem_port_rate1.u64 = 0;
+ pko_mem_port_rate1.s.pid = port;
+ pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
+
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
+ cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
+ return 0;
+}
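
Editorial note, not part of the patch: a hedged example of capping a hypothetical port at 100 Mbit/s with a small burst allowance, using the bits/sec limiter above:

	/* Sketch: limit port 1 (hypothetical) to 100 Mbit/s, 64 Kbit burst. */
	if (cvmx_pko_rate_limit_bits(1, 100000000ull, 65536) != 0)
		cvmx_dprintf("Rate limit setup failed\n");
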
diff --git a/drivers/staging/octeon/cvmx-pko.h b/drivers/staging/octeon/cvmx-pko.h
new file mode 100644
index 000000000000..f068c1982497
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko.h
@@ -0,0 +1,610 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * Interface to the hardware Packet Output unit.
+ *
+ * Starting with SDK 1.7.0, the PKO output functions now support
+ * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
+ * function similarly to previous SDKs by using POW atomic tags
+ * to preserve ordering and exclusivity. As a new option, you
+ * can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses a ll/sc
+ * memory based locking instead. This locking has the advantage
+ * of not affecting the tag state but doesn't preserve packet
+ * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
+ * generic code while CVMX_PKO_LOCK_ATOMIC_TAG should be used
+ * with hand tuned fast path code.
+ *
+ * Some other SDK differences visible to the command
+ * queuing:
+ * - PKO indexes are no longer stored in the FAU. A large
+ * percentage of the FAU register block used to be tied up
+ * maintaining PKO queue pointers. These are now stored in a
+ * global named block.
+ * - The PKO <b>use_locking</b> parameter can now have a global
+ *     effect. Since all applications use the same named block,
+ * queue locking correctly applies across all operating
+ * systems when using CVMX_PKO_LOCK_CMD_QUEUE.
+ * - PKO 3 word commands are now supported. Use
+ * cvmx_pko_send_packet_finish3().
+ *
+ */
+
+#ifndef __CVMX_PKO_H__
+#define __CVMX_PKO_H__
+
+#include "cvmx-fpa.h"
+#include "cvmx-pow.h"
+#include "cvmx-cmd-queue.h"
+#include "cvmx-pko-defs.h"
+
+/* Adjust the command buffer size by 1 word so that in the case of using only
+ * two word PKO commands no command words straddle buffers. The useful values
+ * for this are 0 and 1. */
+#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
+
+#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
+ OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
+ (OCTEON_IS_MODEL(OCTEON_CN58XX) || \
+ OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
+#define CVMX_PKO_NUM_OUTPUT_PORTS 40
+/* use this for queues that are not used */
+#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_MAX_QUEUE_DEPTH 0
+
+typedef enum {
+ CVMX_PKO_SUCCESS,
+ CVMX_PKO_INVALID_PORT,
+ CVMX_PKO_INVALID_QUEUE,
+ CVMX_PKO_INVALID_PRIORITY,
+ CVMX_PKO_NO_MEMORY,
+ CVMX_PKO_PORT_ALREADY_SETUP,
+ CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+/**
+ * This enumeration represents the different locking modes supported by PKO.
+ */
+typedef enum {
+ /*
+ * PKO doesn't do any locking. It is the responsibility of the
+ * application to make sure that no other core is accessing
+	 * the same queue at the same time.
+ */
+ CVMX_PKO_LOCK_NONE = 0,
+ /*
+	 * PKO performs an atomic tag switch to ensure exclusive access
+ * to the output queue. This will maintain packet ordering on
+ * output.
+ */
+ CVMX_PKO_LOCK_ATOMIC_TAG = 1,
+ /*
+	 * PKO uses the common command queue locks to ensure exclusive
+ * access to the output queue. This is a memory based
+ * ll/sc. This is the most portable locking mechanism.
+ */
+ CVMX_PKO_LOCK_CMD_QUEUE = 2,
+} cvmx_pko_lock_t;
+
+typedef struct {
+ uint32_t packets;
+ uint64_t octets;
+ uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+/**
+ * This structure defines the address to use on a packet enqueue
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ /* Must CVMX_IO_SEG */
+ uint64_t mem_space:2;
+ /* Must be zero */
+ uint64_t reserved:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* The ID of the device on the non-coherent bus */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved2:4;
+ /* Must be zero */
+ uint64_t reserved3:18;
+ /*
+ * The hardware likes to have the output port in
+		 * addition to the output queue.
+ */
+ uint64_t port:6;
+ /*
+ * The output queue to send the packet to (0-127 are
+ * legal)
+ */
+ uint64_t queue:9;
+ /* Must be zero */
+ uint64_t reserved4:3;
+ } s;
+} cvmx_pko_doorbell_address_t;
+
+/**
+ * Structure of the first packet output command word.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ /*
+ * The size of the reg1 operation - could be 8, 16,
+ * 32, or 64 bits.
+ */
+ uint64_t size1:2;
+ /*
+ * The size of the reg0 operation - could be 8, 16,
+ * 32, or 64 bits.
+ */
+ uint64_t size0:2;
+ /*
+ * If set, subtract 1, if clear, subtract packet
+ * size.
+ */
+ uint64_t subone1:1;
+ /*
+ * The register, subtract will be done if reg1 is
+ * non-zero.
+ */
+ uint64_t reg1:11;
+ /* If set, subtract 1, if clear, subtract packet size */
+ uint64_t subone0:1;
+ /* The register, subtract will be done if reg0 is non-zero */
+ uint64_t reg0:11;
+ /*
+ * When set, interpret segment pointer and segment
+ * bytes in little endian order.
+ */
+ uint64_t le:1;
+ /*
+ * When set, packet data not allocated in L2 cache by
+ * PKO.
+ */
+ uint64_t n2:1;
+ /*
+ * If set and rsp is set, word3 contains a pointer to
+ * a work queue entry.
+ */
+ uint64_t wqp:1;
+ /* If set, the hardware will send a response when done */
+ uint64_t rsp:1;
+ /*
+ * If set, the supplied pkt_ptr is really a pointer to
+ * a list of pkt_ptr's.
+ */
+ uint64_t gather:1;
+ /*
+		 * If ipoffp1 is non-zero, (ipoffp1-1) is the number
+		 * of bytes to the IP header, and the hardware will
+ * calculate and insert the UDP/TCP checksum.
+ */
+ uint64_t ipoffp1:7;
+ /*
+ * If set, ignore the I bit (force to zero) from all
+ * pointer structures.
+ */
+ uint64_t ignore_i:1;
+ /*
+ * If clear, the hardware will attempt to free the
+ * buffers containing the packet.
+ */
+ uint64_t dontfree:1;
+ /*
+ * The total number of segs in the packet, if gather
+ * set, also gather list length.
+ */
+ uint64_t segs:6;
+ /* Including L2, but no trailing CRC */
+ uint64_t total_bytes:16;
+ } s;
+} cvmx_pko_command_word0_t;
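
Editorial note, not part of the patch: a hedged sketch of filling in this command word for a simple one-segment packet that the hardware frees after transmit; the frame length is hypothetical:

	/* Sketch: two-word command, one linear segment, hardware frees buffer. */
	cvmx_pko_command_word0_t pko_command;

	pko_command.u64 = 0;
	pko_command.s.segs = 1;		/* single buffer, no gather list */
	pko_command.s.total_bytes = 60;	/* hypothetical length, including L2 */
	pko_command.s.dontfree = 0;	/* let PKO free the packet buffers */
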
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Definition of internal state for Packet output processing
+ */
+typedef struct {
+ /* ptr to start of buffer, offset kept in FAU reg */
+ uint64_t *start_ptr;
+} cvmx_pko_state_elem_t;
+
+/**
+ * Call before any other calls to initialize the packet
+ * output system.
+ */
+extern void cvmx_pko_initialize_global(void);
+extern int cvmx_pko_initialize_local(void);
+
+/**
+ * Enables the packet output hardware. It must already be
+ * configured.
+ */
+extern void cvmx_pko_enable(void);
+
+/**
+ * Disables the packet output. Does not affect any configuration.
+ */
+extern void cvmx_pko_disable(void);
+
+/**
+ * Shutdown and free resources required by packet output.
+ */
+
+extern void cvmx_pko_shutdown(void);
+
+/**
+ * Configure an output port and the associated queues for use.
+ *
+ * @port: Port to configure.
+ * @base_queue: First queue number to associate with this port.
+ * @num_queues: Number of queues to associate with this port
+ * @priority: Array of priority levels for each queue. Values are
+ *              allowed to be 1-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. There must be num_queues elements in the
+ * array.
+ */
+extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
+ uint64_t base_queue,
+ uint64_t num_queues,
+ const uint64_t priority[]);
+
+/**
+ * Ring the packet output doorbell. This tells the packet
+ * output hardware that "len" command words have been added
+ * to its pending list. This command includes the required
+ * CVMX_SYNCWS before the doorbell ring.
+ *
+ * @port: Port the packet is for
+ * @queue: Queue the packet is for
+ * @len: Length of the command in 64 bit words
+ */
+static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
+ uint64_t len)
+{
+ cvmx_pko_doorbell_address_t ptr;
+
+ ptr.u64 = 0;
+ ptr.s.mem_space = CVMX_IO_SEG;
+ ptr.s.did = CVMX_OCT_DID_PKT_SEND;
+ ptr.s.is_io = 1;
+ ptr.s.port = port;
+ ptr.s.queue = queue;
+ /*
+ * Need to make sure output queue data is in DRAM before
+ * doorbell write.
+ */
+ CVMX_SYNCWS;
+ cvmx_write_io(ptr.u64, len);
+}
+
+/**
+ * Prepare to send a packet. This may initiate a tag switch to
+ * get exclusive access to the output queue structure, and
+ * performs other prep work for the packet send operation.
+ *
+ * cvmx_pko_send_packet_finish() MUST be called after this function is called,
+ * and must be called with the same port/queue/use_locking arguments.
+ *
+ * The use_locking parameter allows the caller to use three
+ * possible locking modes.
+ * - CVMX_PKO_LOCK_NONE
+ * - PKO doesn't do any locking. It is the responsibility
+ * of the application to make sure that no other core
+ *       is accessing the same queue at the same time.
+ * - CVMX_PKO_LOCK_ATOMIC_TAG
+ *     - PKO performs an atomic tag switch to ensure exclusive
+ * access to the output queue. This will maintain
+ * packet ordering on output.
+ * - CVMX_PKO_LOCK_CMD_QUEUE
+ *     - PKO uses the common command queue locks to ensure
+ * exclusive access to the output queue. This is a
+ * memory based ll/sc. This is the most portable
+ * locking mechanism.
+ *
+ * NOTE: If atomic locking is used, the POW entry CANNOT be
+ * descheduled, as it does not contain a valid WQE pointer.
+ *
+ * @port: Port to send it on
+ * @queue: Queue to use
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ * CVMX_PKO_LOCK_CMD_QUEUE
+ */
+
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
+ cvmx_pko_lock_t use_locking)
+{
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
+ /*
+ * Must do a full switch here to handle all cases. We
+ * use a fake WQE pointer, as the POW does not access
+ * this memory. The WQE pointer and group are only
+ * used if this work is descheduled, which is not
+ * supported by the
+ * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
+ * combination. Note that this is a special case in
+ * which these fake values can be used - this is not a
+ * general technique.
+ */
+ uint32_t tag =
+ CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
+ CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
+ (CVMX_TAG_SUBGROUP_MASK & queue);
+ cvmx_pow_tag_sw_full((cvmx_wqe_t *) cvmx_phys_to_ptr(0x80), tag,
+ CVMX_POW_TAG_TYPE_ATOMIC, 0);
+ }
+}
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be
+ * called exactly once before this, and the same parameters must be
+ * passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish().
+ *
+ * @port: Port to send it on
+ * @queue: Queue to use
+ * @pko_command:
+ * PKO HW command word
+ * @packet: Packet to send
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ * CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * Returns returns CVMX_PKO_SUCCESS on success, or error code on
+ * failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
+ uint64_t port,
+ uint64_t queue,
+ cvmx_pko_command_word0_t pko_command,
+ union cvmx_buf_ptr packet,
+ cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64, packet.u64);
+ if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+ cvmx_pko_doorbell(port, queue, 2);
+ return CVMX_PKO_SUCCESS;
+ } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
+ || (result == CVMX_CMD_QUEUE_FULL)) {
+ return CVMX_PKO_NO_MEMORY;
+ } else {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
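
Editorial note, not part of the patch: a hedged sketch of the prepare/finish pairing described above, using the memory-based command queue lock. Here `port` is assumed to be a valid output port, and `pko_command` (cvmx_pko_command_word0_t) and `packet` (union cvmx_buf_ptr) are assumed to be built by the caller between the two calls:

	/* Sketch: send one two-word command on a port's base queue. */
	uint64_t queue = cvmx_pko_get_base_queue(port);

	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
	/* ... build pko_command and the packet buffer pointer here ... */
	if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet,
					CVMX_PKO_LOCK_CMD_QUEUE) != CVMX_PKO_SUCCESS)
		cvmx_dprintf("PKO send failed, dropping packet\n");
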
+
+/**
+ * Complete packet output. cvmx_pko_send_packet_prepare() must be
+ * called exactly once before this, and the same parameters must be
+ * passed to both cvmx_pko_send_packet_prepare() and
+ * cvmx_pko_send_packet_finish().
+ *
+ * @port: Port to send it on
+ * @queue: Queue to use
+ * @pko_command:
+ * PKO HW command word
+ * @packet: Packet to send
+ * @addr:   Physical address of a work queue entry or physical address
+ * to zero on complete.
+ * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
+ * CVMX_PKO_LOCK_CMD_QUEUE
+ *
+ * Returns returns CVMX_PKO_SUCCESS on success, or error code on
+ * failure of output
+ */
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(
+ uint64_t port,
+ uint64_t queue,
+ cvmx_pko_command_word0_t pko_command,
+ union cvmx_buf_ptr packet,
+ uint64_t addr,
+ cvmx_pko_lock_t use_locking)
+{
+ cvmx_cmd_queue_result_t result;
+ if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
+ cvmx_pow_tag_sw_wait();
+ result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
+ (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
+ pko_command.u64, packet.u64, addr);
+ if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
+ cvmx_pko_doorbell(port, queue, 3);
+ return CVMX_PKO_SUCCESS;
+ } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
+ || (result == CVMX_CMD_QUEUE_FULL)) {
+ return CVMX_PKO_NO_MEMORY;
+ } else {
+ return CVMX_PKO_INVALID_QUEUE;
+ }
+}
+
+/**
+ * Return the pko output queue associated with a port and a specific core.
+ * In normal mode (PKO lockless operation is disabled), the value returned
+ * is the base queue.
+ *
+ * @port: Port number
+ * @core: Core to get queue for
+ *
+ * Returns Core-specific output queue
+ */
+static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
+{
+#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
+#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
+#endif
+#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
+#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
+#endif
+
+ if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
+ return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
+ else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port -
+ 16) *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
+ else if ((port >= 32) && (port < 36))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port -
+ 32) *
+ CVMX_PKO_QUEUES_PER_PORT_PCI;
+ else if ((port >= 36) && (port < 40))
+ return CVMX_PKO_MAX_PORTS_INTERFACE0 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
+ CVMX_PKO_MAX_PORTS_INTERFACE1 *
+ CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
+ 4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port -
+ 36) *
+ CVMX_PKO_QUEUES_PER_PORT_LOOP;
+ else
+ /* Given the limit on the number of ports we can map to
+ * CVMX_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
+ * divided among all cores), the remaining unmapped ports
+ * are assigned an illegal queue number */
+ return CVMX_PKO_ILLEGAL_QUEUE;
+}
+
+/**
+ * For a given port number, return the base pko output queue
+ * for the port.
+ *
+ * @port: Port number
+ * Returns Base output queue
+ */
+static inline int cvmx_pko_get_base_queue(int port)
+{
+ return cvmx_pko_get_base_queue_per_core(port, 0);
+}
+
+/**
+ * For a given port number, return the number of pko output queues.
+ *
+ * @port: Port number
+ * Returns Number of output queues
+ */
+static inline int cvmx_pko_get_num_queues(int port)
+{
+ if (port < 16)
+ return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
+ else if (port < 32)
+ return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
+ else if (port < 36)
+ return CVMX_PKO_QUEUES_PER_PORT_PCI;
+ else if (port < 40)
+ return CVMX_PKO_QUEUES_PER_PORT_LOOP;
+ else
+ return 0;
+}
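+
+/*
+ * Illustrative sketch (not part of the original header): the two helpers
+ * above can be combined to walk every output queue owned by a port.
+ * "port" is a placeholder and configure_queue() a hypothetical per-queue
+ * setup routine:
+ *
+ *   int base = cvmx_pko_get_base_queue(port);
+ *   int count = cvmx_pko_get_num_queues(port);
+ *   int queue;
+ *
+ *   for (queue = base; queue < base + count; queue++)
+ *       configure_queue(queue);
+ */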
+
+/**
+ * Get the status counters for a port.
+ *
+ * @port_num: Port number to get statistics for.
+ * @clear: Set to 1 to clear the counters after they are read
+ * @status: Where to put the results.
+ */
+static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pko_port_status_t *status)
+{
+ union cvmx_pko_reg_read_idx pko_reg_read_idx;
+ union cvmx_pko_mem_count0 pko_mem_count0;
+ union cvmx_pko_mem_count1 pko_mem_count1;
+
+ pko_reg_read_idx.u64 = 0;
+ pko_reg_read_idx.s.index = port_num;
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+
+ pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
+ status->packets = pko_mem_count0.s.count;
+ if (clear) {
+ pko_mem_count0.s.count = port_num;
+ cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
+ }
+
+ pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
+ status->octets = pko_mem_count1.s.count;
+ if (clear) {
+ pko_mem_count1.s.count = port_num;
+ cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pko_mem_debug9 debug9;
+ pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+ debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
+ status->doorbell = debug9.cn38xx.doorbell;
+ } else {
+ union cvmx_pko_mem_debug8 debug8;
+ pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
+ cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
+ debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
+ status->doorbell = debug8.cn58xx.doorbell;
+ }
+}
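+
+/*
+ * Illustrative sketch (not part of the original header): read and clear
+ * the counters for a port. "port" is a placeholder value:
+ *
+ *   cvmx_pko_port_status_t stats;
+ *
+ *   cvmx_pko_get_port_status(port, 1, &stats);
+ *   pr_info("PKO port %d: %llu packets, %llu octets\n", port,
+ *           (unsigned long long)stats.packets,
+ *           (unsigned long long)stats.octets);
+ */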
+
+/**
+ * Rate limit a PKO port to a max packets/sec. This function is only
+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
+ *
+ * @port: Port to rate limit
+ * @packets_s: Maximum packet/sec
+ * @burst: Maximum number of packets to burst in a row before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
+
+/**
+ * Rate limit a PKO port to a max bits/sec. This function is only
+ * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
+ *
+ * @port: Port to rate limit
+ * @bits_s: PKO rate limit in bits/sec
+ * @burst: Maximum number of bits to burst before rate
+ * limiting cuts in.
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
+
+#endif /* __CVMX_PKO_H__ */
diff --git a/drivers/staging/octeon/cvmx-pow.h b/drivers/staging/octeon/cvmx-pow.h
new file mode 100644
index 000000000000..c5d66f272b0d
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pow.h
@@ -0,0 +1,1982 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Interface to the hardware Packet Order / Work unit.
+ *
+ * Starting with SDK 1.7.0, cvmx-pow supports a number of extended
+ * consistency checks. The define CVMX_ENABLE_POW_CHECKS controls the
+ * runtime insertion of POW internal state checks that catch common
+ * programming errors. If CVMX_ENABLE_POW_CHECKS is not defined, the
+ * checks are enabled by default. For example, cvmx-pow will check for
+ * the following programming errors or POW state inconsistencies:
+ * - Requesting a POW operation with an active tag switch in
+ * progress.
+ * - Waiting for a tag switch to complete for an excessively
+ * long period. This is normally a sign of an error in locking
+ * causing deadlock.
+ * - Illegal tag switches from NULL_NULL.
+ * - Illegal tag switches from NULL.
+ * - Illegal deschedule request.
+ * - WQE pointer not matching the one attached to the core by
+ * the POW.
+ *
+ */
+
+#ifndef __CVMX_POW_H__
+#define __CVMX_POW_H__
+
+#include <asm/octeon/cvmx-pow-defs.h>
+
+#include "cvmx-scratch.h"
+#include "cvmx-wqe.h"
+
+/* Default to having all POW consistency checks turned on */
+#ifndef CVMX_ENABLE_POW_CHECKS
+#define CVMX_ENABLE_POW_CHECKS 1
+#endif
+
+enum cvmx_pow_tag_type {
+ /* Tag ordering is maintained */
+ CVMX_POW_TAG_TYPE_ORDERED = 0L,
+ /* Tag ordering is maintained, and at most one PP has the tag */
+ CVMX_POW_TAG_TYPE_ATOMIC = 1L,
+ /*
+ * The work queue entry is excluded from ordering - NEVER tag
+ * switch from NULL to NULL
+ */
+ CVMX_POW_TAG_TYPE_NULL = 2L,
+ /* A tag switch to NULL, and there is no space reserved in POW
+ * - NEVER tag switch to NULL_NULL
+ * - NEVER tag switch from NULL_NULL
+ * - NULL_NULL is entered at the beginning of time and on a deschedule.
+ * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
+ * load can also switch the state to NULL
+ */
+ CVMX_POW_TAG_TYPE_NULL_NULL = 3L
+};
+
+/**
+ * Wait flag values for pow functions.
+ */
+typedef enum {
+ CVMX_POW_WAIT = 1,
+ CVMX_POW_NO_WAIT = 0,
+} cvmx_pow_wait_t;
+
+/**
+ * POW tag operations. These are used in the data stored to the POW.
+ */
+typedef enum {
+ /*
+ * switch the tag (only) for this PP
+ * - the previous tag should be non-NULL in this case
+ * - tag switch response required
+ * - fields used: op, type, tag
+ */
+ CVMX_POW_TAG_OP_SWTAG = 0L,
+ /*
+ * switch the tag for this PP, with full information
+ * - this should be used when the previous tag is NULL
+ * - tag switch response required
+ * - fields used: address, op, grp, type, tag
+ */
+ CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
+ /*
+ * switch the tag (and/or group) for this PP and de-schedule
+ * - OK to keep the tag the same and only change the group
+ * - fields used: op, no_sched, grp, type, tag
+ */
+ CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
+ /*
+ * just de-schedule
+ * - fields used: op, no_sched
+ */
+ CVMX_POW_TAG_OP_DESCH = 3L,
+ /*
+ * create an entirely new work queue entry
+ * - fields used: address, op, qos, grp, type, tag
+ */
+ CVMX_POW_TAG_OP_ADDWQ = 4L,
+ /*
+ * just update the work queue pointer and grp for this PP
+ * - fields used: address, op, grp
+ */
+ CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
+ /*
+ * set the no_sched bit on the de-schedule list
+ *
+ * - does nothing if the selected entry is not on the
+ * de-schedule list
+ *
+ * - does nothing if the stored work queue pointer does not
+ * match the address field
+ *
+ * - fields used: address, index, op
+ *
+ * Before issuing a *_NSCHED operation, SW must guarantee
+ * that all prior deschedules and set/clr NSCHED operations
+ * are complete and all prior switches are complete. The
+ * hardware provides the opsdone bit and swdone bit for SW
+ * polling. After issuing a *_NSCHED operation, SW must
+ * guarantee that the set/clr NSCHED is complete before any
+ * subsequent operations.
+ */
+ CVMX_POW_TAG_OP_SET_NSCHED = 6L,
+ /*
+ * clears the no_sched bit on the de-schedule list
+ *
+ * - does nothing if the selected entry is not on the
+ * de-schedule list
+ *
+ * - does nothing if the stored work queue pointer does not
+ * match the address field
+ *
+ * - fields used: address, index, op
+ *
+ * Before issuing a *_NSCHED operation, SW must guarantee that
+ * all prior deschedules and set/clr NSCHED operations are
+ * complete and all prior switches are complete. The hardware
+ * provides the opsdone bit and swdone bit for SW
+ * polling. After issuing a *_NSCHED operation, SW must
+ * guarantee that the set/clr NSCHED is complete before any
+ * subsequent operations.
+ */
+ CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
+ /* do nothing */
+ CVMX_POW_TAG_OP_NOP = 15L
+} cvmx_pow_tag_op_t;
+
+/**
+ * This structure defines the store data on a store to POW
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+ /*
+ * Don't reschedule this entry. no_sched is used for
+ * CVMX_POW_TAG_OP_SWTAG_DESCH and
+ * CVMX_POW_TAG_OP_DESCH
+ */
+ uint64_t no_sched:1;
+ uint64_t unused:2;
+ /* Contains the index of the entry for a CVMX_POW_TAG_OP_*_NSCHED */
+ uint64_t index:13;
+ /* The operation to perform */
+ cvmx_pow_tag_op_t op:4;
+ uint64_t unused2:2;
+ /*
+ * The QOS level for the packet. qos is only used for
+ * CVMX_POW_TAG_OP_ADDWQ
+ */
+ uint64_t qos:3;
+ /*
+ * The group that the work queue entry will be
+ * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
+ * CVMX_POW_TAG_OP_SWTAG_FULL,
+ * CVMX_POW_TAG_OP_SWTAG_DESCH, and
+ * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
+ */
+ uint64_t grp:4;
+ /*
+ * The type of the tag. type is used for everything
+ * except CVMX_POW_TAG_OP_DESCH,
+ * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
+ * CVMX_POW_TAG_OP_*_NSCHED
+ */
+ uint64_t type:3;
+ /*
+ * The actual tag. tag is used for everything except
+ * CVMX_POW_TAG_OP_DESCH,
+ * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
+ * CVMX_POW_TAG_OP_*_NSCHED
+ */
+ uint64_t tag:32;
+ } s;
+} cvmx_pow_tag_req_t;
+
+/**
+ * This structure describes the address to load stuff from POW
+ */
+typedef union {
+ uint64_t u64;
+
+ /**
+ * Address for new work request loads (did<2:0> == 0)
+ */
+ struct {
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 0 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_4_39:36;
+ /*
+ * If set, don't return load response until work is
+ * available.
+ */
+ uint64_t wait:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+ } swork;
+
+ /**
+ * Address for loads to get POW internal status
+ */
+ struct {
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 1 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_10_39:30;
+ /* The core id to get status for */
+ uint64_t coreid:4;
+ /*
+ * If set and get_cur is set, return reverse tag-list
+ * pointer rather than forward tag-list pointer.
+ */
+ uint64_t get_rev:1;
+ /*
+ * If set, return current status rather than pending
+ * status.
+ */
+ uint64_t get_cur:1;
+ /*
+ * If set, get the work-queue pointer rather than
+ * tag/type.
+ */
+ uint64_t get_wqp:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+ } sstatus;
+
+ /**
+ * Address for memory loads to get POW internal state
+ */
+ struct {
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 2 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_16_39:24;
+ /* POW memory index */
+ uint64_t index:11;
+ /*
+ * If set, return deschedule information rather than
+ * the standard response for work-queue index (invalid
+ * if the work-queue entry is not on the deschedule
+ * list).
+ */
+ uint64_t get_des:1;
+ /*
+ * If set, get the work-queue pointer rather than
+ * tag/type (no effect when get_des set).
+ */
+ uint64_t get_wqp:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+ } smemload;
+
+ /**
+ * Address for index/pointer loads
+ */
+ struct {
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 3 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_9_39:31;
+ /*
+ * when {get_rmt ==0 AND get_des_get_tail == 0}, this
+ * field selects one of eight POW internal-input
+ * queues (0-7), one per QOS level; values 8-15 are
+ * illegal in this case; when {get_rmt ==0 AND
+ * get_des_get_tail == 1}, this field selects one of
+ * 16 deschedule lists (per group); when get_rmt ==1,
+ * this field selects one of 16 memory-input queue
+ * lists. The two memory-input queue lists associated
+ * with each QOS level are:
+ *
+ * - qosgrp = 0, qosgrp = 8: QOS0
+ * - qosgrp = 1, qosgrp = 9: QOS1
+ * - qosgrp = 2, qosgrp = 10: QOS2
+ * - qosgrp = 3, qosgrp = 11: QOS3
+ * - qosgrp = 4, qosgrp = 12: QOS4
+ * - qosgrp = 5, qosgrp = 13: QOS5
+ * - qosgrp = 6, qosgrp = 14: QOS6
+ * - qosgrp = 7, qosgrp = 15: QOS7
+ */
+ uint64_t qosgrp:4;
+ /*
+ * If set and get_rmt is clear, return deschedule list
+ * indexes rather than indexes for the specified qos
+ * level; if set and get_rmt is set, return the tail
+ * pointer rather than the head pointer for the
+ * specified qos level.
+ */
+ uint64_t get_des_get_tail:1;
+ /*
+ * If set, return remote pointers rather than the
+ * local indexes for the specified qos level.
+ */
+ uint64_t get_rmt:1;
+ /* Must be zero */
+ uint64_t reserved_0_2:3;
+ } sindexload;
+
+ /**
+ * Address for a NULL_RD request (did<2:0> == 4). When this is read,
+ * HW attempts to change the state to NULL if it is NULL_NULL (the
+ * hardware cannot switch from NULL_NULL to NULL if a POW entry is
+ * not available - software may need to recover by finishing
+ * another piece of work before a POW entry can ever become
+ * available).
+ */
+ struct {
+ /* Mips64 address region. Should be CVMX_IO_SEG */
+ uint64_t mem_region:2;
+ /* Must be zero */
+ uint64_t reserved_49_61:13;
+ /* Must be one */
+ uint64_t is_io:1;
+ /* the ID of POW -- did<2:0> == 4 in this case */
+ uint64_t did:8;
+ /* Must be zero */
+ uint64_t reserved_0_39:40;
+ } snull_rd;
+} cvmx_pow_load_addr_t;
+
+/**
+ * This structure defines the response to a load/SENDSINGLE to POW
+ * (except CSR reads)
+ */
+typedef union {
+ uint64_t u64;
+
+ /**
+ * Response to new work request loads
+ */
+ struct {
+ /*
+ * Set when no new work queue entry was returned.
+ * If there was de-scheduled work, the HW will
+ * definitely return it. When this bit is set, it
+ * could mean either:
+ *
+ * - There was no work, or
+ *
+ * - There was no work that the HW could find. This
+ * case can happen, regardless of the wait bit value
+ * in the original request, when there is work in
+ * the IQ's that is too deep down the list.
+ */
+ uint64_t no_work:1;
+ /* Must be zero */
+ uint64_t reserved_40_62:23;
+ /* 36 in O1 -- the work queue pointer */
+ uint64_t addr:40;
+ } s_work;
+
+ /**
+ * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
+ */
+ struct {
+ uint64_t reserved_62_63:2;
+ /* Set when there is a pending non-NULL SWTAG or
+ * SWTAG_FULL, and the POW entry has not left the list
+ * for the original tag. */
+ uint64_t pend_switch:1;
+ /* Set when SWTAG_FULL and pend_switch is set. */
+ uint64_t pend_switch_full:1;
+ /*
+ * Set when there is a pending NULL SWTAG, or an
+ * implicit switch to NULL.
+ */
+ uint64_t pend_switch_null:1;
+ /* Set when there is a pending DESCHED or SWTAG_DESCHED. */
+ uint64_t pend_desched:1;
+ /*
+ * Set when there is a pending SWTAG_DESCHED and
+ * pend_desched is set.
+ */
+ uint64_t pend_desched_switch:1;
+ /* Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_nosched:1;
+ /* Set when there is a pending GET_WORK. */
+ uint64_t pend_new_work:1;
+ /*
+ * When pend_new_work is set, this bit indicates that
+ * the wait bit was set.
+ */
+ uint64_t pend_new_work_wait:1;
+ /* Set when there is a pending NULL_RD. */
+ uint64_t pend_null_rd:1;
+ /* Set when there is a pending CLR_NSCHED. */
+ uint64_t pend_nosched_clr:1;
+ uint64_t reserved_51:1;
+ /* This is the index when pend_nosched_clr is set. */
+ uint64_t pend_index:11;
+ /*
+ * This is the new_grp when (pend_desched AND
+ * pend_desched_switch) is set.
+ */
+ uint64_t pend_grp:4;
+ uint64_t reserved_34_35:2;
+ /*
+ * This is the tag type when pend_switch or
+ * (pend_desched AND pend_desched_switch) are set.
+ */
+ uint64_t pend_type:2;
+ /*
+ * - this is the tag when pend_switch or (pend_desched
+ * AND pend_desched_switch) are set.
+ */
+ uint64_t pend_tag:32;
+ } s_sstatus0;
+
+ /**
+ * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
+ */
+ struct {
+ uint64_t reserved_62_63:2;
+ /*
+ * Set when there is a pending non-NULL SWTAG or
+ * SWTAG_FULL, and the POW entry has not left the list
+ * for the original tag.
+ */
+ uint64_t pend_switch:1;
+ /* Set when SWTAG_FULL and pend_switch is set. */
+ uint64_t pend_switch_full:1;
+ /*
+ * Set when there is a pending NULL SWTAG, or an
+ * implicit switch to NULL.
+ */
+ uint64_t pend_switch_null:1;
+ /*
+ * Set when there is a pending DESCHED or
+ * SWTAG_DESCHED.
+ */
+ uint64_t pend_desched:1;
+ /*
+ * Set when there is a pending SWTAG_DESCHED and
+ * pend_desched is set.
+ */
+ uint64_t pend_desched_switch:1;
+ /* Set when nosched is desired and pend_desched is set. */
+ uint64_t pend_nosched:1;
+ /* Set when there is a pending GET_WORK. */
+ uint64_t pend_new_work:1;
+ /*
+ * When pend_new_work is set, this bit indicates that
+ * the wait bit was set.
+ */
+ uint64_t pend_new_work_wait:1;
+ /* Set when there is a pending NULL_RD. */
+ uint64_t pend_null_rd:1;
+ /* Set when there is a pending CLR_NSCHED. */
+ uint64_t pend_nosched_clr:1;
+ uint64_t reserved_51:1;
+ /* This is the index when pend_nosched_clr is set. */
+ uint64_t pend_index:11;
+ /*
+ * This is the new_grp when (pend_desched AND
+ * pend_desched_switch) is set.
+ */
+ uint64_t pend_grp:4;
+ /* This is the wqp when pend_nosched_clr is set. */
+ uint64_t pend_wqp:36;
+ } s_sstatus1;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
+ * get_rev==0)
+ */
+ struct {
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the next POW entry in the tag list when
+ * tail == 0 (and tag_type is not NULL or NULL_NULL).
+ */
+ uint64_t link_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /*
+ * Set when this POW entry is at the head of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t head:1;
+ /*
+ * Set when this POW entry is at the tail of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t tail:1;
+ /*
+ * The tag type attached to the core (updated when new
+ * tag list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag_type:2;
+ /*
+ * The tag attached to the core (updated when new tag
+ * list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag:32;
+ } s_sstatus2;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
+ */
+ struct {
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the prior POW entry in the tag list when
+ * head == 0 (and tag_type is not NULL or
+ * NULL_NULL). This field is unpredictable when the
+ * core's state is NULL or NULL_NULL.
+ */
+ uint64_t revlink_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /* Set when this POW entry is at the head of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t head:1;
+ /*
+ * Set when this POW entry is at the tail of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t tail:1;
+ /*
+ * The tag type attached to the core (updated when new
+ * tag list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag_type:2;
+ /*
+ * The tag attached to the core (updated when new tag
+ * list entered on SWTAG, SWTAG_FULL, or
+ * SWTAG_DESCHED).
+ */
+ uint64_t tag:32;
+ } s_sstatus3;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
+ * get_rev==0)
+ */
+ struct {
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the next POW entry in the tag list when
+ * tail == 0 (and tag_type is not NULL or NULL_NULL).
+ */
+ uint64_t link_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /*
+ * The wqp attached to the core (updated when new tag
+ * list entered on SWTAG_FULL).
+ */
+ uint64_t wqp:36;
+ } s_sstatus4;
+
+ /**
+ * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
+ * get_rev==1)
+ */
+ struct {
+ uint64_t reserved_62_63:2;
+ /*
+ * Points to the prior POW entry in the tag list when
+ * head == 0 (and tag_type is not NULL or
+ * NULL_NULL). This field is unpredictable when the
+ * core's state is NULL or NULL_NULL.
+ */
+ uint64_t revlink_index:11;
+ /* The POW entry attached to the core. */
+ uint64_t index:11;
+ /*
+ * The group attached to the core (updated when new
+ * tag list entered on SWTAG_FULL).
+ */
+ uint64_t grp:4;
+ /*
+ * The wqp attached to the core (updated when new tag
+ * list entered on SWTAG_FULL).
+ */
+ uint64_t wqp:36;
+ } s_sstatus5;
+
+ /**
+ * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
+ */
+ struct {
+ uint64_t reserved_51_63:13;
+ /*
+ * The next entry in the input, free, descheduled_head
+ * list (unpredictable if entry is the tail of the
+ * list).
+ */
+ uint64_t next_index:11;
+ /* The group of the POW entry. */
+ uint64_t grp:4;
+ uint64_t reserved_35:1;
+ /*
+ * Set when this POW entry is at the tail of its tag
+ * list (also set when in the NULL or NULL_NULL
+ * state).
+ */
+ uint64_t tail:1;
+ /* The tag type of the POW entry. */
+ uint64_t tag_type:2;
+ /* The tag of the POW entry. */
+ uint64_t tag:32;
+ } s_smemload0;
+
+ /**
+ * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
+ */
+ struct {
+ uint64_t reserved_51_63:13;
+ /*
+ * The next entry in the input, free, descheduled_head
+ * list (unpredictable if entry is the tail of the
+ * list).
+ */
+ uint64_t next_index:11;
+ /* The group of the POW entry. */
+ uint64_t grp:4;
+ /* The WQP held in the POW entry. */
+ uint64_t wqp:36;
+ } s_smemload1;
+
+ /**
+ * Result For POW Memory Load (get_des == 1)
+ */
+ struct {
+ uint64_t reserved_51_63:13;
+ /*
+ * The next entry in the tag list connected to the
+ * descheduled head.
+ */
+ uint64_t fwd_index:11;
+ /* The group of the POW entry. */
+ uint64_t grp:4;
+ /* The nosched bit for the POW entry. */
+ uint64_t nosched:1;
+ /* There is a pending tag switch */
+ uint64_t pend_switch:1;
+ /*
+ * The next tag type for the new tag list when
+ * pend_switch is set.
+ */
+ uint64_t pend_type:2;
+ /*
+ * The next tag for the new tag list when pend_switch
+ * is set.
+ */
+ uint64_t pend_tag:32;
+ } s_smemload2;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
+ */
+ struct {
+ uint64_t reserved_52_63:12;
+ /*
+ * set when there is one or more POW entries on the
+ * free list.
+ */
+ uint64_t free_val:1;
+ /*
+ * set when there is exactly one POW entry on the free
+ * list.
+ */
+ uint64_t free_one:1;
+ uint64_t reserved_49:1;
+ /*
+ * when free_val is set, indicates the first entry on
+ * the free list.
+ */
+ uint64_t free_head:11;
+ uint64_t reserved_37:1;
+ /*
+ * when free_val is set, indicates the last entry on
+ * the free list.
+ */
+ uint64_t free_tail:11;
+ /*
+ * set when there is one or more POW entries on the
+ * input Q list selected by qosgrp.
+ */
+ uint64_t loc_val:1;
+ /*
+ * set when there is exactly one POW entry on the
+ * input Q list selected by qosgrp.
+ */
+ uint64_t loc_one:1;
+ uint64_t reserved_23:1;
+ /*
+ * when loc_val is set, indicates the first entry on
+ * the input Q list selected by qosgrp.
+ */
+ uint64_t loc_head:11;
+ uint64_t reserved_11:1;
+ /*
+ * when loc_val is set, indicates the last entry on
+ * the input Q list selected by qosgrp.
+ */
+ uint64_t loc_tail:11;
+ } sindexload0;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
+ */
+ struct {
+ uint64_t reserved_52_63:12;
+ /*
+ * set when there is one or more POW entries on the
+ * nosched list.
+ */
+ uint64_t nosched_val:1;
+ /*
+ * set when there is exactly one POW entry on the
+ * nosched list.
+ */
+ uint64_t nosched_one:1;
+ uint64_t reserved_49:1;
+ /*
+ * when nosched_val is set, indicates the first entry
+ * on the nosched list.
+ */
+ uint64_t nosched_head:11;
+ uint64_t reserved_37:1;
+ /*
+ * when nosched_val is set, indicates the last entry
+ * on the nosched list.
+ */
+ uint64_t nosched_tail:11;
+ /*
+ * set when there is one or more descheduled heads on
+ * the descheduled list selected by qosgrp.
+ */
+ uint64_t des_val:1;
+ /*
+ * set when there is exactly one descheduled head on
+ * the descheduled list selected by qosgrp.
+ */
+ uint64_t des_one:1;
+ uint64_t reserved_23:1;
+ /*
+ * when des_val is set, indicates the first
+ * descheduled head on the descheduled list selected
+ * by qosgrp.
+ */
+ uint64_t des_head:11;
+ uint64_t reserved_11:1;
+ /*
+ * when des_val is set, indicates the last descheduled
+ * head on the descheduled list selected by qosgrp.
+ */
+ uint64_t des_tail:11;
+ } sindexload1;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
+ */
+ struct {
+ uint64_t reserved_39_63:25;
+ /*
+ * Set when this DRAM list is the current head
+ * (i.e. is the next to be reloaded when the POW
+ * hardware reloads a POW entry from DRAM). The POW
+ * hardware alternates between the two DRAM lists
+ * associated with a QOS level when it reloads work
+ * from DRAM into the POW unit.
+ */
+ uint64_t rmt_is_head:1;
+ /*
+ * Set when the DRAM portion of the input Q list
+ * selected by qosgrp contains one or more pieces of
+ * work.
+ */
+ uint64_t rmt_val:1;
+ /*
+ * Set when the DRAM portion of the input Q list
+ * selected by qosgrp contains exactly one piece of
+ * work.
+ */
+ uint64_t rmt_one:1;
+ /*
+ * When rmt_val is set, indicates the first piece of
+ * work on the DRAM input Q list selected by
+ * qosgrp.
+ */
+ uint64_t rmt_head:36;
+ } sindexload2;
+
+ /**
+ * Result For POW Index/Pointer Load (get_rmt ==
+ * 1/get_des_get_tail == 1)
+ */
+ struct {
+ uint64_t reserved_39_63:25;
+ /*
+ * set when this DRAM list is the current head
+ * (i.e. is the next to be reloaded when the POW
+ * hardware reloads a POW entry from DRAM). The POW
+ * hardware alternates between the two DRAM lists
+ * associated with a QOS level when it reloads work
+ * from DRAM into the POW unit.
+ */
+ uint64_t rmt_is_head:1;
+ /*
+ * set when the DRAM portion of the input Q list
+ * selected by qosgrp contains one or more pieces of
+ * work.
+ */
+ uint64_t rmt_val:1;
+ /*
+ * set when the DRAM portion of the input Q list
+ * selected by qosgrp contains exactly one piece of
+ * work.
+ */
+ uint64_t rmt_one:1;
+ /*
+ * when rmt_val is set, indicates the last piece of
+ * work on the DRAM input Q list selected by
+ * qosgrp.
+ */
+ uint64_t rmt_tail:36;
+ } sindexload3;
+
+ /**
+ * Response to NULL_RD request loads
+ */
+ struct {
+ uint64_t unused:62;
+ /* The POW state, of type enum cvmx_pow_tag_type. state is one
+ * of the following:
+ *
+ * - CVMX_POW_TAG_TYPE_ORDERED
+ * - CVMX_POW_TAG_TYPE_ATOMIC
+ * - CVMX_POW_TAG_TYPE_NULL
+ * - CVMX_POW_TAG_TYPE_NULL_NULL
+ */
+ uint64_t state:2;
+ } s_null_rd;
+
+} cvmx_pow_tag_load_resp_t;
+
+/**
+ * This structure describes the address used for stores to the POW.
+ * The store address is meaningful on stores to the POW. The
+ * hardware assumes that an aligned 64-bit store was used for all
+ * these stores. Note the assumption that the work queue entry is
+ * aligned on an 8-byte boundary (since the low-order 3 address bits
+ * must be zero). Note that not all fields are used by all
+ * operations.
+ *
+ * NOTE: The following is the behavior of the pending switch bit at the PP
+ * for POW stores (i.e. when did<7:3> == 0xc)
+ * - did<2:0> == 0 => pending switch bit is set
+ * - did<2:0> == 1 => no effect on the pending switch bit
+ * - did<2:0> == 3 => pending switch bit is cleared
+ * - did<2:0> == 7 => no effect on the pending switch bit
+ * - did<2:0> == others => must not be used
+ * - No other loads/stores have an effect on the pending switch bit
+ * - The switch bus from POW can clear the pending switch bit
+ *
+ * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
+ * ADDWQ command that only contains the pointer. SW must never use
+ * did<2:0> == 2.
+ */
+typedef union {
+ /**
+ * Unsigned 64 bit integer representation of store address
+ */
+ uint64_t u64;
+
+ struct {
+ /* Memory region. Should be CVMX_IO_SEG in most cases */
+ uint64_t mem_reg:2;
+ uint64_t reserved_49_61:13; /* Must be zero */
+ uint64_t is_io:1; /* Must be one */
+ /* Device ID of POW. Note that different sub-dids are used. */
+ uint64_t did:8;
+ uint64_t reserved_36_39:4; /* Must be zero */
+ /* Address field. addr<2:0> must be zero */
+ uint64_t addr:36;
+ } stag;
+} cvmx_pow_tag_store_addr_t;
+
+/**
+ * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
+ */
+typedef union {
+ uint64_t u64;
+
+ struct {
+ /*
+ * the (64-bit word) location in scratchpad to write
+ * to (if len != 0)
+ */
+ uint64_t scraddr:8;
+ /* the number of words in the response (0 => no response) */
+ uint64_t len:8;
+ /* the ID of the device on the non-coherent bus */
+ uint64_t did:8;
+ uint64_t unused:36;
+ /* if set, don't return load response until work is available */
+ uint64_t wait:1;
+ uint64_t unused2:3;
+ } s;
+
+} cvmx_pow_iobdma_store_t;
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+/**
+ * Get the POW tag for this core. This returns the current
+ * tag type, tag, group, and POW entry index associated with
+ * this core. Index is only valid if the tag type isn't NULL_NULL.
+ * If a tag switch is pending this routine returns the tag before
+ * the tag switch, not after.
+ *
+ * Returns Current tag
+ */
+static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
+{
+ cvmx_pow_load_addr_t load_addr;
+ cvmx_pow_tag_load_resp_t load_resp;
+ cvmx_pow_tag_req_t result;
+
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = cvmx_get_core_num();
+ load_addr.sstatus.get_cur = 1;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ result.u64 = 0;
+ result.s.grp = load_resp.s_sstatus2.grp;
+ result.s.index = load_resp.s_sstatus2.index;
+ result.s.type = load_resp.s_sstatus2.tag_type;
+ result.s.tag = load_resp.s_sstatus2.tag;
+ return result;
+}
+
+/**
+ * Get the POW WQE for this core. This returns the work queue
+ * entry currently associated with this core.
+ *
+ * Returns WQE pointer
+ */
+static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
+{
+ cvmx_pow_load_addr_t load_addr;
+ cvmx_pow_tag_load_resp_t load_resp;
+
+ load_addr.u64 = 0;
+ load_addr.sstatus.mem_region = CVMX_IO_SEG;
+ load_addr.sstatus.is_io = 1;
+ load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
+ load_addr.sstatus.coreid = cvmx_get_core_num();
+ load_addr.sstatus.get_cur = 1;
+ load_addr.sstatus.get_wqp = 1;
+ load_resp.u64 = cvmx_read_csr(load_addr.u64);
+ return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
+}
+
+#ifndef CVMX_MF_CHORD
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#endif
+
+/**
+ * Print a warning if a tag switch is pending for this core
+ *
+ * @function: Function name checking for a pending tag switch
+ */
+static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
+{
+ uint64_t switch_complete;
+ CVMX_MF_CHORD(switch_complete);
+ if (!switch_complete)
+ pr_warning("%s called with tag switch in progress\n", function);
+}
+
+/**
+ * Waits for a tag switch to complete by polling the completion bit.
+ * Note that switches to NULL complete immediately and do not need
+ * to be waited for.
+ */
+static inline void cvmx_pow_tag_sw_wait(void)
+{
+ const uint64_t MAX_CYCLES = 1ull << 31;
+ uint64_t switch_complete;
+ uint64_t start_cycle = cvmx_get_cycle();
+ while (1) {
+ CVMX_MF_CHORD(switch_complete);
+ if (unlikely(switch_complete))
+ break;
+ if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
+ pr_warning("Tag switch is taking a long time, "
+ "possible deadlock\n");
+ start_cycle = -MAX_CYCLES - 1;
+ }
+ }
+}
+
+/**
+ * Synchronous work request. Requests work from the POW.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that there is not a pending tag switch.
+ *
+ * @wait: When set, call stalls until work becomes available, or times out.
+ * If not set, returns immediately.
+ *
+ * Returns the WQE pointer from POW. Returns NULL if no work
+ * was available.
+ */
+static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
+ wait)
+{
+ cvmx_pow_load_addr_t ptr;
+ cvmx_pow_tag_load_resp_t result;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ ptr.u64 = 0;
+ ptr.swork.mem_region = CVMX_IO_SEG;
+ ptr.swork.is_io = 1;
+ ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
+ ptr.swork.wait = wait;
+
+ result.u64 = cvmx_read_csr(ptr.u64);
+
+ if (result.s_work.no_work)
+ return NULL;
+ else
+ return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
+}
+
+/**
+ * Synchronous work request. Requests work from the POW.
+ * This function waits for any previous tag switch to complete before
+ * requesting the new work.
+ *
+ * @wait: When set, call stalls until work becomes available, or times out.
+ * If not set, returns immediately.
+ *
+ * Returns the WQE pointer from POW. Returns NULL if no work
+ * was available.
+ */
+static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+ return cvmx_pow_work_request_sync_nocheck(wait);
+
+}
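+
+/*
+ * Illustrative dispatch loop (not part of the original header).
+ * process_work() is a hypothetical handler:
+ *
+ *   while (1) {
+ *       cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
+ *
+ *       if (work == NULL)
+ *           continue;
+ *       process_work(work);
+ *   }
+ */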
+
+/**
+ * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
+ * This function waits for any previous tag switch to complete before
+ * requesting the null_rd.
+ *
+ * Returns the POW state, of type enum cvmx_pow_tag_type.
+ */
+static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
+{
+ cvmx_pow_load_addr_t ptr;
+ cvmx_pow_tag_load_resp_t result;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+
+ ptr.u64 = 0;
+ ptr.snull_rd.mem_region = CVMX_IO_SEG;
+ ptr.snull_rd.is_io = 1;
+ ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
+
+ result.u64 = cvmx_read_csr(ptr.u64);
+
+ return (enum cvmx_pow_tag_type) result.s_null_rd.state;
+}
+
+/**
+ * Asynchronous work request. Work is requested from the POW unit,
+ * and should later be checked with function
+ * cvmx_pow_work_response_async. This function does NOT wait for
+ * previous tag switches to complete, so the caller must ensure that
+ * there is not a pending tag switch.
+ *
+ * @scr_addr: Scratch memory address that response will be returned
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
+ *
+ * @wait: 1 to cause response to wait for work to become available (or
+ * timeout), 0 to cause response to return immediately
+ */
+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
+ cvmx_pow_wait_t wait)
+{
+ cvmx_pow_iobdma_store_t data;
+
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* scr_addr must be 8 byte aligned */
+ data.s.scraddr = scr_addr >> 3;
+ data.s.len = 1;
+ data.s.did = CVMX_OCT_DID_TAG_SWTAG;
+ data.s.wait = wait;
+ cvmx_send_single(data.u64);
+}
+
+/**
+ * Asynchronous work request. Work is requested from the POW unit,
+ * and should later be checked with function
+ * cvmx_pow_work_response_async. This function waits for any previous
+ * tag switch to complete before requesting the new work.
+ *
+ * @scr_addr: Scratch memory address that response will be returned
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
+ *
+ * @wait: 1 to cause response to wait for work to become available (or
+ * timeout), 0 to cause response to return immediately
+ */
+static inline void cvmx_pow_work_request_async(int scr_addr,
+ cvmx_pow_wait_t wait)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Must not have a switch pending when requesting work */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_work_request_async_nocheck(scr_addr, wait);
+}
+
+/**
+ * Gets result of asynchronous work request. Performs a IOBDMA sync
+ * to wait for the response.
+ *
+ * @scr_addr: Scratch memory address to get the result from. Byte
+ * address, must be 8 byte aligned.
+ *
+ * Returns the WQE from the scratch register, or NULL if no
+ * work was available.
+ */
+static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
+{
+ cvmx_pow_tag_load_resp_t result;
+
+ CVMX_SYNCIOBDMA;
+ result.u64 = cvmx_scratch_read64(scr_addr);
+
+ if (result.s_work.no_work)
+ return NULL;
+ else
+ return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
+}
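+
+/*
+ * Illustrative prefetch pattern (not part of the original header): request
+ * the next piece of work asynchronously while the current one is being
+ * processed. SCR_OFFSET is a placeholder 8-byte-aligned scratchpad byte
+ * offset and process_work() a hypothetical handler:
+ *
+ *   cvmx_pow_work_request_async(SCR_OFFSET, CVMX_POW_NO_WAIT);
+ *   process_work(current_work);
+ *   next_work = cvmx_pow_work_response_async(SCR_OFFSET);
+ */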
+
+/**
+ * Checks if a work queue entry pointer returned by a work
+ * request is valid. It may be invalid due to no work
+ * being available or due to a timeout.
+ *
+ * @wqe_ptr: pointer to a work queue entry returned by the POW
+ *
+ * Returns 0 if pointer is valid
+ * 1 if invalid (no work was returned)
+ */
+static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
+{
+ return wqe_ptr == NULL;
+}
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * NOTE: This should not be used when switching from a NULL tag. Use
+ * cvmx_pow_tag_sw_full() instead.
+ *
+ * This function does no checks, so the caller must ensure that any
+ * previous tag switch has completed.
+ *
+ * @tag: new tag value
+ * @tag_type: new tag type (ordered or atomic)
+ */
+static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
+ enum cvmx_pow_tag_type tag_type)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warning("%s called with NULL_NULL tag\n",
+ __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warning("%s called with NULL tag\n", __func__);
+ if ((current_tag.s.type == tag_type)
+ && (current_tag.s.tag == tag))
+ pr_warning("%s called to perform a tag switch to the "
+ "same tag\n",
+ __func__);
+ if (tag_type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warning("%s called to perform a tag switch to "
+ "NULL. Use cvmx_pow_tag_sw_null() instead\n",
+ __func__);
+ }
+
+ /*
+ * Note that WQE in DRAM is not updated here, as the POW does
+ * not read from DRAM once the WQE is in flight. See hardware
+ * manual for complete details. It is the application's
+ * responsibility to keep track of the current tag value if
+ * that is important.
+ */
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s.tag = tag;
+ tag_req.s.type = tag_type;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
+
+ /* once this store arrives at POW, it will attempt the switch;
+ software must wait for the switch to complete separately */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * NOTE: This should not be used when switching from a NULL tag. Use
+ * cvmx_pow_tag_sw_full() instead.
+ *
+ * This function waits for any previous tag switch to complete, and also
+ * displays an error on tag switches to NULL.
+ *
+ * @tag: new tag value
+ * @tag_type: new tag type (ordered or atomic)
+ */
+static inline void cvmx_pow_tag_sw(uint32_t tag,
+ enum cvmx_pow_tag_type tag_type)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /*
+ * Note that WQE in DRAM is not updated here, as the POW does
+ * not read from DRAM once the WQE is in flight. See hardware
+ * manual for complete details. It is the application's
+ * responsibility to keep track of the current tag value if
+ * that is important.
+ */
+
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_nocheck(tag, tag_type);
+}
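+
+/*
+ * Illustrative sketch (not part of the original header): serialize access
+ * to a per-flow resource by switching to an ATOMIC tag derived from the
+ * flow, then dropping ordering once the critical section is done.
+ * "flow_tag" is a placeholder value and cvmx_pow_tag_sw_null() is
+ * declared later in this header:
+ *
+ *   cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC);
+ *   cvmx_pow_tag_sw_wait();
+ *   ... critical section ...
+ *   cvmx_pow_tag_sw_null();
+ */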
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * This function must be used for tag switches from NULL.
+ *
+ * This function does no checks, so the caller must ensure that any
+ * previous tag switch has completed.
+ *
+ * @wqp: pointer to work queue entry to submit. This entry is
+ * updated to match the other parameters
+ * @tag: tag value to be assigned to work queue entry
+ * @tag_type: type of tag
+ * @group: group value for the work queue entry.
+ */
+static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warning("%s called with NULL_NULL tag\n",
+ __func__);
+ if ((current_tag.s.type == tag_type)
+ && (current_tag.s.tag == tag))
+ pr_warning("%s called to perform a tag switch to "
+ "the same tag\n",
+ __func__);
+ if (tag_type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warning("%s called to perform a tag switch to "
+ "NULL. Use cvmx_pow_tag_sw_null() instead\n",
+ __func__);
+ if (wqp != cvmx_phys_to_ptr(0x80))
+ if (wqp != cvmx_pow_get_current_wqp())
+ pr_warning("%s passed WQE(%p) doesn't match "
+ "the address in the POW(%p)\n",
+ __func__, wqp,
+ cvmx_pow_get_current_wqp());
+ }
+
+ /*
+ * Note that WQE in DRAM is not updated here, as the POW does
+ * not read from DRAM once the WQE is in flight. See hardware
+ * manual for complete details. It is the application's
+ * responsibility to keep track of the current tag value if
+ * that is important.
+ */
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
+ tag_req.s.tag = tag;
+ tag_req.s.type = tag_type;
+ tag_req.s.grp = group;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
+ ptr.sio.offset = CAST64(wqp);
+
+ /*
+ * once this store arrives at POW, it will attempt the switch;
+ * software must wait for the switch to complete separately.
+ */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/**
+ * Starts a tag switch to the provided tag value and tag type.
+ * Completion for the tag switch must be checked for separately. This
+ * function does NOT update the work queue entry in dram to match tag
+ * value and type, so the application must keep track of these if they
+ * are important to the application. This tag switch command must not
+ * be used for switches to NULL, as the tag switch pending bit will be
+ * set by the switch request, but never cleared by the hardware.
+ *
+ * This function must be used for tag switches from NULL.
+ *
+ * This function waits for any pending tag switches to complete
+ * before requesting the tag switch.
+ *
+ * @wqp: pointer to work queue entry to submit. This entry is updated
+ * to match the other parameters
+ * @tag: tag value to be assigned to work queue entry
+ * @tag_type: type of tag
+ * @group: group value for the work queue entry.
+ */
+static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
+}
+
+/**
+ * Switch to a NULL tag, which ends any ordering or
+ * synchronization provided by the POW for the current
+ * work queue entry. This operation completes immediately,
+ * so completion should not be waited for.
+ * This function does NOT wait for previous tag switches to complete,
+ * so the caller must ensure that any previous tag switches have completed.
+ */
+static inline void cvmx_pow_tag_sw_null_nocheck(void)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warning("%s called with NULL_NULL tag\n",
+ __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warning("%s called when we already have a "
+ "NULL tag\n",
+ __func__);
+ }
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
+ tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
+
+ cvmx_write_io(ptr.u64, tag_req.u64);
+
+ /* switch to NULL completes immediately */
+}
+
+/**
+ * Switch to a NULL tag, which ends any ordering or
+ * synchronization provided by the POW for the current
+ * work queue entry. This operation completes immediately,
+ * so completion should not be waited for.
+ * This function waits for any pending tag switches to complete
+ * before requesting the switch to NULL.
+ */
+static inline void cvmx_pow_tag_sw_null(void)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_null_nocheck();
+
+ /* switch to NULL completes immediately */
+}
+
+/**
+ * Submits work to an input queue. This function updates the work
+ * queue entry in DRAM to match the arguments given. Note that the
+ * tag provided is for the work queue entry submitted, and is
+ * unrelated to the tag that the core currently holds.
+ *
+ * @wqp: pointer to work queue entry to submit. This entry is
+ * updated to match the other parameters
+ * @tag: tag value to be assigned to work queue entry
+ * @tag_type: type of tag
+ * @qos: Input queue to add to.
+ * @grp: group value for the work queue entry.
+ */
+static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t qos, uint64_t grp)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ wqp->qos = qos;
+ wqp->tag = tag;
+ wqp->tag_type = tag_type;
+ wqp->grp = grp;
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
+ tag_req.s.type = tag_type;
+ tag_req.s.tag = tag;
+ tag_req.s.qos = qos;
+ tag_req.s.grp = grp;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
+ ptr.sio.offset = cvmx_ptr_to_phys(wqp);
+
+ /*
+ * SYNC write to memory before the work submit. This is
+ * necessary as POW may read values from DRAM at this time.
+ */
+ CVMX_SYNCWS;
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
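+
+/*
+ * Illustrative sketch (not part of the original header): submit a freshly
+ * built work queue entry to input queue 0, group 0, with an ordered tag.
+ * "wqe" and "tag" are placeholder values:
+ *
+ *   cvmx_pow_work_submit(wqe, tag, CVMX_POW_TAG_TYPE_ORDERED, 0, 0);
+ */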
+
+/**
+ * This function sets the group mask for a core. The group mask
+ * indicates which groups each core will accept work from. There are
+ * 16 groups.
+ *
+ * @core_num: core to apply mask to
+ * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
+ * representing groups 0-15.
+ * Each 1 bit in the mask enables the core to accept work from
+ * the corresponding group.
+ */
+static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
+{
+ union cvmx_pow_pp_grp_mskx grp_msk;
+
+ grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
+ grp_msk.s.grp_msk = mask;
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
+}
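+
+/*
+ * Illustrative sketch (not part of the original header): allow this core
+ * to accept work only from groups 0 and 1:
+ *
+ *   cvmx_pow_set_group_mask(cvmx_get_core_num(), 0x3);
+ */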
+
+/**
+ * This function sets POW static priorities for a core. Each input queue has
+ * an associated priority value.
+ *
+ * @core_num: core to apply priorities to
+ * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
+ * Highest priority is 0 and lowest is 7. A priority value
+ * of 0xF instructs POW to skip the Input Queue when
+ * scheduling to this specific core.
+ * NOTE: priorities should not have gaps in values, meaning
+ * {0,1,1,1,1,1,1,1} is a valid configuration while
+ * {0,2,2,2,2,2,2,2} is not.
+ */
+static inline void cvmx_pow_set_priority(uint64_t core_num,
+ const uint8_t priority[])
+{
+ /* POW priorities are supported on CN5xxx and later */
+ if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_pow_pp_grp_mskx grp_msk;
+
+ grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
+ grp_msk.s.qos0_pri = priority[0];
+ grp_msk.s.qos1_pri = priority[1];
+ grp_msk.s.qos2_pri = priority[2];
+ grp_msk.s.qos3_pri = priority[3];
+ grp_msk.s.qos4_pri = priority[4];
+ grp_msk.s.qos5_pri = priority[5];
+ grp_msk.s.qos6_pri = priority[6];
+ grp_msk.s.qos7_pri = priority[7];
+
+ /* Detect gaps between priorities and flag error */
+ {
+ int i;
+ uint32_t prio_mask = 0;
+
+ for (i = 0; i < 8; i++)
+ if (priority[i] != 0xF)
+ prio_mask |= 1 << priority[i];
+
+ if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
+ pr_err("POW static priorities should be "
+ "contiguous (0x%llx)\n",
+ (unsigned long long)prio_mask);
+ return;
+ }
+ }
+
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
+ }
+}
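+
+/*
+ * Illustrative sketch (not part of the original header): give input queue
+ * 0 the highest priority on this core and keep the remaining queues at
+ * the next level, which satisfies the "no gaps" rule described above:
+ *
+ *   static const uint8_t prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };
+ *
+ *   cvmx_pow_set_priority(cvmx_get_core_num(), prio);
+ */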
+
+/**
+ * Performs a tag switch and then an immediate deschedule. This completes
+ * immediately, so completion must not be waited for. This function does NOT
+ * update the wqe in DRAM to match arguments.
+ *
+ * This function does NOT wait for any prior tag switches to complete, so the
+ * calling code must do this.
+ *
+ * Note the following CAVEAT of the Octeon HW behavior when
+ * re-scheduling DE-SCHEDULEd items whose (next) state is
+ * ORDERED:
+ * - If there are no switches pending at the time that the
+ * HW executes the de-schedule, the HW will only re-schedule
+ * the head of the FIFO associated with the given tag. This
+ * means that in many respects, the HW treats this ORDERED
+ * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
+ * case (to an ORDERED tag), the HW will do the switch
+ * before the deschedule whenever it is possible to do
+ * the switch immediately, so it may often look like
+ * this case.
+ * - If there is a pending switch to ORDERED at the time
+ * the HW executes the de-schedule, the HW will perform
+ * the switch at the time it re-schedules, and will be
+ * able to reschedule any/all of the entries with the
+ * same tag.
+ * Due to this behavior, the RECOMMENDATION to software is
+ * that they have a (next) state of ATOMIC when they
+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,
+ * SW can choose to immediately switch to an ORDERED tag
+ * after the work (that has an ATOMIC tag) is re-scheduled.
+ * Note that since there are never any tag switches pending
+ * when the HW re-schedules, this switch can be IMMEDIATE upon
+ * the reception of the pointer during the re-schedule.
+ *
+ * @tag: New tag value
+ * @tag_type: New tag type
+ * @group: New group value
+ * @no_sched: Control whether this work queue entry will be rescheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
+ */
+static inline void cvmx_pow_tag_sw_desched_nocheck(
+ uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group,
+ uint64_t no_sched)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warning("%s called with NULL_NULL tag\n",
+ __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warning("%s called with NULL tag. Deschedule not "
+ "allowed from NULL state\n",
+ __func__);
+ if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
+ && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
+ pr_warning("%s called where neither the before or "
+ "after tag is ATOMIC\n",
+ __func__);
+ }
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
+ tag_req.s.tag = tag;
+ tag_req.s.type = tag_type;
+ tag_req.s.grp = group;
+ tag_req.s.no_sched = no_sched;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
+ /*
+ * since TAG3 is used, this store will clear the local pending
+ * switch bit.
+ */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/**
+ * Performs a tag switch and then an immediate deschedule. This completes
+ * immediately, so completion must not be waited for. This function does NOT
+ * update the wqe in DRAM to match arguments.
+ *
+ * This function waits for any prior tag switches to complete, so the
+ * calling code may call this function with a pending tag switch.
+ *
+ * Note the following CAVEAT of the Octeon HW behavior when
+ * re-scheduling DE-SCHEDULEd items whose (next) state is
+ * ORDERED:
+ * - If there are no switches pending at the time that the
+ * HW executes the de-schedule, the HW will only re-schedule
+ * the head of the FIFO associated with the given tag. This
+ * means that in many respects, the HW treats this ORDERED
+ * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
+ * case (to an ORDERED tag), the HW will do the switch
+ * before the deschedule whenever it is possible to do
+ * the switch immediately, so it may often look like
+ * this case.
+ * - If there is a pending switch to ORDERED at the time
+ * the HW executes the de-schedule, the HW will perform
+ * the switch at the time it re-schedules, and will be
+ * able to reschedule any/all of the entries with the
+ * same tag.
+ * Due to this behavior, the RECOMMENDATION to software is
+ * that they have a (next) state of ATOMIC when they
+ * DE-SCHEDULE. If an ORDERED tag is what was really desired,
+ * SW can choose to immediately switch to an ORDERED tag
+ * after the work (that has an ATOMIC tag) is re-scheduled.
+ * Note that since there are never any tag switches pending
+ * when the HW re-schedules, this switch can be IMMEDIATE upon
+ * the reception of the pointer during the re-schedule.
+ *
+ * @tag: New tag value
+ * @tag_type: New tag type
+ * @group: New group value
+ * @no_sched: Control whether this work queue entry will be rescheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
+ */
+static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t group, uint64_t no_sched)
+{
+ if (CVMX_ENABLE_POW_CHECKS)
+ __cvmx_pow_warn_if_pending_switch(__func__);
+
+ /* Need to make sure any writes to the work queue entry are complete */
+ CVMX_SYNCWS;
+ /*
+ * Ensure that there is not a pending tag switch, as a tag
+ * switch cannot be started if a previous switch is still
+ * pending.
+ */
+ cvmx_pow_tag_sw_wait();
+ cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
+}
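+
+/*
+ * Example (illustrative sketch, not part of the API): the recommended
+ * deschedule pattern described in the comment above. cvmx_pow_tag_sw() is
+ * assumed to be the plain tag switch helper declared earlier in this file.
+ *
+ * Deschedule with an ATOMIC next state:
+ *
+ *   cvmx_pow_tag_sw_desched(tag, CVMX_POW_TAG_TYPE_ATOMIC, grp, 0);
+ *
+ * When the work is later re-scheduled and received again, no tag switch
+ * can be pending, so the ORDERED switch that was really wanted can start
+ * immediately:
+ *
+ *   cvmx_pow_tag_sw(tag, CVMX_POW_TAG_TYPE_ORDERED);
+ */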
+
+/**
+ * Deschedules the current work queue entry.
+ *
+ * @no_sched: no schedule flag value to be set on the work queue
+ * entry. If this is set the entry will not be
+ * rescheduled.
+ */
+static inline void cvmx_pow_desched(uint64_t no_sched)
+{
+ cvmx_addr_t ptr;
+ cvmx_pow_tag_req_t tag_req;
+
+ if (CVMX_ENABLE_POW_CHECKS) {
+ cvmx_pow_tag_req_t current_tag;
+ __cvmx_pow_warn_if_pending_switch(__func__);
+ current_tag = cvmx_pow_get_current_tag();
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
+ pr_warning("%s called with NULL_NULL tag\n",
+ __func__);
+ if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
+ pr_warning("%s called with NULL tag. Deschedule not "
+ "expected from NULL state\n",
+ __func__);
+ }
+
+ /* Need to make sure any writes to the work queue entry are complete */
+ CVMX_SYNCWS;
+
+ tag_req.u64 = 0;
+ tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
+ tag_req.s.no_sched = no_sched;
+
+ ptr.u64 = 0;
+ ptr.sio.mem_region = CVMX_IO_SEG;
+ ptr.sio.is_io = 1;
+ ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
+ /*
+ * since TAG3 is used, this store will clear the local pending
+ * switch bit.
+ */
+ cvmx_write_io(ptr.u64, tag_req.u64);
+}
+
+/****************************************************
+* Define usage of bits within the 32 bit tag values.
+*****************************************************/
+
+/*
+ * Number of bits of the tag used by software. The SW bits are always
+ * a contiguous block of the high bits starting at bit 31. The hardware
+ * bits are always the low bits. By default, the top 8 bits of the
+ * tag are reserved for software, and the low 24 are set by the IPD
+ * unit.
+ */
+#define CVMX_TAG_SW_BITS (8)
+#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
+
+/* Below is the list of values for the top 8 bits of the tag. */
+/*
+ * Tag values with top byte of this value are reserved for internal
+ * executive uses.
+ */
+#define CVMX_TAG_SW_BITS_INTERNAL 0x1
+/* The executive divides the remaining 24 bits as follows:
+ * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
+ *
+ * - the lower 16 bits (bits 15 - 0 of the tag) are the value
+ * within the subgroup
+ *
+ * Note that this section describes the format of tags generated by
+ * software - refer to the hardware documentation for a description of
+ * the tag values generated by the packet input hardware. Subgroups
+ * are defined here.
+ */
+/* Mask for the value portion of the tag */
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
+#define CVMX_TAG_SUBGROUP_SHIFT 16
+#define CVMX_TAG_SUBGROUP_PKO 0x1
+
+/* End of executive tag subgroup definitions */
+
+/*
+ * The remaining software bit values 0x2 - 0xff are available
+ * for application use.
+ */
+
+/**
+ * This function creates a 32 bit tag value from the two values provided.
+ *
+ * @sw_bits: The upper bits (number depends on configuration) are set
+ * to this value. The remainder of bits are set by the
+ * hw_bits parameter.
+ *
+ * @hw_bits: The lower bits (number depends on configuration) are set
+ * to this value. The remainder of bits are set by the
+ * sw_bits parameter.
+ *
+ * Returns 32 bit value of the combined hw and sw bits.
+ */
+static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
+{
+ return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
+ CVMX_TAG_SW_SHIFT) |
+ (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
+}
+
+/**
+ * Extracts the bits allocated for software use from the tag
+ *
+ * @tag: 32 bit tag value
+ *
+ * Returns N bit software tag value, where N is configurable with the
+ * CVMX_TAG_SW_BITS define
+ */
+static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
+{
+ return (tag >> (32 - CVMX_TAG_SW_BITS)) &
+ cvmx_build_mask(CVMX_TAG_SW_BITS);
+}
+
+/**
+ *
+ * Extracts the bits allocated for hardware use from the tag
+ *
+ * @tag: 32 bit tag value
+ *
+ * Returns (32 - N) bit software tag value, where N is configurable
+ * with the CVMX_TAG_SW_BITS define
+ */
+static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
+{
+ return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
+}
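+
+/*
+ * Example (illustrative only, values chosen for the default configuration
+ * of CVMX_TAG_SW_BITS = 8): build a tag that carries the internal
+ * executive marker in the software bits and a PKO subgroup value in the
+ * hardware bits, then pull the pieces back out.
+ *
+ *   uint32_t hw = (CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT) |
+ *                 0x1234;
+ *   uint32_t tag = cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL, hw);
+ *
+ * tag is now 0x01011234; cvmx_pow_tag_get_sw_bits(tag) returns 0x1 and
+ * cvmx_pow_tag_get_hw_bits(tag) returns 0x011234.
+ */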
+
+/**
+ * Store the current POW internal state into the supplied
+ * buffer. It is recommended that you pass a buffer of at least
+ * 128KB. The format of the capture may change based on SDK
+ * version and Octeon chip.
+ *
+ * @buffer: Buffer to store capture into
+ * @buffer_size:
+ * The size of the supplied buffer
+ *
+ * Returns Zero on success, negative on failure
+ */
+extern int cvmx_pow_capture(void *buffer, int buffer_size);
+
+/**
+ * Dump a POW capture to the console in a human readable format.
+ *
+ * @buffer: POW capture from cvmx_pow_capture()
+ * @buffer_size:
+ * Size of the buffer
+ */
+extern void cvmx_pow_display(void *buffer, int buffer_size);
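+
+/*
+ * Example (illustrative only; the buffer here is a made-up static
+ * allocation, callers may obtain the memory however they like):
+ *
+ *   static uint8_t pow_dump[128 * 1024];
+ *
+ *   if (cvmx_pow_capture(pow_dump, sizeof(pow_dump)) == 0)
+ *           cvmx_pow_display(pow_dump, sizeof(pow_dump));
+ */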
+
+/**
+ * Return the number of POW entries supported by this chip
+ *
+ * Returns Number of POW entries
+ */
+extern int cvmx_pow_get_num_entries(void);
+
+#endif /* __CVMX_POW_H__ */
diff --git a/drivers/staging/octeon/cvmx-scratch.h b/drivers/staging/octeon/cvmx-scratch.h
new file mode 100644
index 000000000000..96b70cfd6245
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-scratch.h
@@ -0,0 +1,139 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * This file provides support for the processor local scratch memory.
+ * Scratch memory is byte addressable - all addresses are byte addresses.
+ *
+ */
+
+#ifndef __CVMX_SCRATCH_H__
+#define __CVMX_SCRATCH_H__
+
+/*
+ * Note: This define must be a long, not a long long in order to
+ * compile without warnings for both 32bit and 64bit.
+ */
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+
+/**
+ * Reads an 8 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint8_t cvmx_scratch_read8(uint64_t address)
+{
+ return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Reads a 16 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint16_t cvmx_scratch_read16(uint64_t address)
+{
+ return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Reads a 32 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint32_t cvmx_scratch_read32(uint64_t address)
+{
+ return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Reads a 64 bit value from the processor local scratchpad memory.
+ *
+ * @address: byte address to read from
+ *
+ * Returns value read
+ */
+static inline uint64_t cvmx_scratch_read64(uint64_t address)
+{
+ return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
+}
+
+/**
+ * Writes an 8 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) =
+ (uint8_t) value;
+}
+
+/**
+ * Writes a 16 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) =
+ (uint16_t) value;
+}
+
+/**
+ * Writes a 32 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) =
+ (uint32_t) value;
+}
+
+/**
+ * Writes a 64 bit value to the processor local scratchpad memory.
+ *
+ * @address: byte address to write to
+ * @value: value to write
+ */
+static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
+{
+ *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
+}
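+
+/*
+ * Example (illustrative only): scratch addresses are plain byte offsets,
+ * so a value written at a given offset reads back from the same offset.
+ *
+ *   cvmx_scratch_write64(8, 0x0123456789abcdefull);
+ *   uint64_t v = cvmx_scratch_read64(8);
+ *
+ * v now holds 0x0123456789abcdef.
+ */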
+
+#endif /* __CVMX_SCRATCH_H__ */
diff --git a/drivers/staging/octeon/cvmx-smix-defs.h b/drivers/staging/octeon/cvmx-smix-defs.h
new file mode 100644
index 000000000000..9ae45fcbe3e3
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-smix-defs.h
@@ -0,0 +1,178 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SMIX_DEFS_H__
+#define __CVMX_SMIX_DEFS_H__
+
+#define CVMX_SMIX_CLK(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_CMD(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_EN(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_RD_DAT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_WR_DAT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
+
+union cvmx_smix_clk {
+ uint64_t u64;
+ struct cvmx_smix_clk_s {
+ uint64_t reserved_25_63:39;
+ uint64_t mode:1;
+ uint64_t reserved_21_23:3;
+ uint64_t sample_hi:5;
+ uint64_t sample_mode:1;
+ uint64_t reserved_14_14:1;
+ uint64_t clk_idle:1;
+ uint64_t preamble:1;
+ uint64_t sample:4;
+ uint64_t phase:8;
+ } s;
+ struct cvmx_smix_clk_cn30xx {
+ uint64_t reserved_21_63:43;
+ uint64_t sample_hi:5;
+ uint64_t reserved_14_15:2;
+ uint64_t clk_idle:1;
+ uint64_t preamble:1;
+ uint64_t sample:4;
+ uint64_t phase:8;
+ } cn30xx;
+ struct cvmx_smix_clk_cn30xx cn31xx;
+ struct cvmx_smix_clk_cn30xx cn38xx;
+ struct cvmx_smix_clk_cn30xx cn38xxp2;
+ struct cvmx_smix_clk_cn50xx {
+ uint64_t reserved_25_63:39;
+ uint64_t mode:1;
+ uint64_t reserved_21_23:3;
+ uint64_t sample_hi:5;
+ uint64_t reserved_14_15:2;
+ uint64_t clk_idle:1;
+ uint64_t preamble:1;
+ uint64_t sample:4;
+ uint64_t phase:8;
+ } cn50xx;
+ struct cvmx_smix_clk_s cn52xx;
+ struct cvmx_smix_clk_cn50xx cn52xxp1;
+ struct cvmx_smix_clk_s cn56xx;
+ struct cvmx_smix_clk_cn50xx cn56xxp1;
+ struct cvmx_smix_clk_cn30xx cn58xx;
+ struct cvmx_smix_clk_cn30xx cn58xxp1;
+};
+
+union cvmx_smix_cmd {
+ uint64_t u64;
+ struct cvmx_smix_cmd_s {
+ uint64_t reserved_18_63:46;
+ uint64_t phy_op:2;
+ uint64_t reserved_13_15:3;
+ uint64_t phy_adr:5;
+ uint64_t reserved_5_7:3;
+ uint64_t reg_adr:5;
+ } s;
+ struct cvmx_smix_cmd_cn30xx {
+ uint64_t reserved_17_63:47;
+ uint64_t phy_op:1;
+ uint64_t reserved_13_15:3;
+ uint64_t phy_adr:5;
+ uint64_t reserved_5_7:3;
+ uint64_t reg_adr:5;
+ } cn30xx;
+ struct cvmx_smix_cmd_cn30xx cn31xx;
+ struct cvmx_smix_cmd_cn30xx cn38xx;
+ struct cvmx_smix_cmd_cn30xx cn38xxp2;
+ struct cvmx_smix_cmd_s cn50xx;
+ struct cvmx_smix_cmd_s cn52xx;
+ struct cvmx_smix_cmd_s cn52xxp1;
+ struct cvmx_smix_cmd_s cn56xx;
+ struct cvmx_smix_cmd_s cn56xxp1;
+ struct cvmx_smix_cmd_cn30xx cn58xx;
+ struct cvmx_smix_cmd_cn30xx cn58xxp1;
+};
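+
+/*
+ * Example (illustrative sketch): start an MDIO transaction on SMI bus 0 by
+ * filling in a command word and writing it to the command register. The
+ * PHY_OP encoding (1 assumed to mean a register read here) and the chosen
+ * PHY/register addresses should be checked against the hardware manual.
+ *
+ *   union cvmx_smix_cmd cmd;
+ *
+ *   cmd.u64 = 0;
+ *   cmd.s.phy_op = 1;
+ *   cmd.s.phy_adr = 4;
+ *   cmd.s.reg_adr = 1;
+ *   cvmx_write_csr(CVMX_SMIX_CMD(0), cmd.u64);
+ */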
+
+union cvmx_smix_en {
+ uint64_t u64;
+ struct cvmx_smix_en_s {
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+ } s;
+ struct cvmx_smix_en_s cn30xx;
+ struct cvmx_smix_en_s cn31xx;
+ struct cvmx_smix_en_s cn38xx;
+ struct cvmx_smix_en_s cn38xxp2;
+ struct cvmx_smix_en_s cn50xx;
+ struct cvmx_smix_en_s cn52xx;
+ struct cvmx_smix_en_s cn52xxp1;
+ struct cvmx_smix_en_s cn56xx;
+ struct cvmx_smix_en_s cn56xxp1;
+ struct cvmx_smix_en_s cn58xx;
+ struct cvmx_smix_en_s cn58xxp1;
+};
+
+union cvmx_smix_rd_dat {
+ uint64_t u64;
+ struct cvmx_smix_rd_dat_s {
+ uint64_t reserved_18_63:46;
+ uint64_t pending:1;
+ uint64_t val:1;
+ uint64_t dat:16;
+ } s;
+ struct cvmx_smix_rd_dat_s cn30xx;
+ struct cvmx_smix_rd_dat_s cn31xx;
+ struct cvmx_smix_rd_dat_s cn38xx;
+ struct cvmx_smix_rd_dat_s cn38xxp2;
+ struct cvmx_smix_rd_dat_s cn50xx;
+ struct cvmx_smix_rd_dat_s cn52xx;
+ struct cvmx_smix_rd_dat_s cn52xxp1;
+ struct cvmx_smix_rd_dat_s cn56xx;
+ struct cvmx_smix_rd_dat_s cn56xxp1;
+ struct cvmx_smix_rd_dat_s cn58xx;
+ struct cvmx_smix_rd_dat_s cn58xxp1;
+};
+
+union cvmx_smix_wr_dat {
+ uint64_t u64;
+ struct cvmx_smix_wr_dat_s {
+ uint64_t reserved_18_63:46;
+ uint64_t pending:1;
+ uint64_t val:1;
+ uint64_t dat:16;
+ } s;
+ struct cvmx_smix_wr_dat_s cn30xx;
+ struct cvmx_smix_wr_dat_s cn31xx;
+ struct cvmx_smix_wr_dat_s cn38xx;
+ struct cvmx_smix_wr_dat_s cn38xxp2;
+ struct cvmx_smix_wr_dat_s cn50xx;
+ struct cvmx_smix_wr_dat_s cn52xx;
+ struct cvmx_smix_wr_dat_s cn52xxp1;
+ struct cvmx_smix_wr_dat_s cn56xx;
+ struct cvmx_smix_wr_dat_s cn56xxp1;
+ struct cvmx_smix_wr_dat_s cn58xx;
+ struct cvmx_smix_wr_dat_s cn58xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-spi.c b/drivers/staging/octeon/cvmx-spi.c
new file mode 100644
index 000000000000..82794d920cec
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-spi.c
@@ -0,0 +1,667 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Support library for the SPI
+ */
+#include <asm/octeon/octeon.h>
+
+#include "cvmx-config.h"
+
+#include "cvmx-pko.h"
+#include "cvmx-spi.h"
+
+#include "cvmx-spxx-defs.h"
+#include "cvmx-stxx-defs.h"
+#include "cvmx-srxx-defs.h"
+
+#define INVOKE_CB(function_p, args...) \
+ do { \
+ if (function_p) { \
+ res = function_p(args); \
+ if (res) \
+ return res; \
+ } \
+ } while (0)
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+static const char *modes[] =
+ { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
+#endif
+
+/* Default callbacks, can be overridden
+ * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
+ */
+static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
+ .reset_cb = cvmx_spi_reset_cb,
+ .calendar_setup_cb = cvmx_spi_calendar_setup_cb,
+ .clock_detect_cb = cvmx_spi_clock_detect_cb,
+ .training_cb = cvmx_spi_training_cb,
+ .calendar_sync_cb = cvmx_spi_calendar_sync_cb,
+ .interface_up_cb = cvmx_spi_interface_up_cb
+};
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @callbacks: Pointer to the callbacks structure to fill
+ *
+ * The current callbacks are copied into the supplied structure.
+ */
+void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
+{
+ memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
+}
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @new_callbacks: Pointer to an updated callbacks structure.
+ */
+void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
+{
+ memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
+}
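+
+/*
+ * Example (illustrative only): replace a single step of the initialization
+ * sequence while keeping the defaults for everything else. my_spi_reset is
+ * a made-up function with the same prototype as cvmx_spi_reset_cb().
+ *
+ *   cvmx_spi_callbacks_t cb;
+ *
+ *   cvmx_spi_get_callbacks(&cb);
+ *   cb.reset_cb = my_spi_reset;
+ *   cvmx_spi_set_callbacks(&cb);
+ */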
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ * @num_ports: Number of SPI ports to configure
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
+ int num_ports)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return res;
+
+ /* Callback to perform SPI4 reset */
+ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
+
+ /* Callback to perform calendar setup */
+ INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
+ num_ports);
+
+ /* Callback to perform clock detection */
+ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+ /* Callback to perform SPI4 link training */
+ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+ /* Callback to perform calendar sync */
+ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
+ timeout);
+
+ /* Callback to handle interface coming up */
+ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+ return res;
+}
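+
+/*
+ * Example (illustrative only; the interface number, port count and timeout
+ * depend on the board): bring up interface 0 in full duplex with 10 ports
+ * and allow 10 seconds for clock synchronization.
+ *
+ *   if (cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 10, 10) != 0)
+ *           cvmx_dprintf("SPI0: interface bring-up failed\n");
+ */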
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ int res = -1;
+
+ if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
+ return res;
+
+ cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
+
+ /* Callback to perform SPI4 reset */
+ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
+
+ /* NOTE: Calendar setup is not performed during restart */
+ /* Refer to cvmx_spi_start_interface() for the full sequence */
+
+ /* Callback to perform clock detection */
+ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
+
+ /* Callback to perform SPI4 link training */
+ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
+
+ /* Callback to perform calendar sync */
+ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
+ timeout);
+
+ /* Callback to handle interface coming up */
+ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
+
+ return res;
+}
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
+{
+ union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
+ union cvmx_spxx_clk_ctl spxx_clk_ctl;
+ union cvmx_spxx_bist_stat spxx_bist_stat;
+ union cvmx_spxx_int_msk spxx_int_msk;
+ union cvmx_stxx_int_msk stxx_int_msk;
+ union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
+ int index;
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+
+ /* Disable SPI error events while we run BIST */
+ spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+ stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+
+ /* Run BIST in the SPI interface */
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.runbist = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ cvmx_wait(10 * MS);
+ spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
+ if (spxx_bist_stat.s.stat0)
+ cvmx_dprintf
+ ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
+ interface);
+ if (spxx_bist_stat.s.stat1)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
+ interface);
+ if (spxx_bist_stat.s.stat2)
+ cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
+ interface);
+
+ /* Clear the calendar table after BIST to fix parity errors */
+ for (index = 0; index < 32; index++) {
+ union cvmx_srxx_spi4_calx srxx_spi4_calx;
+ union cvmx_stxx_spi4_calx stxx_spi4_calx;
+
+ srxx_spi4_calx.u64 = 0;
+ srxx_spi4_calx.s.oddpar = 1;
+ cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
+ srxx_spi4_calx.u64);
+
+ stxx_spi4_calx.u64 = 0;
+ stxx_spi4_calx.s.oddpar = 1;
+ cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
+ stxx_spi4_calx.u64);
+ }
+
+ /* Re-enable reporting of error interrupts */
+ cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
+ cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
+ cvmx_write_csr(CVMX_STXX_INT_REG(interface),
+ cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
+
+ /* Setup the CLKDLY right in the middle */
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.seetrn = 0;
+ spxx_clk_ctl.s.clkdly = 0x10;
+ spxx_clk_ctl.s.runbist = 0;
+ spxx_clk_ctl.s.statdrv = 0;
+ /* This should always be on the opposite edge as statdrv */
+ spxx_clk_ctl.s.statrcv = 1;
+ spxx_clk_ctl.s.sndtrn = 0;
+ spxx_clk_ctl.s.drptrn = 0;
+ spxx_clk_ctl.s.rcvtrn = 0;
+ spxx_clk_ctl.s.srxdlck = 0;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ cvmx_wait(100 * MS);
+
+ /* Reset SRX0 DLL */
+ spxx_clk_ctl.s.srxdlck = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+
+ /* Wait for the Inf0 SPI4 RX DLL to lock */
+ cvmx_wait(100 * MS);
+
+ /* Enable dynamic alignment */
+ spxx_trn4_ctl.s.trntest = 0;
+ spxx_trn4_ctl.s.jitter = 1;
+ spxx_trn4_ctl.s.clr_boot = 1;
+ spxx_trn4_ctl.s.set_boot = 0;
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX))
+ spxx_trn4_ctl.s.maxdist = 3;
+ else
+ spxx_trn4_ctl.s.maxdist = 8;
+ spxx_trn4_ctl.s.macro_en = 1;
+ spxx_trn4_ctl.s.mux_en = 1;
+ cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+ spxx_dbg_deskew_ctl.u64 = 0;
+ cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
+ spxx_dbg_deskew_ctl.u64);
+
+ return 0;
+}
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @num_ports: Number of ports to configure on SPI
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
+ int num_ports)
+{
+ int port;
+ int index;
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ union cvmx_srxx_com_ctl srxx_com_ctl;
+ union cvmx_srxx_spi4_stat srxx_spi4_stat;
+
+ /* SRX0 number of Ports */
+ srxx_com_ctl.u64 = 0;
+ srxx_com_ctl.s.prts = num_ports - 1;
+ srxx_com_ctl.s.st_en = 0;
+ srxx_com_ctl.s.inf_en = 0;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+
+ /* SRX0 Calendar Table. This round-robins through all ports */
+ port = 0;
+ index = 0;
+ while (port < num_ports) {
+ union cvmx_srxx_spi4_calx srxx_spi4_calx;
+ srxx_spi4_calx.u64 = 0;
+ srxx_spi4_calx.s.prt0 = port++;
+ srxx_spi4_calx.s.prt1 = port++;
+ srxx_spi4_calx.s.prt2 = port++;
+ srxx_spi4_calx.s.prt3 = port++;
+ srxx_spi4_calx.s.oddpar =
+ ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
+ cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
+ srxx_spi4_calx.u64);
+ index++;
+ }
+ srxx_spi4_stat.u64 = 0;
+ srxx_spi4_stat.s.len = num_ports;
+ srxx_spi4_stat.s.m = 1;
+ cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
+ srxx_spi4_stat.u64);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ union cvmx_stxx_arb_ctl stxx_arb_ctl;
+ union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
+ union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
+ union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
+ union cvmx_stxx_spi4_stat stxx_spi4_stat;
+ union cvmx_stxx_spi4_dat stxx_spi4_dat;
+
+ /* STX0 Config */
+ stxx_arb_ctl.u64 = 0;
+ stxx_arb_ctl.s.igntpa = 0;
+ stxx_arb_ctl.s.mintrn = 0;
+ cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
+
+ gmxx_tx_spi_max.u64 = 0;
+ gmxx_tx_spi_max.s.max1 = 8;
+ gmxx_tx_spi_max.s.max2 = 4;
+ gmxx_tx_spi_max.s.slice = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
+ gmxx_tx_spi_max.u64);
+
+ gmxx_tx_spi_thresh.u64 = 0;
+ gmxx_tx_spi_thresh.s.thresh = 4;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
+ gmxx_tx_spi_thresh.u64);
+
+ gmxx_tx_spi_ctl.u64 = 0;
+ gmxx_tx_spi_ctl.s.tpa_clr = 0;
+ gmxx_tx_spi_ctl.s.cont_pkt = 0;
+ cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
+ gmxx_tx_spi_ctl.u64);
+
+ /* STX0 Training Control */
+ stxx_spi4_dat.u64 = 0;
+ /* Minimum needed by dynamic alignment */
+ stxx_spi4_dat.s.alpha = 32;
+ stxx_spi4_dat.s.max_t = 0xFFFF; /* Minimum interval is 0x20 */
+ cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
+ stxx_spi4_dat.u64);
+
+ /* STX0 Calendar Table. This round-robins through all ports */
+ port = 0;
+ index = 0;
+ while (port < num_ports) {
+ union cvmx_stxx_spi4_calx stxx_spi4_calx;
+ stxx_spi4_calx.u64 = 0;
+ stxx_spi4_calx.s.prt0 = port++;
+ stxx_spi4_calx.s.prt1 = port++;
+ stxx_spi4_calx.s.prt2 = port++;
+ stxx_spi4_calx.s.prt3 = port++;
+ stxx_spi4_calx.s.oddpar =
+ ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
+ cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
+ stxx_spi4_calx.u64);
+ index++;
+ }
+ stxx_spi4_stat.u64 = 0;
+ stxx_spi4_stat.s.len = num_ports;
+ stxx_spi4_stat.s.m = 1;
+ cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
+ stxx_spi4_stat.u64);
+ }
+
+ return 0;
+}
+
+/**
+ * Callback to perform clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ int clock_transitions;
+ union cvmx_spxx_clk_stat stat;
+ uint64_t timeout_time;
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+
+ /*
+ * Regardless of operating mode, both Tx and Rx clocks must be
+ * present for the SPI interface to operate.
+ */
+ cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /*
+ * Require 100 clock transitions in order to avoid any noise
+ * in the beginning.
+ */
+ clock_transitions = 100;
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
+ /*
+ * We've seen a clock transition, so decrement
+ * the number we still need.
+ */
+ clock_transitions--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.s4clk0 = 0;
+ stat.s.s4clk1 = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
+
+ cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /*
+ * Require 100 clock transitions in order to avoid any noise in the
+ * beginning.
+ */
+ clock_transitions = 100;
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
+ /*
+ * We've seen a clock transition, so decrement
+ * the number we still need
+ */
+ clock_transitions--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.d4clk0 = 0;
+ stat.s.d4clk1 = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
+
+ return 0;
+}
+
+/**
+ * Callback to perform link training
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for link to be trained (in seconds)
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
+ union cvmx_spxx_clk_stat stat;
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+ uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ int rx_training_needed;
+
+ /* SRX0 & STX0 Inf0 Links are configured - begin training */
+ union cvmx_spxx_clk_ctl spxx_clk_ctl;
+ spxx_clk_ctl.u64 = 0;
+ spxx_clk_ctl.s.seetrn = 0;
+ spxx_clk_ctl.s.clkdly = 0x10;
+ spxx_clk_ctl.s.runbist = 0;
+ spxx_clk_ctl.s.statdrv = 0;
+ /* This should always be on the opposite edge as statdrv */
+ spxx_clk_ctl.s.statrcv = 1;
+ spxx_clk_ctl.s.sndtrn = 1;
+ spxx_clk_ctl.s.drptrn = 1;
+ spxx_clk_ctl.s.rcvtrn = 1;
+ spxx_clk_ctl.s.srxdlck = 1;
+ cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
+ cvmx_wait(1000 * MS);
+
+ /* SRX0 clear the boot bit */
+ spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
+ spxx_trn4_ctl.s.clr_boot = 1;
+ cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
+
+ /* Wait for the training sequence to complete */
+ cvmx_dprintf("SPI%d: Waiting for training\n", interface);
+ cvmx_wait(1000 * MS);
+ /* Wait a really long time here */
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
+ /*
+ * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
+ * We'll be pessimistic and wait for a lot more.
+ */
+ rx_training_needed = 500;
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (stat.s.srxtrn && rx_training_needed) {
+ rx_training_needed--;
+ cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
+ stat.s.srxtrn = 0;
+ }
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.srxtrn == 0);
+
+ return 0;
+}
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for calendar data in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
+{
+ uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ /* SRX0 interface should be good, send calendar data */
+ union cvmx_srxx_com_ctl srxx_com_ctl;
+ cvmx_dprintf
+ ("SPI%d: Rx is synchronized, start sending calendar data\n",
+ interface);
+ srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+ srxx_com_ctl.s.inf_en = 1;
+ srxx_com_ctl.s.st_en = 1;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ /* STX0 has achieved sync */
+ /* The correspondent board should be sending calendar data */
+ /* Enable the STX0 STAT receiver. */
+ union cvmx_spxx_clk_stat stat;
+ uint64_t timeout_time;
+ union cvmx_stxx_com_ctl stxx_com_ctl;
+ stxx_com_ctl.u64 = 0;
+ stxx_com_ctl.s.st_en = 1;
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+
+ /* Waiting for calendar sync on STX0 STAT */
+ cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
+ interface, interface);
+ timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
+ /* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */
+ do {
+ stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
+ if (cvmx_get_cycle() > timeout_time) {
+ cvmx_dprintf("SPI%d: Timeout\n", interface);
+ return -1;
+ }
+ } while (stat.s.stxcal == 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Callback to handle interface up
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
+{
+ union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
+ union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
+ union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;
+
+ if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
+ union cvmx_srxx_com_ctl srxx_com_ctl;
+ srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
+ srxx_com_ctl.s.inf_en = 1;
+ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
+ cvmx_dprintf("SPI%d: Rx is now up\n", interface);
+ }
+
+ if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
+ union cvmx_stxx_com_ctl stxx_com_ctl;
+ stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
+ stxx_com_ctl.s.inf_en = 1;
+ cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
+ cvmx_dprintf("SPI%d: Tx is now up\n", interface);
+ }
+
+ gmxx_rxx_frm_min.u64 = 0;
+ gmxx_rxx_frm_min.s.len = 64;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
+ gmxx_rxx_frm_min.u64);
+ gmxx_rxx_frm_max.u64 = 0;
+ gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
+ gmxx_rxx_frm_max.u64);
+ gmxx_rxx_jabber.u64 = 0;
+ gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);
+
+ return 0;
+}
diff --git a/drivers/staging/octeon/cvmx-spi.h b/drivers/staging/octeon/cvmx-spi.h
new file mode 100644
index 000000000000..e814648953a5
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-spi.h
@@ -0,0 +1,269 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * This file contains defines for the SPI interface
+ */
+#ifndef __CVMX_SPI_H__
+#define __CVMX_SPI_H__
+
+#include "cvmx-gmxx-defs.h"
+
+/* CSR typedefs have been moved to cvmx-csr-*.h */
+
+typedef enum {
+ CVMX_SPI_MODE_UNKNOWN = 0,
+ CVMX_SPI_MODE_TX_HALFPLEX = 1,
+ CVMX_SPI_MODE_RX_HALFPLEX = 2,
+ CVMX_SPI_MODE_DUPLEX = 3
+} cvmx_spi_mode_t;
+
+/** Callbacks structure to customize SPI4 initialization sequence */
+typedef struct {
+ /** Called to reset SPI4 DLL */
+ int (*reset_cb) (int interface, cvmx_spi_mode_t mode);
+
+ /** Called to setup calendar */
+ int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode,
+ int num_ports);
+
+ /** Called for Tx and Rx clock detection */
+ int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+ /** Called to perform link training */
+ int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout);
+
+ /** Called for calendar data synchronization */
+ int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+ /** Called when interface is up */
+ int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode);
+
+} cvmx_spi_callbacks_t;
+
+/**
+ * Return true if the supplied interface is configured for SPI
+ *
+ * @interface: Interface to check
+ * Returns True if interface is SPI
+ */
+static inline int cvmx_spi_is_spi_interface(int interface)
+{
+ uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+ return (gmxState & 0x2) && (gmxState & 0x1);
+}
+
+/**
+ * Initialize and start the SPI interface.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ * @num_ports: Number of SPI ports to configure
+ *
+ * Returns Zero on success, negative on failure.
+ */
+extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
+ int timeout, int num_ports);
+
+/**
+ * This routine restarts the SPI interface after it has lost synchronization
+ * with its correspondent system.
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ * Returns Zero on success, negative on failure.
+ */
+extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Return non-zero if the SPI interface has a SPI4000 attached
+ *
+ * @interface: SPI interface the SPI4000 is connected to
+ *
+ * Returns Non-zero if a SPI4000 is present, zero otherwise.
+ */
+static inline int cvmx_spi4000_is_present(int interface)
+{
+ return 0;
+}
+
+/**
+ * Initialize the SPI4000 for use
+ *
+ * @interface: SPI interface the SPI4000 is connected to
+ */
+static inline int cvmx_spi4000_initialize(int interface)
+{
+ return 0;
+}
+
+/**
+ * Poll a SPI4000 port and check its speed
+ *
+ * @interface: Interface the SPI4000 is on
+ * @port: Port to poll (0-9)
+ * Returns Status of the port: 0 means down; any other value means the port is up.
+ */
+static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
+ int interface,
+ int port)
+{
+ union cvmx_gmxx_rxx_rx_inbnd r;
+ r.u64 = 0;
+ return r;
+}
+
+/**
+ * Get current SPI4 initialization callbacks
+ *
+ * @callbacks: Pointer to the callbacks structure to fill
+ *
+ * The current callbacks are copied into the supplied structure.
+ */
+extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks);
+
+/**
+ * Set new SPI4 initialization callbacks
+ *
+ * @new_callbacks: Pointer to an updated callbacks structure.
+ */
+extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
+
+/**
+ * Callback to perform SPI4 reset
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
+
+/**
+ * Callback to setup calendar and miscellaneous settings before clock
+ * detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @num_ports: Number of ports to configure on SPI
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
+ int num_ports);
+
+/**
+ * Callback to perform clock detection
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for clock synchronization in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Callback to perform link training
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for link to be trained (in seconds)
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Callback to perform calendar data synchronization
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ * @timeout: Timeout to wait for calendar data in seconds
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
+ int timeout);
+
+/**
+ * Callback to handle interface up
+ *
+ * @interface: The identifier of the packet interface to configure and
+ * use as a SPI interface.
+ * @mode: The operating mode for the SPI interface. The interface
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
+ *
+ * Returns Zero on success, non-zero error code on failure (will cause
+ * SPI initialization to abort)
+ */
+extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
+
+#endif /* __CVMX_SPI_H__ */
diff --git a/drivers/staging/octeon/cvmx-spxx-defs.h b/drivers/staging/octeon/cvmx-spxx-defs.h
new file mode 100644
index 000000000000..b16940e32c83
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-spxx-defs.h
@@ -0,0 +1,347 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SPXX_DEFS_H__
+#define __CVMX_SPXX_DEFS_H__
+
+#define CVMX_SPXX_BCKPRS_CNT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000340ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_BIST_STAT(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800900007F8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_CLK_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000348ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_CLK_STAT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000350ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000368ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000370ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_DRV_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000358ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_ERR_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000320ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_INT_DAT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000318ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_INT_MSK(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000308ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_INT_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000300ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_INT_SYNC(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000310ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_TPA_ACC(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000338ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_TPA_MAX(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000330ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_TPA_SEL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000328ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SPXX_TRN4_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000360ull + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_spxx_bckprs_cnt {
+ uint64_t u64;
+ struct cvmx_spxx_bckprs_cnt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_spxx_bckprs_cnt_s cn38xx;
+ struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
+ struct cvmx_spxx_bckprs_cnt_s cn58xx;
+ struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
+};
+
+union cvmx_spxx_bist_stat {
+ uint64_t u64;
+ struct cvmx_spxx_bist_stat_s {
+ uint64_t reserved_3_63:61;
+ uint64_t stat2:1;
+ uint64_t stat1:1;
+ uint64_t stat0:1;
+ } s;
+ struct cvmx_spxx_bist_stat_s cn38xx;
+ struct cvmx_spxx_bist_stat_s cn38xxp2;
+ struct cvmx_spxx_bist_stat_s cn58xx;
+ struct cvmx_spxx_bist_stat_s cn58xxp1;
+};
+
+union cvmx_spxx_clk_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_clk_ctl_s {
+ uint64_t reserved_17_63:47;
+ uint64_t seetrn:1;
+ uint64_t reserved_12_15:4;
+ uint64_t clkdly:5;
+ uint64_t runbist:1;
+ uint64_t statdrv:1;
+ uint64_t statrcv:1;
+ uint64_t sndtrn:1;
+ uint64_t drptrn:1;
+ uint64_t rcvtrn:1;
+ uint64_t srxdlck:1;
+ } s;
+ struct cvmx_spxx_clk_ctl_s cn38xx;
+ struct cvmx_spxx_clk_ctl_s cn38xxp2;
+ struct cvmx_spxx_clk_ctl_s cn58xx;
+ struct cvmx_spxx_clk_ctl_s cn58xxp1;
+};
+
+union cvmx_spxx_clk_stat {
+ uint64_t u64;
+ struct cvmx_spxx_clk_stat_s {
+ uint64_t reserved_11_63:53;
+ uint64_t stxcal:1;
+ uint64_t reserved_9_9:1;
+ uint64_t srxtrn:1;
+ uint64_t s4clk1:1;
+ uint64_t s4clk0:1;
+ uint64_t d4clk1:1;
+ uint64_t d4clk0:1;
+ uint64_t reserved_0_3:4;
+ } s;
+ struct cvmx_spxx_clk_stat_s cn38xx;
+ struct cvmx_spxx_clk_stat_s cn38xxp2;
+ struct cvmx_spxx_clk_stat_s cn58xx;
+ struct cvmx_spxx_clk_stat_s cn58xxp1;
+};
+
+union cvmx_spxx_dbg_deskew_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_dbg_deskew_ctl_s {
+ uint64_t reserved_30_63:34;
+ uint64_t fallnop:1;
+ uint64_t fall8:1;
+ uint64_t reserved_26_27:2;
+ uint64_t sstep_go:1;
+ uint64_t sstep:1;
+ uint64_t reserved_22_23:2;
+ uint64_t clrdly:1;
+ uint64_t dec:1;
+ uint64_t inc:1;
+ uint64_t mux:1;
+ uint64_t offset:5;
+ uint64_t bitsel:5;
+ uint64_t offdly:6;
+ uint64_t dllfrc:1;
+ uint64_t dlldis:1;
+ } s;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
+ struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
+};
+
+union cvmx_spxx_dbg_deskew_state {
+ uint64_t u64;
+ struct cvmx_spxx_dbg_deskew_state_s {
+ uint64_t reserved_9_63:55;
+ uint64_t testres:1;
+ uint64_t unxterm:1;
+ uint64_t muxsel:2;
+ uint64_t offset:5;
+ } s;
+ struct cvmx_spxx_dbg_deskew_state_s cn38xx;
+ struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
+ struct cvmx_spxx_dbg_deskew_state_s cn58xx;
+ struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
+};
+
+union cvmx_spxx_drv_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_drv_ctl_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_spxx_drv_ctl_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t stx4ncmp:4;
+ uint64_t stx4pcmp:4;
+ uint64_t srx4cmp:8;
+ } cn38xx;
+ struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
+ struct cvmx_spxx_drv_ctl_cn58xx {
+ uint64_t reserved_24_63:40;
+ uint64_t stx4ncmp:4;
+ uint64_t stx4pcmp:4;
+ uint64_t reserved_10_15:6;
+ uint64_t srx4cmp:10;
+ } cn58xx;
+ struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
+};
+
+union cvmx_spxx_err_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_err_ctl_s {
+ uint64_t reserved_9_63:55;
+ uint64_t prtnxa:1;
+ uint64_t dipcls:1;
+ uint64_t dippay:1;
+ uint64_t reserved_4_5:2;
+ uint64_t errcnt:4;
+ } s;
+ struct cvmx_spxx_err_ctl_s cn38xx;
+ struct cvmx_spxx_err_ctl_s cn38xxp2;
+ struct cvmx_spxx_err_ctl_s cn58xx;
+ struct cvmx_spxx_err_ctl_s cn58xxp1;
+};
+
+union cvmx_spxx_int_dat {
+ uint64_t u64;
+ struct cvmx_spxx_int_dat_s {
+ uint64_t reserved_32_63:32;
+ uint64_t mul:1;
+ uint64_t reserved_14_30:17;
+ uint64_t calbnk:2;
+ uint64_t rsvop:4;
+ uint64_t prt:8;
+ } s;
+ struct cvmx_spxx_int_dat_s cn38xx;
+ struct cvmx_spxx_int_dat_s cn38xxp2;
+ struct cvmx_spxx_int_dat_s cn58xx;
+ struct cvmx_spxx_int_dat_s cn58xxp1;
+};
+
+union cvmx_spxx_int_msk {
+ uint64_t u64;
+ struct cvmx_spxx_int_msk_s {
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+ struct cvmx_spxx_int_msk_s cn38xx;
+ struct cvmx_spxx_int_msk_s cn38xxp2;
+ struct cvmx_spxx_int_msk_s cn58xx;
+ struct cvmx_spxx_int_msk_s cn58xxp1;
+};
+
+union cvmx_spxx_int_reg {
+ uint64_t u64;
+ struct cvmx_spxx_int_reg_s {
+ uint64_t reserved_32_63:32;
+ uint64_t spf:1;
+ uint64_t reserved_12_30:19;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+ struct cvmx_spxx_int_reg_s cn38xx;
+ struct cvmx_spxx_int_reg_s cn38xxp2;
+ struct cvmx_spxx_int_reg_s cn58xx;
+ struct cvmx_spxx_int_reg_s cn58xxp1;
+};
+
+union cvmx_spxx_int_sync {
+ uint64_t u64;
+ struct cvmx_spxx_int_sync_s {
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+ struct cvmx_spxx_int_sync_s cn38xx;
+ struct cvmx_spxx_int_sync_s cn38xxp2;
+ struct cvmx_spxx_int_sync_s cn58xx;
+ struct cvmx_spxx_int_sync_s cn58xxp1;
+};
+
+union cvmx_spxx_tpa_acc {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_acc_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_spxx_tpa_acc_s cn38xx;
+ struct cvmx_spxx_tpa_acc_s cn38xxp2;
+ struct cvmx_spxx_tpa_acc_s cn58xx;
+ struct cvmx_spxx_tpa_acc_s cn58xxp1;
+};
+
+union cvmx_spxx_tpa_max {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_max_s {
+ uint64_t reserved_32_63:32;
+ uint64_t max:32;
+ } s;
+ struct cvmx_spxx_tpa_max_s cn38xx;
+ struct cvmx_spxx_tpa_max_s cn38xxp2;
+ struct cvmx_spxx_tpa_max_s cn58xx;
+ struct cvmx_spxx_tpa_max_s cn58xxp1;
+};
+
+union cvmx_spxx_tpa_sel {
+ uint64_t u64;
+ struct cvmx_spxx_tpa_sel_s {
+ uint64_t reserved_4_63:60;
+ uint64_t prtsel:4;
+ } s;
+ struct cvmx_spxx_tpa_sel_s cn38xx;
+ struct cvmx_spxx_tpa_sel_s cn38xxp2;
+ struct cvmx_spxx_tpa_sel_s cn58xx;
+ struct cvmx_spxx_tpa_sel_s cn58xxp1;
+};
+
+union cvmx_spxx_trn4_ctl {
+ uint64_t u64;
+ struct cvmx_spxx_trn4_ctl_s {
+ uint64_t reserved_13_63:51;
+ uint64_t trntest:1;
+ uint64_t jitter:3;
+ uint64_t clr_boot:1;
+ uint64_t set_boot:1;
+ uint64_t maxdist:5;
+ uint64_t macro_en:1;
+ uint64_t mux_en:1;
+ } s;
+ struct cvmx_spxx_trn4_ctl_s cn38xx;
+ struct cvmx_spxx_trn4_ctl_s cn38xxp2;
+ struct cvmx_spxx_trn4_ctl_s cn58xx;
+ struct cvmx_spxx_trn4_ctl_s cn58xxp1;
+};
+
+#endif
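The register unions above all follow the same pattern: the raw 64-bit CSR value in u64 overlaid with per-model bitfield views. As a rough usage sketch (assuming the CVMX_SPXX_TRN4_CTL() address macro declared with the other CVMX_SPXX_* macros in this header, plus the SDK's cvmx_read_csr()/cvmx_write_csr() accessors):

union cvmx_spxx_trn4_ctl trn4_ctl;

/* Read-modify-write SPX0_TRN4_CTL through the bitfield view. */
trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(0));
trn4_ctl.s.mux_en = 1;          /* enable the training mux */
trn4_ctl.s.macro_en = 1;        /* enable the training macro */
trn4_ctl.s.maxdist = 16;        /* allowed deskew distance */
cvmx_write_csr(CVMX_SPXX_TRN4_CTL(0), trn4_ctl.u64);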
diff --git a/drivers/staging/octeon/cvmx-srxx-defs.h b/drivers/staging/octeon/cvmx-srxx-defs.h
new file mode 100644
index 000000000000..d82b366c279f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-srxx-defs.h
@@ -0,0 +1,126 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SRXX_DEFS_H__
+#define __CVMX_SRXX_DEFS_H__
+
+#define CVMX_SRXX_COM_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000200ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SRXX_IGN_RX_FULL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000218ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SRXX_SPI4_CALX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000000ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SRXX_SPI4_STAT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000208ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SRXX_SW_TICK_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000220ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_SRXX_SW_TICK_DAT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000228ull + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_srxx_com_ctl {
+ uint64_t u64;
+ struct cvmx_srxx_com_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t prts:4;
+ uint64_t st_en:1;
+ uint64_t reserved_1_2:2;
+ uint64_t inf_en:1;
+ } s;
+ struct cvmx_srxx_com_ctl_s cn38xx;
+ struct cvmx_srxx_com_ctl_s cn38xxp2;
+ struct cvmx_srxx_com_ctl_s cn58xx;
+ struct cvmx_srxx_com_ctl_s cn58xxp1;
+};
+
+union cvmx_srxx_ign_rx_full {
+ uint64_t u64;
+ struct cvmx_srxx_ign_rx_full_s {
+ uint64_t reserved_16_63:48;
+ uint64_t ignore:16;
+ } s;
+ struct cvmx_srxx_ign_rx_full_s cn38xx;
+ struct cvmx_srxx_ign_rx_full_s cn38xxp2;
+ struct cvmx_srxx_ign_rx_full_s cn58xx;
+ struct cvmx_srxx_ign_rx_full_s cn58xxp1;
+};
+
+union cvmx_srxx_spi4_calx {
+ uint64_t u64;
+ struct cvmx_srxx_spi4_calx_s {
+ uint64_t reserved_17_63:47;
+ uint64_t oddpar:1;
+ uint64_t prt3:4;
+ uint64_t prt2:4;
+ uint64_t prt1:4;
+ uint64_t prt0:4;
+ } s;
+ struct cvmx_srxx_spi4_calx_s cn38xx;
+ struct cvmx_srxx_spi4_calx_s cn38xxp2;
+ struct cvmx_srxx_spi4_calx_s cn58xx;
+ struct cvmx_srxx_spi4_calx_s cn58xxp1;
+};
+
+union cvmx_srxx_spi4_stat {
+ uint64_t u64;
+ struct cvmx_srxx_spi4_stat_s {
+ uint64_t reserved_16_63:48;
+ uint64_t m:8;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+ } s;
+ struct cvmx_srxx_spi4_stat_s cn38xx;
+ struct cvmx_srxx_spi4_stat_s cn38xxp2;
+ struct cvmx_srxx_spi4_stat_s cn58xx;
+ struct cvmx_srxx_spi4_stat_s cn58xxp1;
+};
+
+union cvmx_srxx_sw_tick_ctl {
+ uint64_t u64;
+ struct cvmx_srxx_sw_tick_ctl_s {
+ uint64_t reserved_14_63:50;
+ uint64_t eop:1;
+ uint64_t sop:1;
+ uint64_t mod:4;
+ uint64_t opc:4;
+ uint64_t adr:4;
+ } s;
+ struct cvmx_srxx_sw_tick_ctl_s cn38xx;
+ struct cvmx_srxx_sw_tick_ctl_s cn58xx;
+ struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
+};
+
+union cvmx_srxx_sw_tick_dat {
+ uint64_t u64;
+ struct cvmx_srxx_sw_tick_dat_s {
+ uint64_t dat:64;
+ } s;
+ struct cvmx_srxx_sw_tick_dat_s cn38xx;
+ struct cvmx_srxx_sw_tick_dat_s cn58xx;
+ struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
+};
+
+#endif
diff --git a/drivers/staging/octeon/cvmx-stxx-defs.h b/drivers/staging/octeon/cvmx-stxx-defs.h
new file mode 100644
index 000000000000..4f209b62cae1
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-stxx-defs.h
@@ -0,0 +1,292 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_STXX_DEFS_H__
+#define __CVMX_STXX_DEFS_H__
+
+#define CVMX_STXX_ARB_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000608ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_BCKPRS_CNT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000688ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_COM_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000600ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_DIP_CNT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000690ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_IGN_CAL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000610ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_INT_MSK(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800900006A0ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_INT_REG(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000698ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_INT_SYNC(block_id) \
+ CVMX_ADD_IO_SEG(0x00011800900006A8ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_MIN_BST(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000618ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_SPI4_CALX(offset, block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000400ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_SPI4_DAT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000628ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_SPI4_STAT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000630ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_STAT_BYTES_HI(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000648ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_STAT_BYTES_LO(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000680ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_STAT_CTL(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000638ull + (((block_id) & 1) * 0x8000000ull))
+#define CVMX_STXX_STAT_PKT_XMT(block_id) \
+ CVMX_ADD_IO_SEG(0x0001180090000640ull + (((block_id) & 1) * 0x8000000ull))
+
+union cvmx_stxx_arb_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_arb_ctl_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mintrn:1;
+ uint64_t reserved_4_4:1;
+ uint64_t igntpa:1;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_stxx_arb_ctl_s cn38xx;
+ struct cvmx_stxx_arb_ctl_s cn38xxp2;
+ struct cvmx_stxx_arb_ctl_s cn58xx;
+ struct cvmx_stxx_arb_ctl_s cn58xxp1;
+};
+
+union cvmx_stxx_bckprs_cnt {
+ uint64_t u64;
+ struct cvmx_stxx_bckprs_cnt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_stxx_bckprs_cnt_s cn38xx;
+ struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
+ struct cvmx_stxx_bckprs_cnt_s cn58xx;
+ struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
+};
+
+union cvmx_stxx_com_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_com_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t st_en:1;
+ uint64_t reserved_1_2:2;
+ uint64_t inf_en:1;
+ } s;
+ struct cvmx_stxx_com_ctl_s cn38xx;
+ struct cvmx_stxx_com_ctl_s cn38xxp2;
+ struct cvmx_stxx_com_ctl_s cn58xx;
+ struct cvmx_stxx_com_ctl_s cn58xxp1;
+};
+
+union cvmx_stxx_dip_cnt {
+ uint64_t u64;
+ struct cvmx_stxx_dip_cnt_s {
+ uint64_t reserved_8_63:56;
+ uint64_t frmmax:4;
+ uint64_t dipmax:4;
+ } s;
+ struct cvmx_stxx_dip_cnt_s cn38xx;
+ struct cvmx_stxx_dip_cnt_s cn38xxp2;
+ struct cvmx_stxx_dip_cnt_s cn58xx;
+ struct cvmx_stxx_dip_cnt_s cn58xxp1;
+};
+
+union cvmx_stxx_ign_cal {
+ uint64_t u64;
+ struct cvmx_stxx_ign_cal_s {
+ uint64_t reserved_16_63:48;
+ uint64_t igntpa:16;
+ } s;
+ struct cvmx_stxx_ign_cal_s cn38xx;
+ struct cvmx_stxx_ign_cal_s cn38xxp2;
+ struct cvmx_stxx_ign_cal_s cn58xx;
+ struct cvmx_stxx_ign_cal_s cn58xxp1;
+};
+
+union cvmx_stxx_int_msk {
+ uint64_t u64;
+ struct cvmx_stxx_int_msk_s {
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+ struct cvmx_stxx_int_msk_s cn38xx;
+ struct cvmx_stxx_int_msk_s cn38xxp2;
+ struct cvmx_stxx_int_msk_s cn58xx;
+ struct cvmx_stxx_int_msk_s cn58xxp1;
+};
+
+union cvmx_stxx_int_reg {
+ uint64_t u64;
+ struct cvmx_stxx_int_reg_s {
+ uint64_t reserved_9_63:55;
+ uint64_t syncerr:1;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+ struct cvmx_stxx_int_reg_s cn38xx;
+ struct cvmx_stxx_int_reg_s cn38xxp2;
+ struct cvmx_stxx_int_reg_s cn58xx;
+ struct cvmx_stxx_int_reg_s cn58xxp1;
+};
+
+union cvmx_stxx_int_sync {
+ uint64_t u64;
+ struct cvmx_stxx_int_sync_s {
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+ struct cvmx_stxx_int_sync_s cn38xx;
+ struct cvmx_stxx_int_sync_s cn38xxp2;
+ struct cvmx_stxx_int_sync_s cn58xx;
+ struct cvmx_stxx_int_sync_s cn58xxp1;
+};
+
+union cvmx_stxx_min_bst {
+ uint64_t u64;
+ struct cvmx_stxx_min_bst_s {
+ uint64_t reserved_9_63:55;
+ uint64_t minb:9;
+ } s;
+ struct cvmx_stxx_min_bst_s cn38xx;
+ struct cvmx_stxx_min_bst_s cn38xxp2;
+ struct cvmx_stxx_min_bst_s cn58xx;
+ struct cvmx_stxx_min_bst_s cn58xxp1;
+};
+
+union cvmx_stxx_spi4_calx {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_calx_s {
+ uint64_t reserved_17_63:47;
+ uint64_t oddpar:1;
+ uint64_t prt3:4;
+ uint64_t prt2:4;
+ uint64_t prt1:4;
+ uint64_t prt0:4;
+ } s;
+ struct cvmx_stxx_spi4_calx_s cn38xx;
+ struct cvmx_stxx_spi4_calx_s cn38xxp2;
+ struct cvmx_stxx_spi4_calx_s cn58xx;
+ struct cvmx_stxx_spi4_calx_s cn58xxp1;
+};
+
+union cvmx_stxx_spi4_dat {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_dat_s {
+ uint64_t reserved_32_63:32;
+ uint64_t alpha:16;
+ uint64_t max_t:16;
+ } s;
+ struct cvmx_stxx_spi4_dat_s cn38xx;
+ struct cvmx_stxx_spi4_dat_s cn38xxp2;
+ struct cvmx_stxx_spi4_dat_s cn58xx;
+ struct cvmx_stxx_spi4_dat_s cn58xxp1;
+};
+
+union cvmx_stxx_spi4_stat {
+ uint64_t u64;
+ struct cvmx_stxx_spi4_stat_s {
+ uint64_t reserved_16_63:48;
+ uint64_t m:8;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+ } s;
+ struct cvmx_stxx_spi4_stat_s cn38xx;
+ struct cvmx_stxx_spi4_stat_s cn38xxp2;
+ struct cvmx_stxx_spi4_stat_s cn58xx;
+ struct cvmx_stxx_spi4_stat_s cn58xxp1;
+};
+
+union cvmx_stxx_stat_bytes_hi {
+ uint64_t u64;
+ struct cvmx_stxx_stat_bytes_hi_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_stxx_stat_bytes_hi_s cn38xx;
+ struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
+ struct cvmx_stxx_stat_bytes_hi_s cn58xx;
+ struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
+};
+
+union cvmx_stxx_stat_bytes_lo {
+ uint64_t u64;
+ struct cvmx_stxx_stat_bytes_lo_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_stxx_stat_bytes_lo_s cn38xx;
+ struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
+ struct cvmx_stxx_stat_bytes_lo_s cn58xx;
+ struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
+};
+
+union cvmx_stxx_stat_ctl {
+ uint64_t u64;
+ struct cvmx_stxx_stat_ctl_s {
+ uint64_t reserved_5_63:59;
+ uint64_t clr:1;
+ uint64_t bckprs:4;
+ } s;
+ struct cvmx_stxx_stat_ctl_s cn38xx;
+ struct cvmx_stxx_stat_ctl_s cn38xxp2;
+ struct cvmx_stxx_stat_ctl_s cn58xx;
+ struct cvmx_stxx_stat_ctl_s cn58xxp1;
+};
+
+union cvmx_stxx_stat_pkt_xmt {
+ uint64_t u64;
+ struct cvmx_stxx_stat_pkt_xmt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
+ struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
+ struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
+ struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
+};
+
+#endif
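A small sketch of how the split statistics registers declared here are typically read back, assuming cvmx_read_csr() and the CVMX_STXX_STAT_BYTES_* address macros defined above:

/* Assemble the 64-bit SPI transmit byte counter from the HI/LO pair. */
static uint64_t spi_tx_bytes(int interface)
{
        union cvmx_stxx_stat_bytes_hi hi;
        union cvmx_stxx_stat_bytes_lo lo;

        hi.u64 = cvmx_read_csr(CVMX_STXX_STAT_BYTES_HI(interface));
        lo.u64 = cvmx_read_csr(CVMX_STXX_STAT_BYTES_LO(interface));
        return ((uint64_t)hi.s.cnt << 32) | lo.s.cnt;
}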
diff --git a/drivers/staging/octeon/cvmx-wqe.h b/drivers/staging/octeon/cvmx-wqe.h
new file mode 100644
index 000000000000..653610953d28
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-wqe.h
@@ -0,0 +1,397 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ *
+ * This header file defines the work queue entry (wqe) data structure.
+ * Since this is a commonly used structure that depends on structures
+ * from several hardware blocks, those definitions have been placed
+ * in this file to create a single point of definition of the wqe
+ * format.
+ * Data structures are still named according to the block that they
+ * relate to.
+ *
+ */
+
+#ifndef __CVMX_WQE_H__
+#define __CVMX_WQE_H__
+
+#include "cvmx-packet.h"
+
+
+#define OCT_TAG_TYPE_STRING(x) \
+ (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
+ (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
+ (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
+ "NULL_NULL")))
+
+/**
+ * HW decode / err_code in work queue entry
+ */
+typedef union {
+ uint64_t u64;
+
+ /* Use this struct if the hardware determines that the packet is IP */
+ struct {
+ /* HW sets this to the number of buffers used by this packet */
+ uint64_t bufs:8;
+ /* HW sets to the number of L2 bytes prior to the IP */
+ uint64_t ip_offset:8;
+ /* set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_valid:1;
+ /* Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ /* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
+ uint64_t vlan_cfi:1;
+ /* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
+ uint64_t vlan_id:12;
+ /* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
+ uint64_t pr:4;
+ uint64_t unassigned2:8;
+ /* the packet needs to be decompressed */
+ uint64_t dec_ipcomp:1;
+ /* the packet is either TCP or UDP */
+ uint64_t tcp_or_udp:1;
+ /* the packet needs to be decrypted (ESP or AH) */
+ uint64_t dec_ipsec:1;
+ /* the packet is IPv6 */
+ uint64_t is_v6:1;
+
+ /*
+ * (rcv_error, not_IP, IP_exc, is_frag, L4_error,
+ * software, etc.).
+ */
+
+ /*
+ * reserved for software use, hardware will clear on
+ * packet creation.
+ */
+ uint64_t software:1;
+ /* exceptional conditions below */
+ /* the receive interface hardware detected an L4 error
+ * (only applies if !rcv_error && !not_IP && !IP_exc &&
+ * !is_frag); failure indicated in err_code below, decode:
+ *
+ * - 1 = Malformed L4
+ * - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
+ * - 3 = UDP Length Error: The UDP length field would
+ * make the UDP data longer than what remains in
+ * the IP packet (as defined by the IP header
+ * length field).
+ * - 4 = Bad L4 Port: either the source or destination
+ * TCP/UDP port is 0.
+ * - 8 = TCP FIN Only: the packet is TCP and only the
+ * FIN flag set.
+ * - 9 = TCP No Flags: the packet is TCP and no flags
+ * are set.
+ * - 10 = TCP FIN RST: the packet is TCP and both FIN
+ * and RST are set.
+ * - 11 = TCP SYN URG: the packet is TCP and both SYN
+ * and URG are set.
+ * - 12 = TCP SYN RST: the packet is TCP and both SYN
+ * and RST are set.
+ * - 13 = TCP SYN FIN: the packet is TCP and both SYN
+ * and FIN are set.
+ */
+ uint64_t L4_error:1;
+ /* set if the packet is a fragment */
+ uint64_t is_frag:1;
+ /* the receive interface hardware detected an IP error
+ * / exception (only applies if !rcv_error && !not_IP)
+ * failure indicated in err_code below, decode:
+ *
+ * - 1 = Not IP: the IP version field is neither 4 nor
+ * 6.
+ * - 2 = IPv4 Header Checksum Error: the IPv4 header
+ * has a checksum violation.
+ * - 3 = IP Malformed Header: the packet is not long
+ * enough to contain the IP header.
+ * - 4 = IP Malformed: the packet is not long enough
+ * to contain the bytes indicated by the IP
+ * header. Pad is allowed.
+ * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
+ * Hop Count field are zero.
+ * - 6 = IP Options
+ */
+ uint64_t IP_exc:1;
+ /*
+ * Set if the hardware determined that the packet is a
+ * broadcast.
+ */
+ uint64_t is_bcast:1;
+ /*
+ * Set if the hardware determined that the packet is a
+ * multi-cast.
+ */
+ uint64_t is_mcast:1;
+ /*
+ * Set if the packet may not be IP (must be zero in
+ * this case).
+ */
+ uint64_t not_IP:1;
+ /*
+ * The receive interface hardware detected a receive
+ * error (must be zero in this case).
+ */
+ uint64_t rcv_error:1;
+ /* lower err_code = first-level descriptor of the
+ * work */
+ /* zero for packet submitted by hardware that isn't on
+ * the slow path */
+ /* type is cvmx_pip_err_t */
+ uint64_t err_code:8;
+ } s;
+
+ /* use this to get at the 16 vlan bits */
+ struct {
+ uint64_t unused1:16;
+ uint64_t vlan:16;
+ uint64_t unused2:32;
+ } svlan;
+
+ /*
+ * use this struct if the hardware could not determine that
+ * the packet is ip.
+ */
+ struct {
+ /*
+ * HW sets this to the number of buffers used by this
+ * packet.
+ */
+ uint64_t bufs:8;
+ uint64_t unused:8;
+ /* set to 1 if we found DSA/VLAN in the L2 */
+ uint64_t vlan_valid:1;
+ /* Set to 1 if the DSA/VLAN tag is stacked */
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ /*
+ * HW sets to the DSA/VLAN CFI flag (valid when
+ * vlan_valid)
+ */
+ uint64_t vlan_cfi:1;
+ /*
+ * HW sets to the DSA/VLAN_ID field (valid when
+ * vlan_valid).
+ */
+ uint64_t vlan_id:12;
+ /*
+ * Ring Identifier (if PCIe). Requires
+ * PIP_GBL_CTL[RING_EN]=1
+ */
+ uint64_t pr:4;
+ uint64_t unassigned2:12;
+ /*
+ * reserved for software use, hardware will clear on
+ * packet creation.
+ */
+ uint64_t software:1;
+ uint64_t unassigned3:1;
+ /*
+ * set if the hardware determined that the packet is
+ * rarp.
+ */
+ uint64_t is_rarp:1;
+ /*
+ * set if the hardware determined that the packet is
+ * arp
+ */
+ uint64_t is_arp:1;
+ /*
+ * set if the hardware determined that the packet is a
+ * broadcast.
+ */
+ uint64_t is_bcast:1;
+ /*
+ * set if the hardware determined that the packet is a
+ * multi-cast
+ */
+ uint64_t is_mcast:1;
+ /*
+ * set if the packet may not be IP (must be one in
+ * this case)
+ */
+ uint64_t not_IP:1;
+ /* The receive interface hardware detected a receive
+ * error. Failure indicated in err_code below,
+ * decode:
+ *
+ * - 1 = partial error: a packet was partially
+ * received, but internal buffering / bandwidth
+ * was not adequate to receive the entire
+ * packet.
+ * - 2 = jabber error: the RGMII packet was too large
+ * and is truncated.
+ * - 3 = overrun error: the RGMII packet is longer
+ * than allowed and had an FCS error.
+ * - 4 = oversize error: the RGMII packet is longer
+ * than allowed.
+ * - 5 = alignment error: the RGMII packet is not an
+ * integer number of bytes
+ * and had an FCS error (100M and 10M only).
+ * - 6 = fragment error: the RGMII packet is shorter
+ * than allowed and had an FCS error.
+ * - 7 = GMX FCS error: the RGMII packet had an FCS
+ * error.
+ * - 8 = undersize error: the RGMII packet is shorter
+ * than allowed.
+ * - 9 = extend error: the RGMII packet had an extend
+ * error.
+ * - 10 = length mismatch error: the RGMII packet had
+ * a length that did not match the length field
+ * in the L2 HDR.
+ * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
+ * packet had one or more data reception errors
+ * (RXERR) or the SPI4 packet had one or more
+ * DIP4 errors.
+ * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
+ * packet was not large enough to cover the
+ * skipped bytes or the SPI4 packet was
+ * terminated with an Abort EOPS.
+ * - 13 = RGMII nibble error/SPI4 Port NXA Error: the
+ * RGMII packet had a studder error (data not
+ * repeated - 10/100M only) or the SPI4 packet
+ * was sent to an NXA.
+ * - 16 = FCS error: a SPI4.2 packet had an FCS error.
+ * - 17 = Skip error: a packet was not large enough to
+ * cover the skipped bytes.
+ * - 18 = L2 header malformed: the packet is not long
+ * enough to contain the L2.
+ */
+
+ uint64_t rcv_error:1;
+ /*
+ * lower err_code = first-level descriptor of the
+ * work
+ */
+ /*
+ * zero for packet submitted by hardware that isn't on
+ * the slow path
+ */
+ /* type is cvmx_pip_err_t (union, so can't use directly) */
+ uint64_t err_code:8;
+ } snoip;
+
+} cvmx_pip_wqe_word2;
+
+/**
+ * Work queue entry format
+ *
+ * must be 8-byte aligned
+ */
+typedef struct {
+
+ /*****************************************************************
+ * WORD 0
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ */
+
+ /**
+ * raw chksum result generated by the HW
+ */
+ uint16_t hw_chksum;
+ /**
+ * Field unused by hardware - available for software
+ */
+ uint8_t unused;
+ /**
+ * Next pointer used by hardware for list maintenance.
+ * May be written/read by HW before the work queue
+ * entry is scheduled to a PP
+ * (Only 36 bits used in Octeon 1)
+ */
+ uint64_t next_ptr:40;
+
+ /*****************************************************************
+ * WORD 1
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ */
+
+ /**
+ * HW sets to the total number of bytes in the packet
+ */
+ uint64_t len:16;
+ /**
+ * HW sets this to input physical port
+ */
+ uint64_t ipprt:6;
+
+ /**
+ * HW sets this to what it thought the priority of the input packet was
+ */
+ uint64_t qos:3;
+
+ /**
+ * the group that the work queue entry will be scheduled to
+ */
+ uint64_t grp:4;
+ /**
+ * the type of the tag (ORDERED, ATOMIC, NULL)
+ */
+ uint64_t tag_type:3;
+ /**
+ * the synchronization/ordering tag
+ */
+ uint64_t tag:32;
+
+ /**
+ * WORD 2 HW WRITE: the following 64 bits are filled in by
+ * hardware when a packet arrives. This indicates a variety of
+ * status and error conditions.
+ */
+ cvmx_pip_wqe_word2 word2;
+
+ /**
+ * Pointer to the first segment of the packet.
+ */
+ union cvmx_buf_ptr packet_ptr;
+
+ /**
+ * HW WRITE: octeon will fill in a programmable amount from the
+ * packet, up to (at most, but perhaps less) the amount
+ * needed to fill the work queue entry to 128 bytes
+ *
+ * If the packet is recognized to be IP, the hardware starts
+ * writing here at the point where the IP header starts (except
+ * that the IPv4 header is padded for appropriate alignment).
+ * If the packet is not recognized to be IP, the hardware starts
+ * writing the beginning of the packet here.
+ */
+ uint8_t packet_data[96];
+
+ /**
+ * If desired, SW can make the work Q entry any length. For the
+ * purposes of discussion here, assume 128B always, as this is all
+ * that the hardware deals with.
+ *
+ */
+
+} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
+
+#endif /* __CVMX_WQE_H__ */
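To illustrate how the word2 decode tables above are meant to be consumed, here is a sketch of a receive-side error check; the helper name is hypothetical, and in the driver the work pointer comes back from a POW work request:

static int wqe_rx_error_code(cvmx_wqe_t *work)
{
        /* MAC/SPI level receive error, reported regardless of IP parsing. */
        if (work->word2.snoip.rcv_error)
                return work->word2.snoip.err_code;
        /* IP exception, reported only when the packet was parsed as IP. */
        if (!work->word2.s.not_IP && work->word2.s.IP_exc)
                return work->word2.s.err_code;
        return 0;
}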
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c
new file mode 100644
index 000000000000..3e6f5b8cc63d
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-common.c
@@ -0,0 +1,328 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/atomic.h>
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "ethernet-tx.h"
+#include "ethernet-mdio.h"
+#include "ethernet-util.h"
+#include "octeon-ethernet.h"
+#include "ethernet-common.h"
+
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-fau.h"
+#include "cvmx-helper.h"
+
+#include "cvmx-gmxx-defs.h"
+
+/**
+ * Get the low level ethernet statistics
+ *
+ * @dev: Device to get the statistics from
+ * Returns Pointer to the statistics
+ */
+static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+{
+ cvmx_pip_port_status_t rx_status;
+ cvmx_pko_port_status_t tx_status;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
+ if (octeon_is_simulation()) {
+ /* The simulator doesn't support statistics */
+ memset(&rx_status, 0, sizeof(rx_status));
+ memset(&tx_status, 0, sizeof(tx_status));
+ } else {
+ cvmx_pip_get_port_status(priv->port, 1, &rx_status);
+ cvmx_pko_get_port_status(priv->port, 1, &tx_status);
+ }
+
+ priv->stats.rx_packets += rx_status.inb_packets;
+ priv->stats.tx_packets += tx_status.packets;
+ priv->stats.rx_bytes += rx_status.inb_octets;
+ priv->stats.tx_bytes += tx_status.octets;
+ priv->stats.multicast += rx_status.multicast_packets;
+ priv->stats.rx_crc_errors += rx_status.inb_errors;
+ priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
+
+ /*
+ * The drop counter must be incremented atomically
+ * since the RX tasklet also increments it.
+ */
+#ifdef CONFIG_64BIT
+ atomic64_add(rx_status.dropped_packets,
+ (atomic64_t *)&priv->stats.rx_dropped);
+#else
+ atomic_add(rx_status.dropped_packets,
+ (atomic_t *)&priv->stats.rx_dropped);
+#endif
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * Set the multicast list. Per-address filtering is not implemented;
+ * multicast traffic is accepted or rejected as a whole.
+ *
+ * @dev: Device to work on
+ */
+static void cvm_oct_common_set_multicast_list(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ union cvmx_gmxx_rxx_adr_ctl control;
+ control.u64 = 0;
+ control.s.bcst = 1; /* Allow broadcast MAC addresses */
+
+ if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
+ (dev->flags & IFF_PROMISC))
+ /* Force accept multicast packets */
+ control.s.mcst = 2;
+ else
+ /* Force reject multicast packets */
+ control.s.mcst = 1;
+
+ if (dev->flags & IFF_PROMISC)
+ /*
+ * Reject matches if promisc. Since CAM is
+ * shut off, should accept everything.
+ */
+ control.s.cam_mode = 0;
+ else
+ /* Filter packets based on the CAM */
+ control.s.cam_mode = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
+ control.u64);
+ if (dev->flags & IFF_PROMISC)
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 0);
+ else
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 1);
+
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+}
+
+/**
+ * Set the hardware MAC address for a device
+ *
+ * @dev: Device to change the MAC address for
+ * @addr: Address structure to change it to. The MAC address is at addr + 2.
+ * Returns Zero on success
+ */
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ memcpy(dev->dev_addr, addr + 2, 6);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int i;
+ uint8_t *ptr = addr;
+ uint64_t mac = 0;
+ for (i = 0; i < 6; i++)
+ mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
+ ptr[2]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
+ ptr[3]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
+ ptr[4]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
+ ptr[5]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
+ ptr[6]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
+ ptr[7]);
+ cvm_oct_common_set_multicast_list(dev);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * Change the link MTU.
+ *
+ * @dev: Device to change
+ * @new_mtu: The new MTU
+ *
+ * Returns Zero on success
+ */
+static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ int vlan_bytes = 4;
+#else
+ int vlan_bytes = 0;
+#endif
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+ * 64 bytes and 65392 bytes.
+ */
+ if ((new_mtu + 14 + 4 + vlan_bytes < 64)
+ || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
+ pr_err("MTU must be between %d and %d.\n",
+ 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ /* Add ethernet header and FCS, and VLAN if configured. */
+ int max_packet = new_mtu + 14 + 4 + vlan_bytes;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Signal errors on packets larger than the MTU */
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
+ max_packet);
+ } else {
+ /*
+ * Set the hardware to truncate packets larger
+ * than the MTU and smaller than 64 bytes.
+ */
+ union cvmx_pip_frm_len_chkx frm_len_chk;
+ frm_len_chk.u64 = 0;
+ frm_len_chk.s.minlen = 64;
+ frm_len_chk.s.maxlen = max_packet;
+ cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
+ frm_len_chk.u64);
+ }
+ /*
+ * Set the hardware to truncate packets larger than
+ * the MTU. The jabber register must be set to a
+ * multiple of 8 bytes, so round up.
+ */
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
+ (max_packet + 7) & ~7u);
+ }
+ return 0;
+}
+
+/**
+ * Per network device initialization
+ *
+ * @dev: Device to initialize
+ * Returns Zero on success
+ */
+int cvm_oct_common_init(struct net_device *dev)
+{
+ static int count;
+ char mac[8] = { 0x00, 0x00,
+ octeon_bootinfo->mac_addr_base[0],
+ octeon_bootinfo->mac_addr_base[1],
+ octeon_bootinfo->mac_addr_base[2],
+ octeon_bootinfo->mac_addr_base[3],
+ octeon_bootinfo->mac_addr_base[4],
+ octeon_bootinfo->mac_addr_base[5] + count
+ };
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ /*
+ * Force the interface to use the POW send if always_use_pow
+ * was specified or it is in the pow send list.
+ */
+ if ((pow_send_group != -1)
+ && (always_use_pow || strstr(pow_send_list, dev->name)))
+ priv->queue = -1;
+
+ if (priv->queue != -1) {
+ dev->hard_start_xmit = cvm_oct_xmit;
+ if (USE_HW_TCPUDP_CHECKSUM)
+ dev->features |= NETIF_F_IP_CSUM;
+ } else
+ dev->hard_start_xmit = cvm_oct_xmit_pow;
+ count++;
+
+ dev->get_stats = cvm_oct_common_get_stats;
+ dev->set_mac_address = cvm_oct_common_set_mac_address;
+ dev->set_multicast_list = cvm_oct_common_set_multicast_list;
+ dev->change_mtu = cvm_oct_common_change_mtu;
+ dev->do_ioctl = cvm_oct_ioctl;
+ /* We do our own locking, Linux doesn't need to */
+ dev->features |= NETIF_F_LLTX;
+ SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = cvm_oct_poll_controller;
+#endif
+
+ cvm_oct_mdio_setup_device(dev);
+ dev->set_mac_address(dev, mac);
+ dev->change_mtu(dev, dev->mtu);
+
+ /*
+ * Zero out stats for port so we won't mistakenly show
+ * counters from the bootloader.
+ */
+ memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats));
+
+ return 0;
+}
+
+void cvm_oct_common_uninit(struct net_device *dev)
+{
+ /* Currently nothing to do */
+}
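For reference, the frame-size arithmetic in cvm_oct_common_change_mtu() above works out as follows for a standard 1500-byte MTU with VLAN support compiled in (a worked example only):

int new_mtu = 1500;
int vlan_bytes = 4;                                /* CONFIG_VLAN_8021Q set */
int max_packet = new_mtu + 14 + 4 + vlan_bytes;    /* 1522 byte frame */
int jabber = (max_packet + 7) & ~7u;               /* 1528, rounded up to 8 */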
diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h
new file mode 100644
index 000000000000..2bd9cd76a398
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-common.h
@@ -0,0 +1,29 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+
+int cvm_oct_common_init(struct net_device *dev);
+void cvm_oct_common_uninit(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
new file mode 100644
index 000000000000..8f7374e7664c
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -0,0 +1,134 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+
+/*
+ * A few defines are used to control the operation of this driver:
+ * CONFIG_CAVIUM_RESERVE32
+ * This kernel config option controls the amount of memory configured
+ * in a wired TLB entry for all processes to share. If this is set, the
+ * driver will use this memory instead of kernel memory for pools. This
+ * allows 32bit userspace applications to access the buffers, but also
+ * requires all received packets to be copied.
+ * CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
+ * This kernel config option allows the user to control the number of
+ * packet and work queue buffers allocated by the driver. If this is zero,
+ * the driver uses the default from below.
+ * USE_SKBUFFS_IN_HW
+ * Tells the driver to populate the packet buffers with kernel skbuffs.
+ * This allows the driver to receive packets without copying them. It also
+ * means that 32bit userspace can't access the packet buffers.
+ * USE_32BIT_SHARED
+ * This define tells the driver to allocate memory for buffers from the
+ * 32bit shared region instead of the kernel memory space.
+ * USE_HW_TCPUDP_CHECKSUM
+ * Controls if the Octeon TCP/UDP checksum engine is used for packet
+ * output. If this is zero, the kernel will perform the checksum in
+ * software.
+ * USE_MULTICORE_RECEIVE
+ * Process receive interrupts on multiple cores. This spreads the network
+ * load across the first 8 processors. If this is zero, only one core
+ * processes incoming packets.
+ * USE_ASYNC_IOBDMA
+ * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
+ * IOBDMAs to issue IO accesses without stalling. Set this to zero
+ * to disable this. Note that IOBDMAs require CVMSEG.
+ * REUSE_SKBUFFS_WITHOUT_FREE
+ * Allows the TX path to free an skbuff into the FPA hardware pool. This
+ * can significantly improve performance for forwarding and bridging, but
+ * may be somewhat dangerous. Checks are made, but if any buffer is reused
+ * without the proper Linux cleanup, the networking stack may have very
+ * bizarre bugs.
+ */
+#ifndef __ETHERNET_DEFINES_H__
+#define __ETHERNET_DEFINES_H__
+
+#include "cvmx-config.h"
+
+
+#define OCTEON_ETHERNET_VERSION "1.9"
+
+#ifndef CONFIG_CAVIUM_RESERVE32
+#define CONFIG_CAVIUM_RESERVE32 0
+#endif
+
+#if CONFIG_CAVIUM_RESERVE32
+#define USE_32BIT_SHARED 1
+#define USE_SKBUFFS_IN_HW 0
+#define REUSE_SKBUFFS_WITHOUT_FREE 0
+#else
+#define USE_32BIT_SHARED 0
+#define USE_SKBUFFS_IN_HW 1
+#ifdef CONFIG_NETFILTER
+#define REUSE_SKBUFFS_WITHOUT_FREE 0
+#else
+#define REUSE_SKBUFFS_WITHOUT_FREE 1
+#endif
+#endif
+
+/* Max interrupts per second per core */
+#define INTERRUPT_LIMIT 10000
+
+/* Don't limit the number of interrupts */
+/*#define INTERRUPT_LIMIT 0 */
+#define USE_HW_TCPUDP_CHECKSUM 1
+
+#define USE_MULTICORE_RECEIVE 1
+
+/* Enable Random Early Dropping under load */
+#define USE_RED 1
+#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
+
+/*
+ * Allow SW based preamble removal at 10Mbps to work around PHYs giving
+ * us bad preambles.
+ */
+#define USE_10MBPS_PREAMBLE_WORKAROUND 1
+/*
+ * Use this to have all FPA frees also tell the L2 not to write data
+ * to memory.
+ */
+#define DONT_WRITEBACK(x) (x)
+/* Use this to not have FPA frees control L2 */
+/*#define DONT_WRITEBACK(x) 0 */
+
+/* Maximum number of packets to process per interrupt. */
+#define MAX_RX_PACKETS 120
+#define MAX_OUT_QUEUE_DEPTH 1000
+
+#ifndef CONFIG_SMP
+#undef USE_MULTICORE_RECEIVE
+#define USE_MULTICORE_RECEIVE 0
+#endif
+
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
+#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
+
+
+#endif /* __ETHERNET_DEFINES_H__ */
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
new file mode 100644
index 000000000000..93cab0a48925
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -0,0 +1,231 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-mdio.h"
+
+#include "cvmx-helper-board.h"
+
+#include "cvmx-smix-defs.h"
+
+DECLARE_MUTEX(mdio_sem);
+
+/**
+ * Perform an MII read. Called by the generic MII routines
+ *
+ * @dev: Device to perform read for
+ * @phy_id: The MII phy id
+ * @location: Register location to read
+ * Returns Result from the read or zero on failure
+ */
+static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 1;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
+
+ do {
+ if (!in_interrupt())
+ yield();
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
+ } while (smi_rd.s.pending);
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return 0;
+}
+
+static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id,
+ int location)
+{
+ return 0xffff;
+}
+
+/**
+ * Perform an MII write. Called by the generic MII routines
+ *
+ * @dev: Device to perform write for
+ * @phy_id: The MII phy id
+ * @location: Register location to write
+ * @val: Value to write
+ */
+static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location,
+ int val)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 0;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
+
+ do {
+ if (!in_interrupt())
+ yield();
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
+ } while (smi_wr.s.pending);
+}
+
+static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id,
+ int location, int val)
+{
+}
+
+static void cvm_oct_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "cavium-ethernet");
+ strcpy(info->version, OCTEON_ETHERNET_VERSION);
+ strcpy(info->bus_info, "Builtin");
+}
+
+static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int ret;
+
+ down(&mdio_sem);
+ ret = mii_ethtool_gset(&priv->mii_info, cmd);
+ up(&mdio_sem);
+
+ return ret;
+}
+
+static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int ret;
+
+ down(&mdio_sem);
+ ret = mii_ethtool_sset(&priv->mii_info, cmd);
+ up(&mdio_sem);
+
+ return ret;
+}
+
+static int cvm_oct_nway_reset(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int ret;
+
+ down(&mdio_sem);
+ ret = mii_nway_restart(&priv->mii_info);
+ up(&mdio_sem);
+
+ return ret;
+}
+
+static u32 cvm_oct_get_link(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ u32 ret;
+
+ down(&mdio_sem);
+ ret = mii_link_ok(&priv->mii_info);
+ up(&mdio_sem);
+
+ return ret;
+}
+
+struct ethtool_ops cvm_oct_ethtool_ops = {
+ .get_drvinfo = cvm_oct_get_drvinfo,
+ .get_settings = cvm_oct_get_settings,
+ .set_settings = cvm_oct_set_settings,
+ .nway_reset = cvm_oct_nway_reset,
+ .get_link = cvm_oct_get_link,
+ .get_sg = ethtool_op_get_sg,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+};
+
+/**
+ * IOCTL support for PHY control
+ *
+ * @dev: Device to change
+ * @rq: the request
+ * @cmd: the command
+ * Returns Zero on success
+ */
+int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ unsigned int duplex_chg;
+ int ret;
+
+ down(&mdio_sem);
+ ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg);
+ up(&mdio_sem);
+
+ return ret;
+}
+
+/**
+ * Setup the MDIO device structures
+ *
+ * @dev: Device to setup
+ *
+ * Returns Zero on success, negative on failure
+ */
+int cvm_oct_mdio_setup_device(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int phy_id = cvmx_helper_board_get_mii_address(priv->port);
+ if (phy_id != -1) {
+ priv->mii_info.dev = dev;
+ priv->mii_info.phy_id = phy_id;
+ priv->mii_info.phy_id_mask = 0xff;
+ priv->mii_info.supports_gmii = 1;
+ priv->mii_info.reg_num_mask = 0x1f;
+ priv->mii_info.mdio_read = cvm_oct_mdio_read;
+ priv->mii_info.mdio_write = cvm_oct_mdio_write;
+ } else {
+ /* Supply dummy MDIO routines so the kernel won't crash
+ if the user tries to read them */
+ priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read;
+ priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write;
+ }
+ return 0;
+}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
new file mode 100644
index 000000000000..6314141e5ef2
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -0,0 +1,46 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+extern struct ethtool_ops cvm_oct_ethtool_ops;
+int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int cvm_oct_mdio_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
new file mode 100644
index 000000000000..b595903e2af1
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+
+#include "cvmx-fpa.h"
+
+/**
+ * Fill the supplied hardware pool with skbuffs
+ *
+ * @pool: Pool to allocate an skbuff for
+ * @size: Size of the buffer needed for the pool
+ * @elements: Number of buffers to allocate
+ */
+static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
+{
+ int freed = elements;
+ while (freed) {
+
+ struct sk_buff *skb = dev_alloc_skb(size + 128);
+ if (unlikely(skb == NULL)) {
+ pr_warning
+ ("Failed to allocate skb for hardware pool %d\n",
+ pool);
+ break;
+ }
+
+ skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f));
+ *(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
+ cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
+ freed--;
+ }
+ return elements - freed;
+}
+
+/**
+ * Free the supplied hardware pool of skbuffs
+ *
+ * @pool: Pool to allocate an skbuff for
+ * @size: Size of the buffer needed for the pool
+ * @elements: Number of buffers to allocate
+ */
+static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
+{
+ char *memory;
+
+ do {
+ memory = cvmx_fpa_alloc(pool);
+ if (memory) {
+ struct sk_buff *skb =
+ *(struct sk_buff **)(memory - sizeof(void *));
+ elements--;
+ dev_kfree_skb(skb);
+ }
+ } while (memory);
+
+ if (elements < 0)
+ pr_warning("Freeing of pool %u had too many skbuffs (%d)\n",
+ pool, elements);
+ else if (elements > 0)
+ pr_warning("Freeing of pool %u is missing %d skbuffs\n",
+ pool, elements);
+}
+
+/**
+ * This function fills a hardware pool with memory. Depending
+ * on the config defines, this memory might come from the
+ * kernel or global 32bit memory allocated with
+ * cvmx_bootmem_alloc.
+ *
+ * @pool: Pool to populate
+ * @size: Size of each buffer in the pool
+ * @elements: Number of buffers to allocate
+ */
+static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
+{
+ char *memory;
+ int freed = elements;
+
+ if (USE_32BIT_SHARED) {
+ extern uint64_t octeon_reserve32_memory;
+
+ memory =
+ cvmx_bootmem_alloc_range(elements * size, 128,
+ octeon_reserve32_memory,
+ octeon_reserve32_memory +
+ (CONFIG_CAVIUM_RESERVE32 << 20) -
+ 1);
+ if (memory == NULL)
+ panic("Unable to allocate %u bytes for FPA pool %d\n",
+ elements * size, pool);
+
+ pr_notice("Memory range %p - %p reserved for "
+ "hardware\n", memory,
+ memory + elements * size - 1);
+
+ while (freed) {
+ cvmx_fpa_free(memory, pool, 0);
+ memory += size;
+ freed--;
+ }
+ } else {
+ while (freed) {
+ /* We need to force alignment to 128 bytes here */
+ memory = kmalloc(size + 127, GFP_ATOMIC);
+ if (unlikely(memory == NULL)) {
+ pr_warning("Unable to allocate %u bytes for "
+ "FPA pool %d\n",
+ elements * size, pool);
+ break;
+ }
+ memory = (char *)(((unsigned long)memory + 127) & -128);
+ cvmx_fpa_free(memory, pool, 0);
+ freed--;
+ }
+ }
+ return elements - freed;
+}
+
+/**
+ * Free memory previously allocated with cvm_oct_fill_hw_memory
+ *
+ * @pool: FPA pool to free
+ * @size: Size of each buffer in the pool
+ * @elements: Number of buffers that should be in the pool
+ */
+static void cvm_oct_free_hw_memory(int pool, int size, int elements)
+{
+ if (USE_32BIT_SHARED) {
+ pr_warning("Warning: 32 shared memory is not freeable\n");
+ } else {
+ char *memory;
+ do {
+ memory = cvmx_fpa_alloc(pool);
+ if (memory) {
+ elements--;
+ kfree(phys_to_virt(cvmx_ptr_to_phys(memory)));
+ }
+ } while (memory);
+
+ if (elements < 0)
+ pr_warning("Freeing of pool %u had too many "
+ "buffers (%d)\n",
+ pool, elements);
+ else if (elements > 0)
+ pr_warning("Warning: Freeing of pool %u is "
+ "missing %d buffers\n",
+ pool, elements);
+ }
+}
+
+int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
+{
+ int freed;
+ if (USE_SKBUFFS_IN_HW)
+ freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
+ else
+ freed = cvm_oct_fill_hw_memory(pool, size, elements);
+ return freed;
+}
+
+void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
+{
+ if (USE_SKBUFFS_IN_HW)
+ cvm_oct_free_hw_skbuff(pool, size, elements);
+ else
+ cvm_oct_free_hw_memory(pool, size, elements);
+}
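A sketch of how these two helpers pair up at driver init and teardown; the pool constants are assumed to come from the driver's cvmx-config.h and the element count here is illustrative:

static int example_fill_packet_pool(void)
{
        /* Populate the FPA packet pool, then drain it again on teardown. */
        int filled = cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
                                          CVMX_FPA_PACKET_POOL_SIZE, 1024);
        if (filled == 0)
                return -ENOMEM;
        /* ... hardware uses the pool while the interface is up ... */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL,
                              CVMX_FPA_PACKET_POOL_SIZE, filled);
        return 0;
}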
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
new file mode 100644
index 000000000000..713f2edc8b4f
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mem.h
@@ -0,0 +1,29 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+********************************************************************/
+
+int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
+void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
new file mode 100644
index 000000000000..8fa88fc419b7
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-proc.c
@@ -0,0 +1,256 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
+
+#include "cvmx-helper.h"
+#include "cvmx-pip.h"
+
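+/*
+ * Read one 32-bit statistics counter from the external switch using
+ * indirect MDIO accesses: the counter is selected by writing
+ * 0xcc00 | offset to register 0x1d, then the high and low halves are
+ * read back from registers 0x1e and 0x1f.
+ */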
+static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev,
+ int phy_id, int offset)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset);
+ return ((uint64_t) priv->mii_info.mdio_read(dev, phy_id, 0x1e) << 16) |
+ (uint64_t) priv->mii_info.mdio_read(dev, phy_id, 0x1f);
+}
+
+static int cvm_oct_stats_switch_show(struct seq_file *m, void *v)
+{
+ static const int ports[] = { 0, 1, 2, 3, 9, -1 };
+ struct net_device *dev = cvm_oct_device[0];
+ int index = 0;
+
+ while (ports[index] != -1) {
+
+ /* Latch port */
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ priv->mii_info.mdio_write(dev, 0x1b, 0x1d,
+ 0xdc00 | ports[index]);
+ seq_printf(m, "\nSwitch Port %d\n", ports[index]);
+ seq_printf(m, "InGoodOctets: %12llu\t"
+ "OutOctets: %12llu\t"
+ "64 Octets: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b,
+ 0x00) |
+ (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32),
+ cvm_oct_stats_read_switch(dev, 0x1b,
+ 0x0E) |
+ (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x08));
+
+ seq_printf(m, "InBadOctets: %12llu\t"
+ "OutUnicast: %12llu\t"
+ "65-127 Octets: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x02),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x10),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x09));
+
+ seq_printf(m, "InUnicast: %12llu\t"
+ "OutBroadcasts: %12llu\t"
+ "128-255 Octets: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x04),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x13),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x0A));
+
+ seq_printf(m, "InBroadcasts: %12llu\t"
+ "OutMulticasts: %12llu\t"
+ "256-511 Octets: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x06),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x12),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x0B));
+
+ seq_printf(m, "InMulticasts: %12llu\t"
+ "OutPause: %12llu\t"
+ "512-1023 Octets:%12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x07),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x15),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x0C));
+
+ seq_printf(m, "InPause: %12llu\t"
+ "Excessive: %12llu\t"
+ "1024-Max Octets:%12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x16),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x11),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x0D));
+
+ seq_printf(m, "InUndersize: %12llu\t"
+ "Collisions: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x18),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x1E));
+
+ seq_printf(m, "InFragments: %12llu\t"
+ "Deferred: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x19),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x05));
+
+ seq_printf(m, "InOversize: %12llu\t"
+ "Single: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x1A),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x14));
+
+ seq_printf(m, "InJabber: %12llu\t"
+ "Multiple: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x1B),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x17));
+
+ seq_printf(m, "In RxErr: %12llu\t"
+ "OutFCSErr: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x1C),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x03));
+
+ seq_printf(m, "InFCSErr: %12llu\t"
+ "Late: %12llu\n",
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x1D),
+ cvm_oct_stats_read_switch(dev, 0x1b, 0x1F));
+ index++;
+ }
+ return 0;
+}
+
+/**
+ * User is reading /proc/octeon_ethernet_stats
+ *
+ * @m: seq_file to write the statistics into
+ * @v: Unused
+ * Returns zero on success
+ */
+static int cvm_oct_stats_show(struct seq_file *m, void *v)
+{
+ struct octeon_ethernet *priv;
+ int port;
+
+ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
+
+ if (cvm_oct_device[port]) {
+ priv = netdev_priv(cvm_oct_device[port]);
+
+ seq_printf(m, "\nOcteon Port %d (%s)\n", port,
+ cvm_oct_device[port]->name);
+ seq_printf(m,
+ "rx_packets: %12lu\t"
+ "tx_packets: %12lu\n",
+ priv->stats.rx_packets,
+ priv->stats.tx_packets);
+ seq_printf(m,
+ "rx_bytes: %12lu\t"
+ "tx_bytes: %12lu\n",
+ priv->stats.rx_bytes, priv->stats.tx_bytes);
+ seq_printf(m,
+ "rx_errors: %12lu\t"
+ "tx_errors: %12lu\n",
+ priv->stats.rx_errors,
+ priv->stats.tx_errors);
+ seq_printf(m,
+ "rx_dropped: %12lu\t"
+ "tx_dropped: %12lu\n",
+ priv->stats.rx_dropped,
+ priv->stats.tx_dropped);
+ seq_printf(m,
+ "rx_length_errors: %12lu\t"
+ "tx_aborted_errors: %12lu\n",
+ priv->stats.rx_length_errors,
+ priv->stats.tx_aborted_errors);
+ seq_printf(m,
+ "rx_over_errors: %12lu\t"
+ "tx_carrier_errors: %12lu\n",
+ priv->stats.rx_over_errors,
+ priv->stats.tx_carrier_errors);
+ seq_printf(m,
+ "rx_crc_errors: %12lu\t"
+ "tx_fifo_errors: %12lu\n",
+ priv->stats.rx_crc_errors,
+ priv->stats.tx_fifo_errors);
+ seq_printf(m,
+ "rx_frame_errors: %12lu\t"
+ "tx_heartbeat_errors: %12lu\n",
+ priv->stats.rx_frame_errors,
+ priv->stats.tx_heartbeat_errors);
+ seq_printf(m,
+ "rx_fifo_errors: %12lu\t"
+ "tx_window_errors: %12lu\n",
+ priv->stats.rx_fifo_errors,
+ priv->stats.tx_window_errors);
+ seq_printf(m,
+ "rx_missed_errors: %12lu\t"
+ "multicast: %12lu\n",
+ priv->stats.rx_missed_errors,
+ priv->stats.multicast);
+ }
+ }
+
+ if (cvm_oct_device[0]) {
+ priv = netdev_priv(cvm_oct_device[0]);
+ if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
+ cvm_oct_stats_switch_show(m, v);
+ }
+ return 0;
+}
+
+/**
+ * /proc/octeon_ethernet_stats was opened. Use the single_open iterator
+ *
+ * @inode: Inode of the proc entry (unused)
+ * @file: File being opened
+ * Returns the result of single_open()
+ */
+static int cvm_oct_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cvm_oct_stats_show, NULL);
+}
+
+static const struct file_operations cvm_oct_stats_operations = {
+ .open = cvm_oct_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void cvm_oct_proc_initialize(void)
+{
+ struct proc_dir_entry *entry =
+ create_proc_entry("octeon_ethernet_stats", 0, NULL);
+ if (entry)
+ entry->proc_fops = &cvm_oct_stats_operations;
+}
+
+void cvm_oct_proc_shutdown(void)
+{
+ remove_proc_entry("octeon_ethernet_stats", NULL);
+}
diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h
new file mode 100644
index 000000000000..82c7d9f78bc4
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-proc.h
@@ -0,0 +1,29 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+
+void cvm_oct_proc_initialize(void);
+void cvm_oct_proc_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
new file mode 100644
index 000000000000..8579f1670d1e
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -0,0 +1,397 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-common.h"
+#include "ethernet-util.h"
+
+#include "cvmx-helper.h"
+
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include "cvmx-gmxx-defs.h"
+
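+/* Serializes access to GMX/IPD registers that are shared across ports. */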
+DEFINE_SPINLOCK(global_register_lock);
+
+static int number_rgmii_ports;
+
+static void cvm_oct_rgmii_poll(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ unsigned long flags;
+ cvmx_helper_link_info_t link_info;
+
+ /*
+ * Take the global register lock since we are going to touch
+ * registers that affect more than one port.
+ */
+ spin_lock_irqsave(&global_register_lock, flags);
+
+ link_info = cvmx_helper_link_get(priv->port);
+ if (link_info.u64 == priv->link_info) {
+
+ /*
+ * If the 10Mbps preamble workaround is supported and we're
+ * at 10Mbps we may need to do some special checking.
+ */
+ if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {
+
+ /*
+ * Read the GMXX_RXX_INT_REG[PCTERR] bit and
+ * see if we are getting preamble errors.
+ */
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+ gmxx_rxx_int_reg.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface));
+ if (gmxx_rxx_int_reg.s.pcterr) {
+
+ /*
+ * We are getting preamble errors at
+ * 10Mbps. Most likely the PHY is
+ * giving us packets with misaligned
+ * preambles. In order to get these
+ * packets we need to disable preamble
+ * checking and do it in software.
+ */
+ union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+ union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+
+ /* Disable preamble checking */
+ gmxx_rxx_frm_ctl.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
+ (index, interface));
+ gmxx_rxx_frm_ctl.s.pre_chk = 0;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL
+ (index, interface),
+ gmxx_rxx_frm_ctl.u64);
+
+ /* Disable FCS stripping */
+ ipd_sub_port_fcs.u64 =
+ cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+ ipd_sub_port_fcs.s.port_bit &=
+ 0xffffffffull ^ (1ull << priv->port);
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS,
+ ipd_sub_port_fcs.u64);
+
+ /* Clear any error bits */
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface),
+ gmxx_rxx_int_reg.u64);
+ DEBUGPRINT("%s: Using 10Mbps with software "
+ "preamble removal\n",
+ dev->name);
+ }
+ }
+ spin_unlock_irqrestore(&global_register_lock, flags);
+ return;
+ }
+
+ /* If the 10Mbps preamble workaround is allowed we need to
+ re-enable preamble checking and FCS stripping, and clear the
+ error bits on every speed change. If errors occur during
+ 10Mbps operation the code above will reconfigure them. */
+ if (USE_10MBPS_PREAMBLE_WORKAROUND) {
+
+ union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+ union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
+ union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ /* Enable preamble checking */
+ gmxx_rxx_frm_ctl.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
+ gmxx_rxx_frm_ctl.s.pre_chk = 1;
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
+ gmxx_rxx_frm_ctl.u64);
+ /* Enable FCS stripping */
+ ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
+ ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
+ cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
+ /* Clear any error bits */
+ gmxx_rxx_int_reg.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
+ gmxx_rxx_int_reg.u64);
+ }
+
+ link_info = cvmx_helper_link_autoconf(priv->port);
+ priv->link_info = link_info.u64;
+ spin_unlock_irqrestore(&global_register_lock, flags);
+
+ /* Tell Linux */
+ if (link_info.s.link_up) {
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT
+ ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port);
+ } else {
+
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
+}
+
+static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
+{
+ union cvmx_npi_rsl_int_blocks rsl_int_blocks;
+ int index;
+ irqreturn_t return_status = IRQ_NONE;
+
+ rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
+
+ /* Check and see if this interrupt was caused by the GMX0 block */
+ if (rsl_int_blocks.s.gmx0) {
+
+ int interface = 0;
+ /* Loop through every port of this interface */
+ for (index = 0;
+ index < cvmx_helper_ports_on_interface(interface);
+ index++) {
+
+ /* Read the GMX interrupt status bits */
+ union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
+ gmx_rx_int_reg.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface));
+ gmx_rx_int_reg.u64 &=
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
+ (index, interface));
+ /* Poll the port if inband status changed */
+ if (gmx_rx_int_reg.s.phy_dupx
+ || gmx_rx_int_reg.s.phy_link
+ || gmx_rx_int_reg.s.phy_spd) {
+
+ struct net_device *dev =
+ cvm_oct_device[cvmx_helper_get_ipd_port
+ (interface, index)];
+ if (dev)
+ cvm_oct_rgmii_poll(dev);
+ gmx_rx_int_reg.u64 = 0;
+ gmx_rx_int_reg.s.phy_dupx = 1;
+ gmx_rx_int_reg.s.phy_link = 1;
+ gmx_rx_int_reg.s.phy_spd = 1;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface),
+ gmx_rx_int_reg.u64);
+ return_status = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* Check and see if this interrupt was caused by the GMX1 block */
+ if (rsl_int_blocks.s.gmx1) {
+
+ int interface = 1;
+ /* Loop through every port of this interface */
+ for (index = 0;
+ index < cvmx_helper_ports_on_interface(interface);
+ index++) {
+
+ /* Read the GMX interrupt status bits */
+ union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
+ gmx_rx_int_reg.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface));
+ gmx_rx_int_reg.u64 &=
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
+ (index, interface));
+ /* Poll the port if inband status changed */
+ if (gmx_rx_int_reg.s.phy_dupx
+ || gmx_rx_int_reg.s.phy_link
+ || gmx_rx_int_reg.s.phy_spd) {
+
+ struct net_device *dev =
+ cvm_oct_device[cvmx_helper_get_ipd_port
+ (interface, index)];
+ if (dev)
+ cvm_oct_rgmii_poll(dev);
+ gmx_rx_int_reg.u64 = 0;
+ gmx_rx_int_reg.s.phy_dupx = 1;
+ gmx_rx_int_reg.s.phy_link = 1;
+ gmx_rx_int_reg.s.phy_spd = 1;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface),
+ gmx_rx_int_reg.u64);
+ return_status = IRQ_HANDLED;
+ }
+ }
+ }
+ return return_status;
+}
+
+static int cvm_oct_rgmii_open(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ cvmx_helper_link_info_t link_info;
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ if (!octeon_is_simulation()) {
+ link_info = cvmx_helper_link_get(priv->port);
+ if (!link_info.s.link_up)
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+
+static int cvm_oct_rgmii_stop(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ return 0;
+}
+
+int cvm_oct_rgmii_init(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int r;
+
+ cvm_oct_common_init(dev);
+ dev->open = cvm_oct_rgmii_open;
+ dev->stop = cvm_oct_rgmii_stop;
+ dev->stop(dev);
+
+ /*
+ * Due to GMX errata in CN3XXX series chips, it is necessary
+ * to take the link down immediately when the PHY changes
+ * state. In order to do this we call the poll function every
+ * time the RGMII inband status changes. This may cause
+ * problems if the PHY doesn't implement inband status
+ * properly.
+ */
+ if (number_rgmii_ports == 0) {
+ r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
+ IRQF_SHARED, "RGMII", &number_rgmii_ports);
+ }
+ number_rgmii_ports++;
+
+ /*
+ * Only true RGMII ports need to be polled. In GMII mode, port
+ * 0 is really a RGMII port.
+ */
+ if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
+ && (priv->port == 0))
+ || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+
+ if (!octeon_is_simulation()) {
+
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ /*
+ * Enable interrupts on inband status changes
+ * for this port.
+ */
+ gmx_rx_int_en.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
+ (index, interface));
+ gmx_rx_int_en.s.phy_dupx = 1;
+ gmx_rx_int_en.s.phy_link = 1;
+ gmx_rx_int_en.s.phy_spd = 1;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
+ gmx_rx_int_en.u64);
+ priv->poll = cvm_oct_rgmii_poll;
+ }
+ }
+
+ return 0;
+}
+
+void cvm_oct_rgmii_uninit(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvm_oct_common_uninit(dev);
+
+ /*
+ * Only true RGMII ports need to be polled. In GMII mode, port
+ * 0 is really a RGMII port.
+ */
+ if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
+ && (priv->port == 0))
+ || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+
+ if (!octeon_is_simulation()) {
+
+ union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ /*
+ * Disable interrupts on inband status changes
+ * for this port.
+ */
+ gmx_rx_int_en.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
+ (index, interface));
+ gmx_rx_int_en.s.phy_dupx = 0;
+ gmx_rx_int_en.s.phy_link = 0;
+ gmx_rx_int_en.s.phy_spd = 0;
+ cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
+ gmx_rx_int_en.u64);
+ }
+ }
+
+ /* Remove the interrupt handler when the last port is removed. */
+ number_rgmii_ports--;
+ if (number_rgmii_ports == 0)
+ free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
+}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
new file mode 100644
index 000000000000..1b237b7e689d
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -0,0 +1,505 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cache.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/prefetch.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+#include <asm/atomic.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-mem.h"
+#include "ethernet-util.h"
+
+#include "cvmx-helper.h"
+#include "cvmx-wqe.h"
+#include "cvmx-fau.h"
+#include "cvmx-pow.h"
+#include "cvmx-pip.h"
+#include "cvmx-scratch.h"
+
+#include "cvmx-gmxx-defs.h"
+
+struct cvm_tasklet_wrapper {
+ struct tasklet_struct t;
+};
+
+/*
+ * Aligning the tasklet_struct on cacheline boundaries seems to decrease
+ * throughput even though in theory it would reduce contention on the
+ * cache lines containing the locks.
+ */
+
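+/* One tasklet per possible CPU; each core schedules and runs its own. */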
+static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS];
+
+/**
+ * Interrupt handler. The interrupt occurs whenever the POW
+ * transitions from 0->1 packets in our group.
+ *
+ * @cpl: Interrupt number (unused)
+ * @dev_id: Device id passed to request_irq (unused)
+ * Returns IRQ_HANDLED
+ */
+irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
+{
+ /* Acknowledge the interrupt */
+ if (INTERRUPT_LIMIT)
+ cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group);
+ else
+ cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group);
+ preempt_disable();
+ tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
+ preempt_enable();
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * This is called when the kernel needs to manually poll the
+ * device. For Octeon, this is simply calling the interrupt
+ * handler. We actually poll all the devices, not just the
+ * one supplied.
+ *
+ * @dev: Device to poll. Unused
+ */
+void cvm_oct_poll_controller(struct net_device *dev)
+{
+ preempt_disable();
+ tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
+ preempt_enable();
+}
+#endif
+
+/**
+ * This is called on receive errors, and determines if the packet
+ * can be dropped early-on in cvm_oct_tasklet_rx().
+ *
+ * @work: Work queue entry pointing to the packet.
+ * Returns Non-zero if the packet can be dropped, zero otherwise.
+ */
+static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+{
+ if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
+ /*
+ * Ignore length errors on min size packets. Some
+ * equipment incorrectly pads packets to 64+4FCS
+ * instead of 60+4FCS. Note these packets still get
+ * counted as frame errors.
+ */
+ } else
+ if (USE_10MBPS_PREAMBLE_WORKAROUND
+ && ((work->word2.snoip.err_code == 5)
+ || (work->word2.snoip.err_code == 7))) {
+
+ /*
+ * We received a packet with either an alignment error
+ * or a FCS error. This may be signalling that we are
+ * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
+ * off. If this is the case we need to parse the
+ * packet to determine if we can remove a non-spec
+ * preamble and generate a correct packet.
+ */
+ int interface = cvmx_helper_get_interface_num(work->ipprt);
+ int index = cvmx_helper_get_interface_index_num(work->ipprt);
+ union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
+ gmxx_rxx_frm_ctl.u64 =
+ cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
+ if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
+
+ uint8_t *ptr =
+ cvmx_phys_to_ptr(work->packet_ptr.s.addr);
+ int i = 0;
+
+ while (i < work->len - 1) {
+ if (*ptr != 0x55)
+ break;
+ ptr++;
+ i++;
+ }
+
+ if (*ptr == 0xd5) {
+ /*
+ DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
+ */
+ work->packet_ptr.s.addr += i + 1;
+ work->len -= i + 5;
+ } else if ((*ptr & 0xf) == 0xd) {
+ /*
+ DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
+ */
+ work->packet_ptr.s.addr += i;
+ work->len -= i + 4;
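+ /*
+ * The preamble ended on a half-byte boundary, so
+ * shift the whole packet left by one nibble to
+ * realign the data.
+ */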
+ for (i = 0; i < work->len; i++) {
+ *ptr =
+ ((*ptr & 0xf0) >> 4) |
+ ((*(ptr + 1) & 0xf) << 4);
+ ptr++;
+ }
+ } else {
+ DEBUGPRINT("Port %d unknown preamble, packet "
+ "dropped\n",
+ work->ipprt);
+ /*
+ cvmx_helper_dump_packet(work);
+ */
+ cvm_oct_free_work(work);
+ return 1;
+ }
+ }
+ } else {
+ DEBUGPRINT("Port %d receive error code %d, packet dropped\n",
+ work->ipprt, work->word2.snoip.err_code);
+ cvm_oct_free_work(work);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Tasklet function that is scheduled on a core when an interrupt occurs.
+ *
+ * @unused: Unused tasklet data
+ */
+void cvm_oct_tasklet_rx(unsigned long unused)
+{
+ const int coreid = cvmx_get_core_num();
+ uint64_t old_group_mask;
+ uint64_t old_scratch;
+ int rx_count = 0;
+ int number_to_free;
+ int num_freed;
+ int packet_not_copied;
+
+ /* Prefetch cvm_oct_device since we know we need it soon */
+ prefetch(cvm_oct_device);
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Save scratch in case userspace is using it */
+ CVMX_SYNCIOBDMA;
+ old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ }
+
+ /* Only allow work for our group (and preserve priorities) */
+ old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
+ (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+
+ if (USE_ASYNC_IOBDMA)
+ cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+
+ while (1) {
+ struct sk_buff *skb = NULL;
+ int skb_in_hw;
+ cvmx_wqe_t *work;
+
+ if (USE_ASYNC_IOBDMA) {
+ work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
+ } else {
+ if ((INTERRUPT_LIMIT == 0)
+ || likely(rx_count < MAX_RX_PACKETS))
+ work =
+ cvmx_pow_work_request_sync
+ (CVMX_POW_NO_WAIT);
+ else
+ work = NULL;
+ }
+ prefetch(work);
+ if (work == NULL)
+ break;
+
+ /*
+ * Limit each core to processing MAX_RX_PACKETS
+ * packets without a break. This way the RX can't
+ * starve the TX task.
+ */
+ if (USE_ASYNC_IOBDMA) {
+
+ if ((INTERRUPT_LIMIT == 0)
+ || likely(rx_count < MAX_RX_PACKETS))
+ cvmx_pow_work_request_async_nocheck
+ (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ else {
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH,
+ 0x8000000000000000ull);
+ cvmx_pow_tag_sw_null_nocheck();
+ }
+ }
+
+ skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
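+ /*
+ * When skbuffs are kept in the FPA, the sk_buff pointer is
+ * stored just before the packet buffer; recover it from there.
+ */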
+ if (likely(skb_in_hw)) {
+ skb =
+ *(struct sk_buff
+ **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ sizeof(void *));
+ prefetch(&skb->head);
+ prefetch(&skb->len);
+ }
+ prefetch(cvm_oct_device[work->ipprt]);
+
+ rx_count++;
+ /* Immediately throw away all packets with receive errors */
+ if (unlikely(work->word2.snoip.rcv_error)) {
+ if (cvm_oct_check_rcv_error(work))
+ continue;
+ }
+
+ /*
+ * We can only use the zero copy path if skbuffs are
+ * in the FPA pool and the packet fits in a single
+ * buffer.
+ */
+ if (likely(skb_in_hw)) {
+ /*
+ * This calculation was changed in case the
+ * skb header is using a different address
+ * aliasing type than the buffer. It doesn't
+ * make any difference now, but the new one is
+ * more correct.
+ */
+ skb->data =
+ skb->head + work->packet_ptr.s.addr -
+ cvmx_ptr_to_phys(skb->head);
+ prefetch(skb->data);
+ skb->len = work->len;
+ skb_set_tail_pointer(skb, skb->len);
+ packet_not_copied = 1;
+ } else {
+
+ /*
+ * We have to copy the packet. First allocate
+ * an skbuff for it.
+ */
+ skb = dev_alloc_skb(work->len);
+ if (!skb) {
+ DEBUGPRINT("Port %d failed to allocate "
+ "skbuff, packet dropped\n",
+ work->ipprt);
+ cvm_oct_free_work(work);
+ continue;
+ }
+
+ /*
+ * Check if we've received a packet that was
+ * entirely stored in the work entry. This is
+ * untested.
+ */
+ if (unlikely(work->word2.s.bufs == 0)) {
+ uint8_t *ptr = work->packet_data;
+
+ if (likely(!work->word2.s.not_IP)) {
+ /*
+ * The beginning of the packet
+ * moves for IP packets.
+ */
+ if (work->word2.s.is_v6)
+ ptr += 2;
+ else
+ ptr += 6;
+ }
+ memcpy(skb_put(skb, work->len), ptr, work->len);
+ /* No packet buffers to free */
+ } else {
+ int segments = work->word2.s.bufs;
+ union cvmx_buf_ptr segment_ptr =
+ work->packet_ptr;
+ int len = work->len;
+
+ while (segments--) {
+ union cvmx_buf_ptr next_ptr =
+ *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(segment_ptr.s.
+ addr - 8);
+ /*
+ * Octeon Errata PKI-100: The segment size is
+ * wrong. Until it is fixed, calculate the
+ * segment size based on the packet pool
+ * buffer size. When it is fixed, the
+ * following line should be replaced with this
+ * one: int segment_size =
+ * segment_ptr.s.size;
+ */
+ int segment_size =
+ CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr -
+ (((segment_ptr.s.addr >> 7) -
+ segment_ptr.s.back) << 7));
+ /* Don't copy more than what is left
+ in the packet */
+ if (segment_size > len)
+ segment_size = len;
+ /* Copy the data into the packet */
+ memcpy(skb_put(skb, segment_size),
+ cvmx_phys_to_ptr(segment_ptr.s.
+ addr),
+ segment_size);
+ /* Reduce the amount of bytes left
+ to copy */
+ len -= segment_size;
+ segment_ptr = next_ptr;
+ }
+ }
+ packet_not_copied = 0;
+ }
+
+ if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
+ cvm_oct_device[work->ipprt])) {
+ struct net_device *dev = cvm_oct_device[work->ipprt];
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ /* Only accept packets for devices
+ that are currently up */
+ if (likely(dev->flags & IFF_UP)) {
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->dev = dev;
+
+ if (unlikely
+ (work->word2.s.not_IP
+ || work->word2.s.IP_exc
+ || work->word2.s.L4_error))
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Increment RX stats for virtual ports */
+ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+#ifdef CONFIG_64BIT
+ atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
+ atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
+#else
+ atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
+ atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
+#endif
+ }
+ netif_receive_skb(skb);
+ } else {
+ /*
+ * Drop any packet received for a
+ * device that isn't up.
+ */
+ /*
+ DEBUGPRINT("%s: Device not up, packet dropped\n",
+ dev->name);
+ */
+#ifdef CONFIG_64BIT
+ atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
+#else
+ atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
+#endif
+ dev_kfree_skb_irq(skb);
+ }
+ } else {
+ /*
+ * Drop any packet received for a device that
+ * doesn't exist.
+ */
+ DEBUGPRINT("Port %d not controlled by Linux, packet "
+ "dropped\n",
+ work->ipprt);
+ dev_kfree_skb_irq(skb);
+ }
+ /*
+ * Check to see if the skbuff and work share the same
+ * packet buffer.
+ */
+ if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
+ /*
+ * This buffer needs to be replaced, increment
+ * the number of buffers we need to free by
+ * one.
+ */
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ 1);
+
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
+ DONT_WRITEBACK(1));
+ } else {
+ cvm_oct_free_work(work);
+ }
+ }
+
+ /* Restore the original POW group mask */
+ cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+ if (USE_ASYNC_IOBDMA) {
+ /* Restore the scratch area */
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+ }
+
+ if (USE_SKBUFFS_IN_HW) {
+ /* Refill the packet buffer pool */
+ number_to_free =
+ cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+ if (number_to_free > 0) {
+ cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ -number_to_free);
+ num_freed =
+ cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
+ CVMX_FPA_PACKET_POOL_SIZE,
+ number_to_free);
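+ /*
+ * If the refill came up short, put the shortfall
+ * back so a later pass will retry the allocation.
+ */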
+ if (num_freed != number_to_free) {
+ cvmx_fau_atomic_add32
+ (FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ number_to_free - num_freed);
+ }
+ }
+ }
+}
+
+void cvm_oct_rx_initialize(void)
+{
+ int i;
+ /* Initialize all of the tasklets */
+ for (i = 0; i < NR_CPUS; i++)
+ tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0);
+}
+
+void cvm_oct_rx_shutdown(void)
+{
+ int i;
+ /* Shutdown all of the tasklets */
+ for (i = 0; i < NR_CPUS; i++)
+ tasklet_kill(&cvm_oct_tasklet[i].t);
+}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
new file mode 100644
index 000000000000..a9b72b87a7a6
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -0,0 +1,33 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+
+irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id);
+void cvm_oct_poll_controller(struct net_device *dev);
+void cvm_oct_tasklet_rx(unsigned long unused);
+
+void cvm_oct_rx_initialize(void);
+void cvm_oct_rx_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
new file mode 100644
index 000000000000..58fa39c1d675
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -0,0 +1,129 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-util.h"
+#include "ethernet-common.h"
+
+#include "cvmx-helper.h"
+
+#include "cvmx-gmxx-defs.h"
+
+static int cvm_oct_sgmii_open(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ cvmx_helper_link_info_t link_info;
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ if (!octeon_is_simulation()) {
+ link_info = cvmx_helper_link_get(priv->port);
+ if (!link_info.s.link_up)
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+
+static int cvm_oct_sgmii_stop(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ return 0;
+}
+
+static void cvm_oct_sgmii_poll(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvmx_helper_link_info_t link_info;
+
+ link_info = cvmx_helper_link_get(priv->port);
+ if (link_info.u64 == priv->link_info)
+ return;
+
+ link_info = cvmx_helper_link_autoconf(priv->port);
+ priv->link_info = link_info.u64;
+
+ /* Tell Linux */
+ if (link_info.s.link_up) {
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT
+ ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
+}
+
+int cvm_oct_sgmii_init(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvm_oct_common_init(dev);
+ dev->open = cvm_oct_sgmii_open;
+ dev->stop = cvm_oct_sgmii_stop;
+ dev->stop(dev);
+ if (!octeon_is_simulation())
+ priv->poll = cvm_oct_sgmii_poll;
+
+ /* FIXME: Need autoneg logic */
+ return 0;
+}
+
+void cvm_oct_sgmii_uninit(struct net_device *dev)
+{
+ cvm_oct_common_uninit(dev);
+}
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
new file mode 100644
index 000000000000..e0971bbe4ddc
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -0,0 +1,323 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-common.h"
+#include "ethernet-util.h"
+
+#include "cvmx-spi.h"
+
+#include <asm/octeon/cvmx-npi-defs.h>
+#include "cvmx-spxx-defs.h"
+#include "cvmx-stxx-defs.h"
+
+static int number_spi_ports;
+static int need_retrain[2] = { 0, 0 };
+
+static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
+{
+ irqreturn_t return_status = IRQ_NONE;
+ union cvmx_npi_rsl_int_blocks rsl_int_blocks;
+
+ /* Check and see if this interrupt was caused by the GMX block */
+ rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
+ if (rsl_int_blocks.s.spx1) { /* 19 - SPX1_INT_REG & STX1_INT_REG */
+
+ union cvmx_spxx_int_reg spx_int_reg;
+ union cvmx_stxx_int_reg stx_int_reg;
+
+ spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(1));
+ cvmx_write_csr(CVMX_SPXX_INT_REG(1), spx_int_reg.u64);
+ if (!need_retrain[1]) {
+
+ spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(1));
+ if (spx_int_reg.s.spf)
+ pr_err("SPI1: SRX Spi4 interface down\n");
+ if (spx_int_reg.s.calerr)
+ pr_err("SPI1: SRX Spi4 Calendar table "
+ "parity error\n");
+ if (spx_int_reg.s.syncerr)
+ pr_err("SPI1: SRX Consecutive Spi4 DIP4 "
+ "errors have exceeded "
+ "SPX_ERR_CTL[ERRCNT]\n");
+ if (spx_int_reg.s.diperr)
+ pr_err("SPI1: SRX Spi4 DIP4 error\n");
+ if (spx_int_reg.s.tpaovr)
+ pr_err("SPI1: SRX Selected port has hit "
+ "TPA overflow\n");
+ if (spx_int_reg.s.rsverr)
+ pr_err("SPI1: SRX Spi4 reserved control "
+ "word detected\n");
+ if (spx_int_reg.s.drwnng)
+ pr_err("SPI1: SRX Spi4 receive FIFO "
+ "drowning/overflow\n");
+ if (spx_int_reg.s.clserr)
+ pr_err("SPI1: SRX Spi4 packet closed on "
+ "non-16B alignment without EOP\n");
+ if (spx_int_reg.s.spiovr)
+ pr_err("SPI1: SRX Spi4 async FIFO overflow\n");
+ if (spx_int_reg.s.abnorm)
+ pr_err("SPI1: SRX Abnormal packet "
+ "termination (ERR bit)\n");
+ if (spx_int_reg.s.prtnxa)
+ pr_err("SPI1: SRX Port out of range\n");
+ }
+
+ stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(1));
+ cvmx_write_csr(CVMX_STXX_INT_REG(1), stx_int_reg.u64);
+ if (!need_retrain[1]) {
+
+ stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1));
+ if (stx_int_reg.s.syncerr)
+ pr_err("SPI1: STX Interface encountered a "
+ "fatal error\n");
+ if (stx_int_reg.s.frmerr)
+ pr_err("SPI1: STX FRMCNT has exceeded "
+ "STX_DIP_CNT[MAXFRM]\n");
+ if (stx_int_reg.s.unxfrm)
+ pr_err("SPI1: STX Unexpected framing "
+ "sequence\n");
+ if (stx_int_reg.s.nosync)
+ pr_err("SPI1: STX ERRCNT has exceeded "
+ "STX_DIP_CNT[MAXDIP]\n");
+ if (stx_int_reg.s.diperr)
+ pr_err("SPI1: STX DIP2 error on the Spi4 "
+ "Status channel\n");
+ if (stx_int_reg.s.datovr)
+ pr_err("SPI1: STX Spi4 FIFO overflow error\n");
+ if (stx_int_reg.s.ovrbst)
+ pr_err("SPI1: STX Transmit packet burst "
+ "too big\n");
+ if (stx_int_reg.s.calpar1)
+ pr_err("SPI1: STX Calendar Table Parity "
+ "Error Bank1\n");
+ if (stx_int_reg.s.calpar0)
+ pr_err("SPI1: STX Calendar Table Parity "
+ "Error Bank0\n");
+ }
+
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0);
+ cvmx_write_csr(CVMX_STXX_INT_MSK(1), 0);
+ need_retrain[1] = 1;
+ return_status = IRQ_HANDLED;
+ }
+
+ if (rsl_int_blocks.s.spx0) { /* 18 - SPX0_INT_REG & STX0_INT_REG */
+ union cvmx_spxx_int_reg spx_int_reg;
+ union cvmx_stxx_int_reg stx_int_reg;
+
+ spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(0));
+ cvmx_write_csr(CVMX_SPXX_INT_REG(0), spx_int_reg.u64);
+ if (!need_retrain[0]) {
+
+ spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(0));
+ if (spx_int_reg.s.spf)
+ pr_err("SPI0: SRX Spi4 interface down\n");
+ if (spx_int_reg.s.calerr)
+ pr_err("SPI0: SRX Spi4 Calendar table "
+ "parity error\n");
+ if (spx_int_reg.s.syncerr)
+ pr_err("SPI0: SRX Consecutive Spi4 DIP4 "
+ "errors have exceeded "
+ "SPX_ERR_CTL[ERRCNT]\n");
+ if (spx_int_reg.s.diperr)
+ pr_err("SPI0: SRX Spi4 DIP4 error\n");
+ if (spx_int_reg.s.tpaovr)
+ pr_err("SPI0: SRX Selected port has hit "
+ "TPA overflow\n");
+ if (spx_int_reg.s.rsverr)
+ pr_err("SPI0: SRX Spi4 reserved control "
+ "word detected\n");
+ if (spx_int_reg.s.drwnng)
+ pr_err("SPI0: SRX Spi4 receive FIFO "
+ "drowning/overflow\n");
+ if (spx_int_reg.s.clserr)
+ pr_err("SPI0: SRX Spi4 packet closed on "
+ "non-16B alignment without EOP\n");
+ if (spx_int_reg.s.spiovr)
+ pr_err("SPI0: SRX Spi4 async FIFO overflow\n");
+ if (spx_int_reg.s.abnorm)
+ pr_err("SPI0: SRX Abnormal packet "
+ "termination (ERR bit)\n");
+ if (spx_int_reg.s.prtnxa)
+ pr_err("SPI0: SRX Port out of range\n");
+ }
+
+ stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(0));
+ cvmx_write_csr(CVMX_STXX_INT_REG(0), stx_int_reg.u64);
+ if (!need_retrain[0]) {
+
+ stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0));
+ if (stx_int_reg.s.syncerr)
+ pr_err("SPI0: STX Interface encountered a "
+ "fatal error\n");
+ if (stx_int_reg.s.frmerr)
+ pr_err("SPI0: STX FRMCNT has exceeded "
+ "STX_DIP_CNT[MAXFRM]\n");
+ if (stx_int_reg.s.unxfrm)
+ pr_err("SPI0: STX Unexpected framing "
+ "sequence\n");
+ if (stx_int_reg.s.nosync)
+ pr_err("SPI0: STX ERRCNT has exceeded "
+ "STX_DIP_CNT[MAXDIP]\n");
+ if (stx_int_reg.s.diperr)
+ pr_err("SPI0: STX DIP2 error on the Spi4 "
+ "Status channel\n");
+ if (stx_int_reg.s.datovr)
+ pr_err("SPI0: STX Spi4 FIFO overflow error\n");
+ if (stx_int_reg.s.ovrbst)
+ pr_err("SPI0: STX Transmit packet burst "
+ "too big\n");
+ if (stx_int_reg.s.calpar1)
+ pr_err("SPI0: STX Calendar Table Parity "
+ "Error Bank1\n");
+ if (stx_int_reg.s.calpar0)
+ pr_err("SPI0: STX Calendar Table Parity "
+ "Error Bank0\n");
+ }
+
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0);
+ cvmx_write_csr(CVMX_STXX_INT_MSK(0), 0);
+ need_retrain[0] = 1;
+ return_status = IRQ_HANDLED;
+ }
+
+ return return_status;
+}
+
+static void cvm_oct_spi_enable_error_reporting(int interface)
+{
+ union cvmx_spxx_int_msk spxx_int_msk;
+ union cvmx_stxx_int_msk stxx_int_msk;
+
+ spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
+ spxx_int_msk.s.calerr = 1;
+ spxx_int_msk.s.syncerr = 1;
+ spxx_int_msk.s.diperr = 1;
+ spxx_int_msk.s.tpaovr = 1;
+ spxx_int_msk.s.rsverr = 1;
+ spxx_int_msk.s.drwnng = 1;
+ spxx_int_msk.s.clserr = 1;
+ spxx_int_msk.s.spiovr = 1;
+ spxx_int_msk.s.abnorm = 1;
+ spxx_int_msk.s.prtnxa = 1;
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
+
+ stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
+ stxx_int_msk.s.frmerr = 1;
+ stxx_int_msk.s.unxfrm = 1;
+ stxx_int_msk.s.nosync = 1;
+ stxx_int_msk.s.diperr = 1;
+ stxx_int_msk.s.datovr = 1;
+ stxx_int_msk.s.ovrbst = 1;
+ stxx_int_msk.s.calpar1 = 1;
+ stxx_int_msk.s.calpar0 = 1;
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
+}
+
+static void cvm_oct_spi_poll(struct net_device *dev)
+{
+ static int spi4000_port;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface;
+
+ for (interface = 0; interface < 2; interface++) {
+
+ if ((priv->port == interface * 16) && need_retrain[interface]) {
+
+ if (cvmx_spi_restart_interface
+ (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
+ need_retrain[interface] = 0;
+ cvm_oct_spi_enable_error_reporting(interface);
+ }
+ }
+
+ /*
+ * The SPI4000 TWSI interface is very slow. In order
+ * not to bring the system to a crawl, we only poll a
+ * single port every second. This means negotiation
+ * speed changes take up to 10 seconds, but at least
+ * we don't waste absurd amounts of time waiting for
+ * TWSI.
+ */
+ if (priv->port == spi4000_port) {
+ /*
+ * This function does nothing if it is called on an
+ * interface without a SPI4000.
+ */
+ cvmx_spi4000_check_speed(interface, priv->port);
+ /*
+ * Normal ordering increments. By decrementing
+ * we only match once per iteration.
+ */
+ spi4000_port--;
+ if (spi4000_port < 0)
+ spi4000_port = 10;
+ }
+ }
+}
+
+int cvm_oct_spi_init(struct net_device *dev)
+{
+ int r;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (number_spi_ports == 0) {
+ r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
+ IRQF_SHARED, "SPI", &number_spi_ports);
+ }
+ number_spi_ports++;
+
+ if ((priv->port == 0) || (priv->port == 16)) {
+ cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
+ priv->poll = cvm_oct_spi_poll;
+ }
+ cvm_oct_common_init(dev);
+ return 0;
+}
+
+void cvm_oct_spi_uninit(struct net_device *dev)
+{
+ int interface;
+
+ cvm_oct_common_uninit(dev);
+ number_spi_ports--;
+ if (number_spi_ports == 0) {
+ for (interface = 0; interface < 2; interface++) {
+ cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
+ cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
+ }
+ free_irq(8 + 46, &number_spi_ports);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
new file mode 100644
index 000000000000..77b7122c8fdb
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -0,0 +1,634 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#ifdef CONFIG_XFRM
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+#endif /* CONFIG_XFRM */
+
+#include <asm/atomic.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-util.h"
+
+#include "cvmx-wqe.h"
+#include "cvmx-fau.h"
+#include "cvmx-pko.h"
+#include "cvmx-helper.h"
+
+#include "cvmx-gmxx-defs.h"
+
+/*
+ * You can define GET_SKBUFF_QOS() to override how the skbuff output
+ * function determines which output queue is used. The default
+ * implementation always uses the base queue for the port. If, for
+ * example, you wanted to use the skb->priority field, define
+ * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
+ */
+#ifndef GET_SKBUFF_QOS
+#define GET_SKBUFF_QOS(skb) 0
+#endif
+
+/**
+ * Packet transmit
+ *
+ * @skb: Packet to send
+ * @dev: Device info structure
+ * Returns: Always returns zero
+ */
+int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ cvmx_pko_command_word0_t pko_command;
+ union cvmx_buf_ptr hw_buffer;
+ uint64_t old_scratch;
+ uint64_t old_scratch2;
+ int dropped;
+ int qos;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int32_t in_use;
+ int32_t buffers_to_free;
+#if REUSE_SKBUFFS_WITHOUT_FREE
+ unsigned char *fpa_head;
+#endif
+
+ /*
+ * Prefetch the private data structure. It is larger than one
+ * cache line.
+ */
+ prefetch(priv);
+
+ /* Start off assuming no drop */
+ dropped = 0;
+
+ /*
+ * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
+ * completely remove "qos" in the event neither interface
+ * supports multiple queues per port.
+ */
+ if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
+ (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
+ qos = GET_SKBUFF_QOS(skb);
+ if (qos <= 0)
+ qos = 0;
+ else if (qos >= cvmx_pko_get_num_queues(priv->port))
+ qos = 0;
+ } else
+ qos = 0;
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Save scratch in case userspace is using it */
+ CVMX_SYNCIOBDMA;
+ old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+
+ /*
+ * Assume we're going to be able to send this
+ * packet. Fetch and increment the number of pending
+ * packets for output.
+ */
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
+ FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ 0);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ priv->fau + qos * 4, 1);
+ }
+
+ /*
+ * The CN3XXX series of parts has an errata (GMX-401) which
+ * causes the GMX block to hang if a collision occurs towards
+ * the end of a <68 byte packet. As a workaround for this, we
+ * pad packets to be 68 bytes whenever we are in half duplex
+ * mode. We don't handle the case of having a small packet but
+ * no room to add the padding. The kernel should always give
+ * us at least a cache line.
+ */
+ if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ if (interface < 2) {
+ /* We only need to pad packet in half duplex mode */
+ gmx_prt_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ if (gmx_prt_cfg.s.duplex == 0) {
+ int add_bytes = 64 - skb->len;
+ if ((skb_tail_pointer(skb) + add_bytes) <=
+ skb_end_pointer(skb))
+ memset(__skb_put(skb, add_bytes), 0,
+ add_bytes);
+ }
+ }
+ }
+
+ /* Build the PKO buffer pointer */
+ hw_buffer.u64 = 0;
+ hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
+ hw_buffer.s.pool = 0;
+ hw_buffer.s.size =
+ (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+ pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
+ pko_command.s.segs = 1;
+ pko_command.s.total_bytes = skb->len;
+ pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
+ pko_command.s.subone0 = 1;
+
+ pko_command.s.dontfree = 1;
+ pko_command.s.reg0 = priv->fau + qos * 4;
+ /*
+ * See if we can put this skb in the FPA pool. Any strange
+ * behavior from the Linux networking stack will most likely
+ * be caused by a bug in the following code. If some field is
+ * in use by the network stack and gets carried over when a
+ * buffer is reused, bad things may happen. If in doubt and
+ * you don't need the absolute best performance, disable the
+ * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
+ * shown a 25% increase in performance under some loads.
+ */
+#if REUSE_SKBUFFS_WITHOUT_FREE
+ fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
+ if (unlikely(skb->data < fpa_head)) {
+ /*
+ * printk("TX buffer beginning can't meet FPA
+ * alignment constraints\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely
+ ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
+ /*
+ printk("TX buffer isn't large enough for the FPA\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_shared(skb))) {
+ /*
+ printk("TX buffer sharing data with someone else\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_cloned(skb))) {
+ /*
+ printk("TX buffer has been cloned\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_header_cloned(skb))) {
+ /*
+ printk("TX buffer header has been cloned\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb->destructor)) {
+ /*
+ printk("TX buffer has a destructor\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely(skb_shinfo(skb)->nr_frags)) {
+ /*
+ printk("TX buffer has fragments\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+ if (unlikely
+ (skb->truesize !=
+ sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
+ /*
+ printk("TX buffer truesize has been changed\n");
+ */
+ goto dont_put_skbuff_in_hw;
+ }
+
+ /*
+ * We can use this buffer in the FPA. We don't need the FAU
+ * update anymore
+ */
+ pko_command.s.reg0 = 0;
+ pko_command.s.dontfree = 0;
+
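+ /*
+ * The "back" field counts 128-byte cache lines (hence the >> 7) from
+ * the packet data back to the start of the FPA buffer; the skb
+ * pointer is stashed in the eight bytes just below the aligned
+ * buffer start so it can be recovered when the buffer is recycled.
+ */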
+ hw_buffer.s.back = (skb->data - fpa_head) >> 7;
+ *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
+
+ /*
+ * The skbuff will be reused without ever being freed. We must
+ * clean up a bunch of Linux state.
+ */
+ dst_release(skb->dst);
+ skb->dst = NULL;
+#ifdef CONFIG_XFRM
+ secpath_put(skb->sp);
+ skb->sp = NULL;
+#endif
+ nf_reset(skb);
+
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_verd = 0;
+#endif /* CONFIG_NET_CLS_ACT */
+#endif /* CONFIG_NET_SCHED */
+
+dont_put_skbuff_in_hw:
+#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
+
+ /* Check if we can use the hardware checksumming */
+ if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
+ ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
+ && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
+ || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
+ /* Use hardware checksum calc */
+ pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
+ }
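+ /*
+ * ipoffp1 is the offset of the IP header plus one; for anything
+ * other than a simple IPv4 TCP/UDP frame it stays zero (the reset
+ * value of the command word), leaving checksum generation disabled.
+ */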
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Get the number of skbuffs in use by the hardware */
+ CVMX_SYNCIOBDMA;
+ in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+ } else {
+ /* Get the number of skbuffs in use by the hardware */
+ in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
+ buffers_to_free =
+ cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+ }
+
+ /*
+ * If we're sending faster than the receive can free them then
+ * don't do the HW free.
+ */
+ if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
+ pko_command.s.dontfree = 1;
+ pko_command.s.reg0 = priv->fau + qos * 4;
+ }
+
+ cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
+ CVMX_PKO_LOCK_CMD_QUEUE);
+
+ /* Drop this packet if we have too many already queued to the HW */
+ if (unlikely
+ (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+ /*
+ DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
+ */
+ dropped = 1;
+ }
+ /* Send the packet to the output queue */
+ else if (unlikely
+ (cvmx_pko_send_packet_finish
+ (priv->port, priv->queue + qos, pko_command, hw_buffer,
+ CVMX_PKO_LOCK_CMD_QUEUE))) {
+ DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
+ dropped = 1;
+ }
+
+ if (USE_ASYNC_IOBDMA) {
+ /* Restore the scratch area */
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
+ }
+
+ if (unlikely(dropped)) {
+ dev_kfree_skb_any(skb);
+ cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
+ priv->stats.tx_dropped++;
+ } else {
+ if (USE_SKBUFFS_IN_HW) {
+ /* Put this packet on the queue to be freed later */
+ if (pko_command.s.dontfree)
+ skb_queue_tail(&priv->tx_free_list[qos], skb);
+ else {
+ cvmx_fau_atomic_add32
+ (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
+ cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
+ }
+ } else {
+ /* Put this packet on the queue to be freed later */
+ skb_queue_tail(&priv->tx_free_list[qos], skb);
+ }
+ }
+
+ /* Free skbuffs not in use by the hardware, possibly two at a time */
+ if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
+ spin_lock(&priv->tx_free_list[qos].lock);
+ /*
+ * Check again now that we have the lock. It might
+ * have changed.
+ */
+ if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
+ dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
+ dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ spin_unlock(&priv->tx_free_list[qos].lock);
+ }
+
+ return 0;
+}
+
+/**
+ * Packet transmit to the POW
+ *
+ * @skb: Packet to send
+ * @dev: Device info structure
+ * Returns Zero (always)
+ */
+int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ void *packet_buffer;
+ void *copy_location;
+
+ /* Get a work queue entry */
+ cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+ if (unlikely(work == NULL)) {
+ DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
+ dev->name);
+ priv->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ /* Get a packet buffer */
+ packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
+ if (unlikely(packet_buffer == NULL)) {
+ DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
+ dev->name);
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
+ priv->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ /*
+ * Calculate where we need to copy the data to. We need to
+ * leave 8 bytes for a next pointer (unused). We also need to
+ * include any configure skip. Then we need to align the IP
+ * packet src and dest into the same 64bit word. The below
+ * calculation may add a little extra, but that doesn't
+ * hurt.
+ */
+ copy_location = packet_buffer + sizeof(uint64_t);
+ copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
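+ /*
+ * For example, with a first-mbuff skip of 184 bytes (illustrative
+ * value), copy_location ends up 8 + 184 + 6 = 198 bytes into the
+ * buffer, which places the IP source and destination addresses in
+ * the same 64-bit word.
+ */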
+
+ /*
+ * We have to copy the packet since whoever processes this
+ * packet will free it to a hardware pool. We can't use the
+ * trick of counting outstanding packets like in
+ * cvm_oct_xmit.
+ */
+ memcpy(copy_location, skb->data, skb->len);
+
+ /*
+ * Fill in some of the work queue fields. We may need to add
+ * more if the software at the other end needs them.
+ */
+ work->hw_chksum = skb->csum;
+ work->len = skb->len;
+ work->ipprt = priv->port;
+ work->qos = priv->port & 0x7;
+ work->grp = pow_send_group;
+ work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+ work->tag = pow_send_group; /* FIXME */
+ /* Default to zero. Explicit sets to zero later are commented out */
+ work->word2.u64 = 0;
+ work->word2.s.bufs = 1;
+ work->packet_ptr.u64 = 0;
+ work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
+ work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
+ work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
+ work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ work->word2.s.ip_offset = 14;
+#if 0
+ work->word2.s.vlan_valid = 0; /* FIXME */
+ work->word2.s.vlan_cfi = 0; /* FIXME */
+ work->word2.s.vlan_id = 0; /* FIXME */
+ work->word2.s.dec_ipcomp = 0; /* FIXME */
+#endif
+ work->word2.s.tcp_or_udp =
+ (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
+ || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
+#if 0
+ /* FIXME */
+ work->word2.s.dec_ipsec = 0;
+ /* We only support IPv4 right now */
+ work->word2.s.is_v6 = 0;
+ /* Hardware would set to zero */
+ work->word2.s.software = 0;
+ /* No error, packet is internal */
+ work->word2.s.L4_error = 0;
+#endif
+ work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
+ || (ip_hdr(skb)->frag_off ==
+ 1 << 14));
+#if 0
+ /* Assume Linux is sending a good packet */
+ work->word2.s.IP_exc = 0;
+#endif
+ work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
+ work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
+#if 0
+ /* This is an IP packet */
+ work->word2.s.not_IP = 0;
+ /* No error, packet is internal */
+ work->word2.s.rcv_error = 0;
+ /* No error, packet is internal */
+ work->word2.s.err_code = 0;
+#endif
+
+ /*
+ * When copying the data, include 4 bytes of the
+ * ethernet header to align the same way hardware
+ * does.
+ */
+ memcpy(work->packet_data, skb->data + 10,
+ sizeof(work->packet_data));
+ } else {
+#if 0
+ work->word2.snoip.vlan_valid = 0; /* FIXME */
+ work->word2.snoip.vlan_cfi = 0; /* FIXME */
+ work->word2.snoip.vlan_id = 0; /* FIXME */
+ work->word2.snoip.software = 0; /* Hardware would set to zero */
+#endif
+ work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
+ work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
+ work->word2.snoip.is_bcast =
+ (skb->pkt_type == PACKET_BROADCAST);
+ work->word2.snoip.is_mcast =
+ (skb->pkt_type == PACKET_MULTICAST);
+ work->word2.snoip.not_IP = 1; /* IP was done up above */
+#if 0
+ /* No error, packet is internal */
+ work->word2.snoip.rcv_error = 0;
+ /* No error, packet is internal */
+ work->word2.snoip.err_code = 0;
+#endif
+ memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
+ }
+
+ /* Submit the packet to the POW */
+ cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
+ work->grp);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * Transmit a work queue entry out of the ethernet port. Both
+ * the work queue entry and the packet data can optionally be
+ * freed. The work will be freed on error as well.
+ *
+ * @dev: Device to transmit out.
+ * @work_queue_entry:
+ * Work queue entry to send
+ * @do_free: True if the work queue entry and packet data should be
+ * freed. If false, neither will be freed.
+ * @qos: Index into the queues for this port to transmit on. This
+ * is used to implement QoS if there are multiple queues per
+ * port. This parameter must be between 0 and the number of
+ * queues per port minus 1. Values outside of this range will
+ * be changed to zero.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
+ int do_free, int qos)
+{
+ unsigned long flags;
+ union cvmx_buf_ptr hw_buffer;
+ cvmx_pko_command_word0_t pko_command;
+ int dropped;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvmx_wqe_t *work = work_queue_entry;
+
+ if (!(dev->flags & IFF_UP)) {
+ DEBUGPRINT("%s: Device not up\n", dev->name);
+ if (do_free)
+ cvm_oct_free_work(work);
+ return -1;
+ }
+
+ /*
+ * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
+ * completely remove "qos" in the event neither interface
+ * supports multiple queues per port.
+ */
+ if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
+ (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
+ if (qos <= 0)
+ qos = 0;
+ else if (qos >= cvmx_pko_get_num_queues(priv->port))
+ qos = 0;
+ } else
+ qos = 0;
+
+ /* Start off assuming no drop */
+ dropped = 0;
+
+ local_irq_save(flags);
+ cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
+ CVMX_PKO_LOCK_CMD_QUEUE);
+
+ /* Build the PKO buffer pointer */
+ hw_buffer.u64 = 0;
+ hw_buffer.s.addr = work->packet_ptr.s.addr;
+ hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
+ hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
+ hw_buffer.s.back = work->packet_ptr.s.back;
+
+ /* Build the PKO command */
+ pko_command.u64 = 0;
+ pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
+ pko_command.s.dontfree = !do_free;
+ pko_command.s.segs = work->word2.s.bufs;
+ pko_command.s.total_bytes = work->len;
+
+ /* Check if we can use the hardware checksumming */
+ if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc))
+ pko_command.s.ipoffp1 = 0;
+ else
+ pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
+
+ /* Send the packet to the output queue */
+ if (unlikely
+ (cvmx_pko_send_packet_finish
+ (priv->port, priv->queue + qos, pko_command, hw_buffer,
+ CVMX_PKO_LOCK_CMD_QUEUE))) {
+ DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
+ dropped = -1;
+ }
+ local_irq_restore(flags);
+
+ if (unlikely(dropped)) {
+ if (do_free)
+ cvm_oct_free_work(work);
+ priv->stats.tx_dropped++;
+ } else if (do_free)
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
+
+ return dropped;
+}
+EXPORT_SYMBOL(cvm_oct_transmit_qos);
+
+/**
+ * This function frees all skbs that are currently queued for TX.
+ *
+ * @dev: Device being shutdown
+ */
+void cvm_oct_tx_shutdown(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ unsigned long flags;
+ int qos;
+
+ for (qos = 0; qos < 16; qos++) {
+ spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+ while (skb_queue_len(&priv->tx_free_list[qos]))
+ dev_kfree_skb_any(__skb_dequeue
+ (&priv->tx_free_list[qos]));
+ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
new file mode 100644
index 000000000000..5106236fe981
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -0,0 +1,32 @@
+/*********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+
+int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
+int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
+int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
+ int do_free, int qos);
+void cvm_oct_tx_shutdown(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
new file mode 100644
index 000000000000..37b665918000
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -0,0 +1,81 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+*********************************************************************/
+
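+/*
+ * Rate-limited printk wrapper: once printk_ratelimit() trips, further
+ * messages are dropped, so a burst of identical errors such as
+ * DEBUGPRINT("%s: Failed to send the packet\n", dev->name) cannot swamp
+ * the log.
+ */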
+#define DEBUGPRINT(format, ...) do { if (printk_ratelimit()) \
+ printk(format, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * Given a packet data address, return a pointer to the
+ * beginning of the packet buffer.
+ *
+ * @packet_ptr: Packet data hardware address
+ * Returns Packet buffer pointer
+ */
+static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
+{
+ return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
+ << 7);
+}
+
+/**
+ * Given an IPD/PKO port number, return the logical interface it is
+ * on.
+ *
+ * @ipd_port: Port to check
+ *
+ * Returns Logical interface
+ */
+static inline int INTERFACE(int ipd_port)
+{
+ if (ipd_port < 32) /* Interface 0 or 1 for RGMII, GMII, SPI, etc. */
+ return ipd_port >> 4;
+ else if (ipd_port < 36) /* Interface 2 for NPI */
+ return 2;
+ else if (ipd_port < 40) /* Interface 3 for loopback */
+ return 3;
+ else if (ipd_port == 40) /* Non-existent interface for POW0 */
+ return 4;
+ else
+ panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
+}
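+
+/*
+ * For example, ipd_port 0-15 map to interface 0, 16-31 to interface 1,
+ * 32-35 to interface 2 (NPI), 36-39 to interface 3 (loopback), and the
+ * POW-only pseudo-port 40 to interface 4.
+ */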
+
+/**
+ * Given an IPD/PKO port number, return the port's index on a
+ * logical interface.
+ *
+ * @ipd_port: Port to check
+ *
+ * Returns Index into interface port list
+ */
+static inline int INDEX(int ipd_port)
+{
+ if (ipd_port < 32)
+ return ipd_port & 15;
+ else
+ return ipd_port & 3;
+}
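+
+/*
+ * For example, ipd_port 17 is index 1 on interface 1, while ipd_port 37
+ * is index 1 on the loopback interface.
+ */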
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
new file mode 100644
index 000000000000..f08eb32e04fc
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -0,0 +1,127 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
+#include "ethernet-common.h"
+#include "ethernet-util.h"
+
+#include "cvmx-helper.h"
+
+#include "cvmx-gmxx-defs.h"
+
+static int cvm_oct_xaui_open(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ cvmx_helper_link_info_t link_info;
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 1;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+
+ if (!octeon_is_simulation()) {
+ link_info = cvmx_helper_link_get(priv->port);
+ if (!link_info.s.link_up)
+ netif_carrier_off(dev);
+ }
+ return 0;
+}
+
+static int cvm_oct_xaui_stop(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ gmx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
+ return 0;
+}
+
+static void cvm_oct_xaui_poll(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvmx_helper_link_info_t link_info;
+
+ link_info = cvmx_helper_link_get(priv->port);
+ if (link_info.u64 == priv->link_info)
+ return;
+
+ link_info = cvmx_helper_link_autoconf(priv->port);
+ priv->link_info = link_info.u64;
+
+ /* Tell Linux */
+ if (link_info.s.link_up) {
+
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT
+ ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
+}
+
+int cvm_oct_xaui_init(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvm_oct_common_init(dev);
+ dev->open = cvm_oct_xaui_open;
+ dev->stop = cvm_oct_xaui_stop;
+ dev->stop(dev);
+ if (!octeon_is_simulation())
+ priv->poll = cvm_oct_xaui_poll;
+
+ return 0;
+}
+
+void cvm_oct_xaui_uninit(struct net_device *dev)
+{
+ cvm_oct_common_uninit(dev);
+}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
new file mode 100644
index 000000000000..e8ef9e0b791f
--- /dev/null
+++ b/drivers/staging/octeon/ethernet.c
@@ -0,0 +1,507 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+
+#include <net/dst.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "ethernet-defines.h"
+#include "ethernet-mem.h"
+#include "ethernet-rx.h"
+#include "ethernet-tx.h"
+#include "ethernet-util.h"
+#include "ethernet-proc.h"
+#include "ethernet-common.h"
+#include "octeon-ethernet.h"
+
+#include "cvmx-pip.h"
+#include "cvmx-pko.h"
+#include "cvmx-fau.h"
+#include "cvmx-ipd.h"
+#include "cvmx-helper.h"
+
+#include "cvmx-smix-defs.h"
+
+#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
+ && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
+int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
+#else
+int num_packet_buffers = 1024;
+#endif
+module_param(num_packet_buffers, int, 0444);
+MODULE_PARM_DESC(num_packet_buffers, "\n"
+ "\tNumber of packet buffers to allocate and store in the\n"
+ "\tFPA. By default, 1024 packet buffers are used unless\n"
+ "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
+
+int pow_receive_group = 15;
+module_param(pow_receive_group, int, 0444);
+MODULE_PARM_DESC(pow_receive_group, "\n"
+ "\tPOW group to receive packets from. All ethernet hardware\n"
+ "\twill be configured to send incomming packets to this POW\n"
+ "\tgroup. Also any other software can submit packets to this\n"
+ "\tgroup for the kernel to process.");
+
+int pow_send_group = -1;
+module_param(pow_send_group, int, 0644);
+MODULE_PARM_DESC(pow_send_group, "\n"
+ "\tPOW group to send packets to other software on. This\n"
+ "\tcontrols the creation of the virtual device pow0.\n"
+ "\talways_use_pow also depends on this value.");
+
+int always_use_pow;
+module_param(always_use_pow, int, 0444);
+MODULE_PARM_DESC(always_use_pow, "\n"
+ "\tWhen set, always send to the pow group. This will cause\n"
+ "\tpackets sent to real ethernet devices to be sent to the\n"
+ "\tPOW group instead of the hardware. Unless some other\n"
+ "\tapplication changes the config, packets will still be\n"
+ "\treceived from the low level hardware. Use this option\n"
+ "\tto allow a CVMX app to intercept all packets from the\n"
+ "\tlinux kernel. You must specify pow_send_group along with\n"
+ "\tthis option.");
+
+char pow_send_list[128] = "";
+module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
+MODULE_PARM_DESC(pow_send_list, "\n"
+ "\tComma separated list of ethernet devices that should use the\n"
+ "\tPOW for transmit instead of the actual ethernet hardware. This\n"
+ "\tis a per port version of always_use_pow. always_use_pow takes\n"
+ "\tprecedence over this list. For example, setting this to\n"
+ "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
+ "\tusing the pow_send_group.");
+
+static int disable_core_queueing = 1;
+module_param(disable_core_queueing, int, 0444);
+MODULE_PARM_DESC(disable_core_queueing, "\n"
+ "\tWhen set the networking core's tx_queue_len is set to zero. This\n"
+ "\tallows packets to be sent without lock contention in the packet\n"
+ "\tscheduler resulting in some cases in improved throughput.\n");
+
+/**
+ * Periodic timer to check auto negotiation
+ */
+static struct timer_list cvm_oct_poll_timer;
+
+/**
+ * Array of every ethernet device owned by this driver indexed by
+ * the ipd input port number.
+ */
+struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
+
+extern struct semaphore mdio_sem;
+
+/**
+ * Periodic timer tick for slow management operations
+ *
+ * @arg: Device to check
+ */
+static void cvm_do_timer(unsigned long arg)
+{
+ static int port;
+ if (port < CVMX_PIP_NUM_INPUT_PORTS) {
+ if (cvm_oct_device[port]) {
+ int queues_per_port;
+ int qos;
+ struct octeon_ethernet *priv =
+ netdev_priv(cvm_oct_device[port]);
+ if (priv->poll) {
+ /* skip polling if we don't get the lock */
+ if (!down_trylock(&mdio_sem)) {
+ priv->poll(cvm_oct_device[port]);
+ up(&mdio_sem);
+ }
+ }
+
+ queues_per_port = cvmx_pko_get_num_queues(port);
+ /* Drain any pending packets in the free list */
+ for (qos = 0; qos < queues_per_port; qos++) {
+ if (skb_queue_len(&priv->tx_free_list[qos])) {
+ spin_lock(&priv->tx_free_list[qos].
+ lock);
+ while (skb_queue_len
+ (&priv->tx_free_list[qos]) >
+ cvmx_fau_fetch_and_add32(priv->
+ fau +
+ qos * 4,
+ 0))
+ dev_kfree_skb(__skb_dequeue
+ (&priv->
+ tx_free_list
+ [qos]));
+ spin_unlock(&priv->tx_free_list[qos].
+ lock);
+ }
+ }
+ cvm_oct_device[port]->get_stats(cvm_oct_device[port]);
+ }
+ port++;
+ /* Poll the next port in a 50th of a second.
+ This spreads the polling of ports out a little bit */
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
+ } else {
+ port = 0;
+ /* All ports have been polled. Start the next iteration through
+ the ports in one second */
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+ }
+}
+
+/**
+ * Configure common hardware for all interfaces
+ */
+static __init void cvm_oct_configure_common_hw(void)
+{
+ int r;
+ /* Setup the FPA */
+ cvmx_fpa_enable();
+ cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
+ num_packet_buffers);
+ cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+ num_packet_buffers);
+ if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
+ cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+
+ if (USE_RED)
+ cvmx_helper_setup_red(num_packet_buffers / 4,
+ num_packet_buffers / 8);
+
+ /* Enable the MII interface */
+ if (!octeon_is_simulation())
+ cvmx_write_csr(CVMX_SMIX_EN(0), 1);
+
+ /* Register an IRQ handler to receive POW interrupts */
+ r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+ cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
+ cvm_oct_device);
+
+#if defined(CONFIG_SMP) && 0
+ if (USE_MULTICORE_RECEIVE) {
+ irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+ cpu_online_mask);
+ }
+#endif
+}
+
+/**
+ * Free a work queue entry received in an intercept callback.
+ *
+ * @work_queue_entry:
+ * Work queue entry to free
+ * Returns Zero on success, Negative on failure.
+ */
+int cvm_oct_free_work(void *work_queue_entry)
+{
+ cvmx_wqe_t *work = work_queue_entry;
+
+ int segments = work->word2.s.bufs;
+ union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+
+ while (segments--) {
+ union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+ if (unlikely(!segment_ptr.s.i))
+ cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
+ segment_ptr.s.pool,
+ DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
+ 128));
+ segment_ptr = next_ptr;
+ }
+ cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
+
+ return 0;
+}
+EXPORT_SYMBOL(cvm_oct_free_work);
+
+/**
+ * Module/driver initialization. Creates the Linux network
+ * devices.
+ *
+ * Returns Zero on success
+ */
+static int __init cvm_oct_init_module(void)
+{
+ int num_interfaces;
+ int interface;
+ int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
+ int qos;
+
+ pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
+
+ cvm_oct_proc_initialize();
+ cvm_oct_rx_initialize();
+ cvm_oct_configure_common_hw();
+
+ cvmx_helper_initialize_packet_io_global();
+
+ /* Change the input group for all ports before input is enabled */
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+
+ for (port = cvmx_helper_get_ipd_port(interface, 0);
+ port < cvmx_helper_get_ipd_port(interface, num_ports);
+ port++) {
+ union cvmx_pip_prt_tagx pip_prt_tagx;
+ pip_prt_tagx.u64 =
+ cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
+ pip_prt_tagx.s.grp = pow_receive_group;
+ cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
+ pip_prt_tagx.u64);
+ }
+ }
+
+ cvmx_helper_ipd_and_packet_input_enable();
+
+ memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
+
+ /*
+ * Initialize the FAU used for counting packet buffers that
+ * need to be freed.
+ */
+ cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
+
+ if ((pow_send_group != -1)) {
+ struct net_device *dev;
+ pr_info("\tConfiguring device for POW only access\n");
+ dev = alloc_etherdev(sizeof(struct octeon_ethernet));
+ if (dev) {
+ /* Initialize the device private structure. */
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(struct octeon_ethernet));
+
+ dev->init = cvm_oct_common_init;
+ priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ priv->port = CVMX_PIP_NUM_INPUT_PORTS;
+ priv->queue = -1;
+ strcpy(dev->name, "pow%d");
+ for (qos = 0; qos < 16; qos++)
+ skb_queue_head_init(&priv->tx_free_list[qos]);
+
+ if (register_netdev(dev) < 0) {
+ pr_err("Failed to register ethernet "
+ "device for POW\n");
+ kfree(dev);
+ } else {
+ cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
+ pr_info("%s: POW send group %d, receive "
+ "group %d\n",
+ dev->name, pow_send_group,
+ pow_receive_group);
+ }
+ } else {
+ pr_err("Failed to allocate ethernet device "
+ "for POW\n");
+ }
+ }
+
+ num_interfaces = cvmx_helper_get_number_of_interfaces();
+ for (interface = 0; interface < num_interfaces; interface++) {
+ cvmx_helper_interface_mode_t imode =
+ cvmx_helper_interface_get_mode(interface);
+ int num_ports = cvmx_helper_ports_on_interface(interface);
+ int port;
+
+ for (port = cvmx_helper_get_ipd_port(interface, 0);
+ port < cvmx_helper_get_ipd_port(interface, num_ports);
+ port++) {
+ struct octeon_ethernet *priv;
+ struct net_device *dev =
+ alloc_etherdev(sizeof(struct octeon_ethernet));
+ if (!dev) {
+ pr_err("Failed to allocate ethernet device "
+ "for port %d\n", port);
+ continue;
+ }
+ if (disable_core_queueing)
+ dev->tx_queue_len = 0;
+
+ /* Initialize the device private structure. */
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(struct octeon_ethernet));
+
+ priv->imode = imode;
+ priv->port = port;
+ priv->queue = cvmx_pko_get_base_queue(priv->port);
+ priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+ for (qos = 0; qos < 16; qos++)
+ skb_queue_head_init(&priv->tx_free_list[qos]);
+ for (qos = 0; qos < cvmx_pko_get_num_queues(port);
+ qos++)
+ cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
+
+ switch (priv->imode) {
+
+ /* These types don't support ports to IPD/PKO */
+ case CVMX_HELPER_INTERFACE_MODE_DISABLED:
+ case CVMX_HELPER_INTERFACE_MODE_PCIE:
+ case CVMX_HELPER_INTERFACE_MODE_PICMG:
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_NPI:
+ dev->init = cvm_oct_common_init;
+ dev->uninit = cvm_oct_common_uninit;
+ strcpy(dev->name, "npi%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_XAUI:
+ dev->init = cvm_oct_xaui_init;
+ dev->uninit = cvm_oct_xaui_uninit;
+ strcpy(dev->name, "xaui%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_LOOP:
+ dev->init = cvm_oct_common_init;
+ dev->uninit = cvm_oct_common_uninit;
+ strcpy(dev->name, "loop%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_SGMII:
+ dev->init = cvm_oct_sgmii_init;
+ dev->uninit = cvm_oct_sgmii_uninit;
+ strcpy(dev->name, "eth%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_SPI:
+ dev->init = cvm_oct_spi_init;
+ dev->uninit = cvm_oct_spi_uninit;
+ strcpy(dev->name, "spi%d");
+ break;
+
+ case CVMX_HELPER_INTERFACE_MODE_RGMII:
+ case CVMX_HELPER_INTERFACE_MODE_GMII:
+ dev->init = cvm_oct_rgmii_init;
+ dev->uninit = cvm_oct_rgmii_uninit;
+ strcpy(dev->name, "eth%d");
+ break;
+ }
+
+ if (!dev->init) {
+ kfree(dev);
+ } else if (register_netdev(dev) < 0) {
+ pr_err("Failed to register ethernet device "
+ "for interface %d, port %d\n",
+ interface, priv->port);
+ kfree(dev);
+ } else {
+ cvm_oct_device[priv->port] = dev;
+ fau -=
+ cvmx_pko_get_num_queues(priv->port) *
+ sizeof(uint32_t);
+ }
+ }
+ }
+
+ if (INTERRUPT_LIMIT) {
+ /*
+ * Set the POW timer rate to give an interrupt at most
+ * INTERRUPT_LIMIT times per second.
+ */
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC,
+ octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
+ 16 * 256) << 8);
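+ /*
+ * For example, with a 500 MHz eclock and an INTERRUPT_LIMIT
+ * of 10000 (both illustrative), the period written here is
+ * 500000000 / (10000 * 16 * 256) = 12, shifted into the PC
+ * field by the << 8.
+ */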
+
+ /*
+ * Enable POW timer interrupt. It will count when
+ * there are packets available.
+ */
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
+ 0x1ful << 24);
+ } else {
+ /* Enable POW interrupt when our port has at least one packet */
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
+ }
+
+ /* Enable the poll timer for checking RGMII status */
+ init_timer(&cvm_oct_poll_timer);
+ cvm_oct_poll_timer.data = 0;
+ cvm_oct_poll_timer.function = cvm_do_timer;
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+
+ return 0;
+}
+
+/**
+ * Module/driver shutdown
+ *
+ * Returns Zero on success
+ */
+static void __exit cvm_oct_cleanup_module(void)
+{
+ int port;
+
+ /* Disable POW interrupt */
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+
+ cvmx_ipd_disable();
+
+ /* Free the interrupt handler */
+ free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
+
+ del_timer(&cvm_oct_poll_timer);
+ cvm_oct_rx_shutdown();
+ cvmx_pko_disable();
+
+ /* Free the ethernet devices */
+ for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
+ if (cvm_oct_device[port]) {
+ cvm_oct_tx_shutdown(cvm_oct_device[port]);
+ unregister_netdev(cvm_oct_device[port]);
+ kfree(cvm_oct_device[port]);
+ cvm_oct_device[port] = NULL;
+ }
+ }
+
+ cvmx_pko_shutdown();
+ cvm_oct_proc_shutdown();
+
+ cvmx_ipd_free_ptr();
+
+ /* Free the HW pools */
+ cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
+ num_packet_buffers);
+ cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
+ num_packet_buffers);
+ if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
+ cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
+ CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
+MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
+module_init(cvm_oct_init_module);
+module_exit(cvm_oct_cleanup_module);
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
new file mode 100644
index 000000000000..b3199076ef5e
--- /dev/null
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -0,0 +1,127 @@
+/**********************************************************************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2007 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+**********************************************************************/
+
+/*
+ * External interface for the Cavium Octeon ethernet driver.
+ */
+#ifndef OCTEON_ETHERNET_H
+#define OCTEON_ETHERNET_H
+
+/**
+ * This is the definition of the Ethernet driver's private
+ * driver state stored in netdev_priv(dev).
+ */
+struct octeon_ethernet {
+ /* PKO hardware output port */
+ int port;
+ /* PKO hardware queue for the port */
+ int queue;
+ /* Hardware fetch and add to count outstanding tx buffers */
+ int fau;
+ /*
+ * Type of port. This is one of the enums in
+ * cvmx_helper_interface_mode_t
+ */
+ int imode;
+ /* List of outstanding tx buffers per queue */
+ struct sk_buff_head tx_free_list[16];
+ /* Device statistics */
+ struct net_device_stats stats;
+ /* Generic MII info structure */
+ struct mii_if_info mii_info;
+ /* Last negotiated link state */
+ uint64_t link_info;
+ /* Called periodically to check link status */
+ void (*poll) (struct net_device *dev);
+};
+
+/**
+ * Free a work queue entry received in an intercept callback.
+ *
+ * @work_queue_entry:
+ * Work queue entry to free
+ * Returns Zero on success, Negative on failure.
+ */
+int cvm_oct_free_work(void *work_queue_entry);
+
+/**
+ * Transmit a work queue entry out of the ethernet port. Both
+ * the work queue entry and the packet data can optionally be
+ * freed. The work will be freed on error as well.
+ *
+ * @dev: Device to transmit out.
+ * @work_queue_entry:
+ * Work queue entry to send
+ * @do_free: True if the work queue entry and packet data should be
+ * freed. If false, neither will be freed.
+ * @qos: Index into the queues for this port to transmit on. This
+ * is used to implement QoS if there are multiple queues per
+ * port. This parameter must be between 0 and the number of
+ * queues per port minus 1. Values outside of this range will
+ * be changed to zero.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
+ int do_free, int qos);
+
+/**
+ * Transmit a work queue entry out of the ethernet port. Both
+ * the work queue entry and the packet data can optionally be
+ * freed. The work will be freed on error as well. This simply
+ * wraps cvm_oct_transmit_qos() for backwards compatibility.
+ *
+ * @dev: Device to transmit out.
+ * @work_queue_entry:
+ * Work queue entry to send
+ * @do_free: True if the work queue entry and packet data should be
+ * freed. If false, neither will be freed.
+ *
+ * Returns Zero on success, negative on failure.
+ */
+static inline int cvm_oct_transmit(struct net_device *dev,
+ void *work_queue_entry, int do_free)
+{
+ return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0);
+}
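+
+/*
+ * Illustrative usage sketch: code that intercepted a work queue entry can
+ * push it back out of the port it arrived on and let the driver free both
+ * the WQE and the packet data:
+ *
+ *	cvmx_wqe_t *work = ...;	(from the intercept callback)
+ *	if (cvm_oct_transmit(cvm_oct_device[work->ipprt], work, 1) < 0)
+ *		;	(the work entry was already freed on the error path)
+ */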
+
+extern int cvm_oct_rgmii_init(struct net_device *dev);
+extern void cvm_oct_rgmii_uninit(struct net_device *dev);
+extern int cvm_oct_sgmii_init(struct net_device *dev);
+extern void cvm_oct_sgmii_uninit(struct net_device *dev);
+extern int cvm_oct_spi_init(struct net_device *dev);
+extern void cvm_oct_spi_uninit(struct net_device *dev);
+extern int cvm_oct_xaui_init(struct net_device *dev);
+extern void cvm_oct_xaui_uninit(struct net_device *dev);
+
+extern int always_use_pow;
+extern int pow_send_group;
+extern int pow_receive_group;
+extern char pow_send_list[];
+extern struct net_device *cvm_oct_device[];
+
+#endif
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a411702413d6..6f8866d6a905 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -74,6 +74,9 @@ static int __init w1_gpio_probe(struct platform_device *pdev)
if (err)
goto free_gpio;
+ if (pdata->enable_external_pullup)
+ pdata->enable_external_pullup(1);
+
platform_set_drvdata(pdev, master);
return 0;
@@ -91,6 +94,9 @@ static int __exit w1_gpio_remove(struct platform_device *pdev)
struct w1_bus_master *master = platform_get_drvdata(pdev);
struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ if (pdata->enable_external_pullup)
+ pdata->enable_external_pullup(0);
+
w1_remove_master_device(master);
gpio_free(pdata->pin);
kfree(master);
@@ -98,12 +104,41 @@ static int __exit w1_gpio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+
+static int w1_gpio_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->enable_external_pullup)
+ pdata->enable_external_pullup(0);
+
+ return 0;
+}
+
+static int w1_gpio_resume(struct platform_device *pdev)
+{
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata->enable_external_pullup)
+ pdata->enable_external_pullup(1);
+
+ return 0;
+}
+
+#else
+#define w1_gpio_suspend NULL
+#define w1_gpio_resume NULL
+#endif
+
static struct platform_driver w1_gpio_driver = {
.driver = {
.name = "w1-gpio",
.owner = THIS_MODULE,
},
.remove = __exit_p(w1_gpio_remove),
+ .suspend = w1_gpio_suspend,
+ .resume = w1_gpio_resume,
};
static int __init w1_gpio_init(void)
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 90f98df5f106..f90afdb1b255 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -322,7 +322,8 @@ static int wdt_notify_sys(struct notifier_block *this,
* watchdog on reboot with no heartbeat
*/
wdt_change(WDT_ENABLE);
- printk(KERN_INFO PFX "Watchdog timer is now enabled with no heartbeat - should reboot in ~1 second.\n");
+ printk(KERN_INFO PFX "Watchdog timer is now enabled "
+ "with no heartbeat - should reboot in ~1 second.\n");
}
return NOTIFY_DONE;
}
@@ -374,12 +375,17 @@ static int __init alim7101_wdt_init(void)
pci_dev_put(ali1543_south);
if ((tmp & 0x1e) == 0x00) {
if (!use_gpio) {
- printk(KERN_INFO PFX "Detected old alim7101 revision 'a1d'. If this is a cobalt board, set the 'use_gpio' module parameter.\n");
+ printk(KERN_INFO PFX
+ "Detected old alim7101 revision 'a1d'. "
+ "If this is a cobalt board, set the 'use_gpio' "
+ "module parameter.\n");
goto err_out;
}
nowayout = 1;
} else if ((tmp & 0x1e) != 0x12 && (tmp & 0x1e) != 0x00) {
- printk(KERN_INFO PFX "ALi 1543 South-Bridge does not have the correct revision number (???1001?) - WDT not set\n");
+ printk(KERN_INFO PFX
+ "ALi 1543 South-Bridge does not have the correct "
+ "revision number (???1001?) - WDT not set\n");
goto err_out;
}
@@ -409,7 +415,8 @@ static int __init alim7101_wdt_init(void)
if (nowayout)
__module_get(THIS_MODULE);
- printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. timeout=%d sec (nowayout=%d)\n",
+ printk(KERN_INFO PFX "WDT driver for ALi M7101 initialised. "
+ "timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
return 0;
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 55dcbfe2bb72..3fe9742c23ca 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -246,7 +246,8 @@ static long ar7_wdt_ioctl(struct file *file,
static struct watchdog_info ident = {
.identity = LONGNAME,
.firmware_version = 1,
- .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING),
+ .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE),
};
int new_margin;
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 29e52c237a3b..b185dafe1494 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -268,7 +268,8 @@ static int __init at91_wdt_init(void)
if not reset to the default */
if (at91_wdt_settimeout(wdt_time)) {
at91_wdt_settimeout(WDT_DEFAULT_TIME);
- pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256, using %d\n", wdt_time);
+ pr_info("at91_wdt: wdt_time value must be 1 <= wdt_time <= 256"
+ ", using %d\n", wdt_time);
}
return platform_driver_register(&at91wdt_driver);
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 435b0573fb0a..eac26021e8da 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -156,7 +156,8 @@ static int at91_wdt_settimeout(unsigned int timeout)
static const struct watchdog_info at91_wdt_info = {
.identity = DRV_NAME,
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
};
/*
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 067a57cb3f82..c7b3f9df2317 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -27,10 +27,15 @@
#include <linux/uaccess.h>
#include <asm/blackfin.h>
-#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
+#define stamp(fmt, args...) \
+ pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
#define stampit() stamp("here i am")
-#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
-#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+#define pr_devinit(fmt, args...) \
+ ({ static const __devinitconst char __fmt[] = fmt; \
+ printk(__fmt, ## args); })
+#define pr_init(fmt, args...) \
+ ({ static const __initconst char __fmt[] = fmt; \
+ printk(__fmt, ## args); })
#define WATCHDOG_NAME "bfin-wdt"
#define PFX WATCHDOG_NAME ": "
@@ -476,7 +481,8 @@ static int __init bfin_wdt_init(void)
return ret;
}
- bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME, -1, NULL, 0);
+ bfin_wdt_device = platform_device_register_simple(WATCHDOG_NAME,
+ -1, NULL, 0);
if (IS_ERR(bfin_wdt_device)) {
pr_init(KERN_ERR PFX "unable to register device\n");
platform_driver_unregister(&bfin_wdt_driver);
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 41070e4771a0..081f2955419e 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -154,9 +154,9 @@ static struct cpwd *cpwd_device;
static struct timer_list cpwd_timer;
-static int wd0_timeout = 0;
-static int wd1_timeout = 0;
-static int wd2_timeout = 0;
+static int wd0_timeout;
+static int wd1_timeout;
+static int wd2_timeout;
module_param(wd0_timeout, int, 0);
MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs");
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index c51d0b0ea0c4..83e22e7ea4a2 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -193,7 +193,7 @@ static struct miscdevice davinci_wdt_miscdev = {
.fops = &davinci_wdt_fops,
};
-static int davinci_wdt_probe(struct platform_device *pdev)
+static int __devinit davinci_wdt_probe(struct platform_device *pdev)
{
int ret = 0, size;
struct resource *res;
@@ -237,7 +237,7 @@ static int davinci_wdt_probe(struct platform_device *pdev)
return ret;
}
-static int davinci_wdt_remove(struct platform_device *pdev)
+static int __devexit davinci_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&davinci_wdt_miscdev);
if (wdt_mem) {
@@ -254,7 +254,7 @@ static struct platform_driver platform_wdt_driver = {
.owner = THIS_MODULE,
},
.probe = davinci_wdt_probe,
- .remove = davinci_wdt_remove,
+ .remove = __devexit_p(davinci_wdt_remove),
};
static int __init davinci_wdt_init(void)
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 3137361ccbfe..c0b9169ba5d5 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/nmi.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -47,7 +48,7 @@
#define PCI_BIOS32_PARAGRAPH_LEN 16
#define PCI_ROM_BASE1 0x000F0000
#define ROM_SIZE 0x10000
-#define HPWDT_VERSION "1.01"
+#define HPWDT_VERSION "1.1.1"
struct bios32_service_dir {
u32 signature;
@@ -119,6 +120,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
static char expect_release;
static unsigned long hpwdt_is_open;
static unsigned int allow_kdump;
+static int hpwdt_nmi_sourcing;
static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
@@ -468,21 +470,22 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
return NOTIFY_OK;
- spin_lock_irqsave(&rom_lock, rom_pl);
- if (!die_nmi_called)
- asminline_call(&cmn_regs, cru_rom_addr);
- die_nmi_called = 1;
- spin_unlock_irqrestore(&rom_lock, rom_pl);
- if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
- "but unable to determine source.\n");
- } else {
- if (allow_kdump)
- hpwdt_stop();
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
+ if (hpwdt_nmi_sourcing) {
+ spin_lock_irqsave(&rom_lock, rom_pl);
+ if (!die_nmi_called)
+ asminline_call(&cmn_regs, cru_rom_addr);
+ die_nmi_called = 1;
+ spin_unlock_irqrestore(&rom_lock, rom_pl);
+ if (cmn_regs.u1.ral == 0) {
+ printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ "but unable to determine source.\n");
+ } else {
+ if (allow_kdump)
+ hpwdt_stop();
+ panic("An NMI occurred, please see the Integrated "
+ "Management Log for details.\n");
+ }
}
-
return NOTIFY_OK;
}
@@ -627,12 +630,38 @@ static struct notifier_block die_notifier = {
* Init & Exit
*/
+#ifdef ARCH_HAS_NMI_WATCHDOG
+static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
+{
+ /*
+ * If nmi_watchdog is turned off then we can turn on
+ * our nmi sourcing capability.
+ */
+ if (!nmi_watchdog_active())
+ hpwdt_nmi_sourcing = 1;
+ else
+ dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this "
+ "functionality you must reboot with nmi_watchdog=0.\n");
+}
+#else
+static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
+{
+ dev_warn(&dev->dev, "NMI sourcing is disabled. "
+ "Your kernel does not support a NMI Watchdog.\n");
+}
+#endif
+
static int __devinit hpwdt_init_one(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int retval;
/*
+ * Check if we can do NMI sourcing or not
+ */
+ hpwdt_check_nmi_sourcing(dev);
+
+ /*
* First let's find out if we are on an iLO2 server. We will
* not run on a legacy ASM box.
* So we only support the G5 ProLiant servers and higher.
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index d3c0f6de5523..5133bca5ccbe 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -19,7 +19,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_vendor_support"
-#define DRV_VERSION "1.03"
+#define DRV_VERSION "1.04"
#define PFX DRV_NAME ": "
/* Includes */
@@ -35,20 +35,23 @@
#include "iTCO_vendor.h"
/* iTCO defines */
-#define SMI_EN acpibase + 0x30 /* SMI Control and Enable Register */
-#define TCOBASE acpibase + 0x60 /* TCO base address */
-#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */
+#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
+#define TCOBASE (acpibase + 0x60) /* TCO base address */
+#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD 1
/* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */
#define SUPERMICRO_NEW_BOARD 2
+/* Broken BIOS */
+#define BROKEN_BIOS 911
static int vendorsupport;
module_param(vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
- "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+");
+ "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, "
+ "911=Broken SMI BIOS");
/*
* Vendor Specific Support
@@ -243,25 +246,92 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
}
/*
+ * Vendor Support: 911
+ * Board: Some Intel ICHx based motherboards
+ * iTCO chipset: ICH7+
+ *
+ * Some Intel motherboards have a broken BIOS implementation: i.e.
+ * the SMI handler clears the TIMEOUT bit in the TCO1_STS register
+ * and does not reload the timer. Thus the TCO watchdog does not reboot
+ * the system.
+ *
+ * These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after
+ * debugging: the SMI handler is quite simple - it tests value in
+ * TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set
+ * the handler goes into an infinite loop, apparently to allow the
+ * second timeout and reboot. Otherwise it simply clears TIMEOUT bit
+ * in TCO1_STS and that's it.
+ * So the logic seems to be reversed, because it is hard to see how
+ * TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set
+ * (other than a transitional effect).
+ *
+ * The only fix found to get the motherboard(s) to reboot is to put
+ * the glb_smi_en bit to 0. This is a dirty hack that bypasses the
+ * broken code by disabling Global SMI.
+ *
+ * WARNING: globally disabling SMI could possibly lead to dramatic
+ * problems, especially on laptops! I.e. various ACPI things where
+ * SMI is used for communication between OS and firmware.
+ *
+ * Don't use this fix if you don't need to!!!
+ */
+
+static void broken_bios_start(unsigned long acpibase)
+{
+ unsigned long val32;
+
+ val32 = inl(SMI_EN);
+ /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
+ Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
+ val32 &= 0xffffdffe;
+ outl(val32, SMI_EN);
+}
+
+static void broken_bios_stop(unsigned long acpibase)
+{
+ unsigned long val32;
+
+ val32 = inl(SMI_EN);
+ /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
+ Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
+ val32 |= 0x00002001;
+ outl(val32, SMI_EN);
+}
+
+/*
* Generic Support Functions
*/
void iTCO_vendor_pre_start(unsigned long acpibase,
unsigned int heartbeat)
{
- if (vendorsupport == SUPERMICRO_OLD_BOARD)
+ switch (vendorsupport) {
+ case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_start(acpibase);
- else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+ break;
+ case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_start(heartbeat);
+ break;
+ case BROKEN_BIOS:
+ broken_bios_start(acpibase);
+ break;
+ }
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
void iTCO_vendor_pre_stop(unsigned long acpibase)
{
- if (vendorsupport == SUPERMICRO_OLD_BOARD)
+ switch (vendorsupport) {
+ case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_stop(acpibase);
- else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+ break;
+ case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_stop();
+ break;
+ case BROKEN_BIOS:
+ broken_bios_stop(acpibase);
+ break;
+ }
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 648250b998c4..6a51edde6ea7 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -236,19 +236,19 @@ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
/* Address definitions for the TCO */
/* TCO base address */
-#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60
+#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60)
/* SMI Control and Enable Register */
-#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30
-
-#define TCO_RLD TCOBASE + 0x00 /* TCO Timer Reload and Curr. Value */
-#define TCOv1_TMR TCOBASE + 0x01 /* TCOv1 Timer Initial Value */
-#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */
-#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */
-#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */
-#define TCO2_STS TCOBASE + 0x06 /* TCO2 Status Register */
-#define TCO1_CNT TCOBASE + 0x08 /* TCO1 Control Register */
-#define TCO2_CNT TCOBASE + 0x0a /* TCO2 Control Register */
-#define TCOv2_TMR TCOBASE + 0x12 /* TCOv2 Timer Initial Value */
+#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30)
+
+#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
+#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */
+#define TCO_DAT_IN (TCOBASE + 0x02) /* TCO Data In Register */
+#define TCO_DAT_OUT (TCOBASE + 0x03) /* TCO Data Out Register */
+#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
+#define TCO2_STS (TCOBASE + 0x06) /* TCO2 Status Register */
+#define TCO1_CNT (TCOBASE + 0x08) /* TCO1 Control Register */
+#define TCO2_CNT (TCOBASE + 0x0a) /* TCO2 Control Register */
+#define TCOv2_TMR (TCOBASE + 0x12) /* TCOv2 Timer Initial Value */
/* internal variables */
static unsigned long is_active;
@@ -666,6 +666,11 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
GCS = RCBA + ICH6_GCS(0x3410). */
if (iTCO_wdt_private.iTCO_version == 2) {
pci_read_config_dword(pdev, 0xf0, &base_address);
+ if ((base_address & 1) == 0) {
+ printk(KERN_ERR PFX "RCBA is disabled by harddware\n");
+ ret = -ENODEV;
+ goto out;
+ }
RCBA = base_address & 0xffffc000;
iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
}
@@ -675,7 +680,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, "
"reboot disabled by hardware\n");
ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
- goto out;
+ goto out_unmap;
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
@@ -686,7 +691,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
printk(KERN_ERR PFX
"I/O address 0x%04lx already in use\n", SMI_EN);
ret = -EIO;
- goto out;
+ goto out_unmap;
}
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
@@ -742,9 +747,10 @@ unreg_region:
release_region(TCOBASE, 0x20);
unreg_smi_en:
release_region(SMI_EN, 4);
-out:
+out_unmap:
if (iTCO_wdt_private.iTCO_version == 2)
iounmap(iTCO_wdt_private.gcs);
+out:
pci_dev_put(iTCO_wdt_private.pdev);
iTCO_wdt_private.ACPIBASE = 0;
return ret;
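The hunk at the top of this file wraps TCOBASE, SMI_EN and the register offsets in parentheses. A minimal sketch of why that matters (not part of the patch; ACPIBASE is a made-up constant here): with the old, unparenthesized definitions, using two such macros in one expression re-associates the arithmetic and gives the wrong result.

	#include <stdio.h>

	#define ACPIBASE     0x0400
	#define TCOBASE_OLD  ACPIBASE + 0x60	/* old style */
	#define SMI_EN_OLD   ACPIBASE + 0x30
	#define TCOBASE_NEW  (ACPIBASE + 0x60)	/* new style */
	#define SMI_EN_NEW   (ACPIBASE + 0x30)

	int main(void)
	{
		/* Intended distance between the two registers: 0x30. */
		printf("old: 0x%x\n", TCOBASE_OLD - SMI_EN_OLD);	/* expands to 0x400 + 0x60 - 0x400 + 0x30 = 0x90 */
		printf("new: 0x%x\n", TCOBASE_NEW - SMI_EN_NEW);	/* 0x30 */
		return 0;
	}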
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 0f761db9a27c..bea8a124a559 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -83,7 +83,6 @@ static int indydog_open(struct inode *inode, struct file *file)
indydog_start();
indydog_ping();
- indydog_alive = 1;
printk(KERN_INFO "Started watchdog timer.\n");
return nonseekable_open(inode, file);
@@ -113,8 +112,7 @@ static long indydog_ioctl(struct file *file, unsigned int cmd,
{
int options, retval = -EINVAL;
static struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
+ .options = WDIOF_KEEPALIVEPING,
.firmware_version = 0,
.identity = "Hardware Watchdog for SGI IP22",
};
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 2270ee07c01b..daed48ded7fe 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -239,7 +239,8 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
static struct watchdog_info ident = {
.identity = "IT8712F Watchdog",
.firmware_version = 1,
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
};
int value;
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index ae3832110acb..00b03eb43bf0 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -293,8 +293,8 @@ static int __init ks8695_wdt_init(void)
if not reset to the default */
if (ks8695_wdt_settimeout(wdt_time)) {
ks8695_wdt_settimeout(WDT_DEFAULT_TIME);
- pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i, using %d\n",
- wdt_time, WDT_MAX_TIME);
+ pr_info("ks8695_wdt: wdt_time value must be 1 <= wdt_time <= %i"
+ ", using %d\n", wdt_time, WDT_MAX_TIME);
}
return platform_driver_register(&ks8695wdt_driver);
}
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 2dfc27559bf7..b6b3f59ab446 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -118,7 +118,8 @@ static struct watchdog_info zf_info = {
*/
static int action;
module_param(action, int, 0);
-MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
+MODULE_PARM_DESC(action, "after watchdog resets, generate: "
+ "0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
static void zf_ping(unsigned long data);
@@ -142,7 +143,8 @@ static unsigned long next_heartbeat;
#ifndef ZF_DEBUG
# define dprintk(format, args...)
#else
-# define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __func__, __LINE__ , ## args)
+# define dprintk(format, args...) printk(KERN_DEBUG PFX \
+ ":%s:%d: " format, __func__, __LINE__ , ## args)
#endif
@@ -340,7 +342,8 @@ static int zf_close(struct inode *inode, struct file *file)
zf_timer_off();
else {
del_timer(&zf_timer);
- printk(KERN_ERR PFX ": device file closed unexpectedly. Will not stop the WDT!\n");
+ printk(KERN_ERR PFX ": device file closed unexpectedly. "
+ "Will not stop the WDT!\n");
}
clear_bit(0, &zf_is_open);
zf_expect_close = 0;
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 1512ab8b175b..83fa34b214b4 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -61,7 +61,9 @@ MODULE_PARM_DESC(nowayout,
#define ONLY_TESTING 0
static int mpcore_noboot = ONLY_TESTING;
module_param(mpcore_noboot, int, 0);
-MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, set to 1 to ignore reboots, 0 to reboot (default=" __MODULE_STRING(ONLY_TESTING) ")");
+MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, "
+ "set to 1 to ignore reboots, 0 to reboot (default="
+ __MODULE_STRING(ONLY_TESTING) ")");
/*
* This is the interrupt handler. Note that we only use this
@@ -416,7 +418,8 @@ static struct platform_driver mpcore_wdt_driver = {
},
};
-static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n";
+static char banner[] __initdata = KERN_INFO "MPcore Watchdog Timer: 0.1. "
+ "mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n";
static int __init mpcore_wdt_init(void)
{
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 539b6f6ba7f1..08e8a6ab74e1 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -206,7 +206,7 @@ static struct miscdevice mtx1_wdt_misc = {
};
-static int mtx1_wdt_probe(struct platform_device *pdev)
+static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
{
int ret;
@@ -229,7 +229,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int mtx1_wdt_remove(struct platform_device *pdev)
+static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
{
/* FIXME: do we need to lock this test ? */
if (mtx1_wdt_device.queue) {
@@ -242,7 +242,7 @@ static int mtx1_wdt_remove(struct platform_device *pdev)
static struct platform_driver mtx1_wdt = {
.probe = mtx1_wdt_probe,
- .remove = mtx1_wdt_remove,
+ .remove = __devexit_p(mtx1_wdt_remove),
.driver.name = "mtx1-wdt",
.driver.owner = THIS_MODULE,
};
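This and the next few hunks (pnx4008, rdc321x, rm9k, s3c2410) apply the same annotation pattern: probe() becomes __devinit, remove() becomes __devexit, and the driver's .remove pointer is wrapped in __devexit_p(). Roughly, and simplified from include/linux/init.h of this kernel era (the my_ prefix below is only to mark this as an illustration), the wrapper works like this:

	/* Editorial sketch: when hotplug is compiled out, remove() can never
	 * be called, so the callback pointer becomes NULL and the __devexit
	 * function body can be discarded from the image. */
	#ifdef CONFIG_HOTPLUG
	#define my_devexit_p(fn) fn	/* keep the real remove callback */
	#else
	#define my_devexit_p(fn) NULL	/* drop it; the code can be freed */
	#endif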
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 64135195f827..f24d04132eda 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -246,7 +246,7 @@ static struct miscdevice pnx4008_wdt_miscdev = {
.fops = &pnx4008_wdt_fops,
};
-static int pnx4008_wdt_probe(struct platform_device *pdev)
+static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
{
int ret = 0, size;
struct resource *res;
@@ -299,7 +299,7 @@ out:
return ret;
}
-static int pnx4008_wdt_remove(struct platform_device *pdev)
+static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&pnx4008_wdt_miscdev);
if (wdt_clk) {
@@ -321,7 +321,7 @@ static struct platform_driver platform_wdt_driver = {
.owner = THIS_MODULE,
},
.probe = pnx4008_wdt_probe,
- .remove = pnx4008_wdt_remove,
+ .remove = __devexit_p(pnx4008_wdt_remove),
};
static int __init pnx4008_wdt_init(void)
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 36e221beedcd..4976bfd1fce6 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -245,7 +245,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
return 0;
}
-static int rdc321x_wdt_remove(struct platform_device *pdev)
+static int __devexit rdc321x_wdt_remove(struct platform_device *pdev)
{
if (rdc321x_wdt_device.queue) {
rdc321x_wdt_device.queue = 0;
@@ -259,7 +259,7 @@ static int rdc321x_wdt_remove(struct platform_device *pdev)
static struct platform_driver rdc321x_wdt_driver = {
.probe = rdc321x_wdt_probe,
- .remove = rdc321x_wdt_remove,
+ .remove = __devexit_p(rdc321x_wdt_remove),
.driver = {
.owner = THIS_MODULE,
.name = "rdc321x-wdt",
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
index cce1982a1b58..2e4442642262 100644
--- a/drivers/watchdog/rm9k_wdt.c
+++ b/drivers/watchdog/rm9k_wdt.c
@@ -345,8 +345,8 @@ static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
return platform_get_resource_byname(pdv, type, buf);
}
-/* No hotplugging on the platform bus - use __init */
-static int __init wdt_gpi_probe(struct platform_device *pdv)
+/* No hotplugging on the platform bus - use __devinit */
+static int __devinit wdt_gpi_probe(struct platform_device *pdv)
{
int res;
const struct resource
@@ -373,7 +373,7 @@ static int __init wdt_gpi_probe(struct platform_device *pdv)
return res;
}
-static int __exit wdt_gpi_remove(struct platform_device *dev)
+static int __devexit wdt_gpi_remove(struct platform_device *dev)
{
int res;
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e31925ee8346..b57ac6b49147 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -68,15 +68,10 @@ MODULE_PARM_DESC(tmr_atboot,
__MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT));
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)");
+MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
+ "0 to reboot (default depends on ONLY_TESTING)");
MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)");
-
-typedef enum close_state {
- CLOSE_STATE_NOT,
- CLOSE_STATE_ALLOW = 0x4021
-} close_state_t;
-
static unsigned long open_lock;
static struct device *wdt_dev; /* platform device attached to */
static struct resource *wdt_mem;
@@ -84,7 +79,7 @@ static struct resource *wdt_irq;
static struct clk *wdt_clock;
static void __iomem *wdt_base;
static unsigned int wdt_count;
-static close_state_t allow_close;
+static char expect_close;
static DEFINE_SPINLOCK(wdt_lock);
/* watchdog control routines */
@@ -211,7 +206,7 @@ static int s3c2410wdt_open(struct inode *inode, struct file *file)
if (nowayout)
__module_get(THIS_MODULE);
- allow_close = CLOSE_STATE_NOT;
+ expect_close = 0;
/* start the timer */
s3c2410wdt_start();
@@ -225,13 +220,13 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
* Lock it in if it's a module and we set nowayout
*/
- if (allow_close == CLOSE_STATE_ALLOW)
+ if (expect_close == 42)
s3c2410wdt_stop();
else {
dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
s3c2410wdt_keepalive();
}
- allow_close = CLOSE_STATE_NOT;
+ expect_close = 0;
clear_bit(0, &open_lock);
return 0;
}
@@ -247,7 +242,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
size_t i;
/* In case it was set long ago */
- allow_close = CLOSE_STATE_NOT;
+ expect_close = 0;
for (i = 0; i != len; i++) {
char c;
@@ -255,7 +250,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
- allow_close = CLOSE_STATE_ALLOW;
+ expect_close = 42;
}
}
s3c2410wdt_keepalive();
@@ -263,7 +258,7 @@ static ssize_t s3c2410wdt_write(struct file *file, const char __user *data,
return len;
}
-#define OPTIONS WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE
+#define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE)
static const struct watchdog_info s3c2410_wdt_ident = {
.options = OPTIONS,
@@ -331,7 +326,7 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
}
/* device interface */
-static int s3c2410wdt_probe(struct platform_device *pdev)
+static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
{
struct resource *res;
struct device *dev;
@@ -404,7 +399,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
"tmr_margin value out of range, default %d used\n",
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
else
- dev_info(dev, "default timer value is out of range, cannot start\n");
+ dev_info(dev, "default timer value is out of range, "
+ "cannot start\n");
}
ret = misc_register(&s3c2410wdt_miscdev);
@@ -453,7 +449,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return ret;
}
-static int s3c2410wdt_remove(struct platform_device *dev)
+static int __devexit s3c2410wdt_remove(struct platform_device *dev)
{
release_resource(wdt_mem);
kfree(wdt_mem);
@@ -515,7 +511,7 @@ static int s3c2410wdt_resume(struct platform_device *dev)
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
- .remove = s3c2410wdt_remove,
+ .remove = __devexit_p(s3c2410wdt_remove),
.shutdown = s3c2410wdt_shutdown,
.suspend = s3c2410wdt_suspend,
.resume = s3c2410wdt_resume,
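The s3c2410 hunks above replace the driver-private close_state enum with the conventional expect_close flag and the 'V' magic character. For context, a hedged sketch of the userspace side of this "magic close" convention (not part of the patch): the watchdog only stops on close() if the application wrote 'V' first, so an application crash still triggers a reboot.

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);
		if (fd < 0)
			return 1;
		write(fd, "\0", 1);	/* any write is a keepalive ping       */
		write(fd, "V", 1);	/* magic character: allow a clean stop */
		close(fd);		/* driver may now disable the timer    */
		return 0;
	}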
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 38f5831c9291..9748eed73196 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -93,7 +93,7 @@ static int expect_close;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
+ WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
.identity = "SiByte Watchdog",
};
@@ -269,9 +269,10 @@ irqreturn_t sbwdog_interrupt(int irq, void *addr)
* if it's the second watchdog timer, it's for those users
*/
if (wd_cfg_reg == user_dog)
- printk(KERN_CRIT
- "%s in danger of initiating system reset in %ld.%01ld seconds\n",
- ident.identity, wd_init / 1000000, (wd_init / 100000) % 10);
+ printk(KERN_CRIT "%s in danger of initiating system reset "
+ "in %ld.%01ld seconds\n",
+ ident.identity,
+ wd_init / 1000000, (wd_init / 100000) % 10);
else
cfg |= 1;
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index d1c390c7155c..626d0e8e56c3 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -372,8 +372,9 @@ static int __init sbc60xxwdt_init(void)
wdt_miscdev.minor, rc);
goto err_out_reboot;
}
- printk(KERN_INFO PFX "WDT driver for 60XX single board computer initialised. timeout=%d sec (nowayout=%d)\n",
- timeout, nowayout);
+ printk(KERN_INFO PFX
+ "WDT driver for 60XX single board computer initialised. "
+ "timeout=%d sec (nowayout=%d)\n", timeout, nowayout);
return 0;
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index b6e6799ec45d..68e2e2d6f73d 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -280,8 +280,8 @@ static int sbc8360_close(struct inode *inode, struct file *file)
if (expect_close == 42)
sbc8360_stop();
else
- printk(KERN_CRIT PFX
- "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n");
+ printk(KERN_CRIT PFX "SBC8360 device closed unexpectedly. "
+ "SBC8360 will not stop!\n");
clear_bit(0, &sbc8360_is_open);
expect_close = 0;
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index e467ddcf796a..28f1214457bd 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -107,8 +107,7 @@ static long epx_c3_ioctl(struct file *file, unsigned int cmd,
int options, retval = -EINVAL;
int __user *argp = (void __user *)arg;
static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
+ .options = WDIOF_KEEPALIVEPING,
.firmware_version = 0,
.identity = "Winsystems EPX-C3 H/W Watchdog",
};
@@ -174,8 +173,8 @@ static struct notifier_block epx_c3_notifier = {
.notifier_call = epx_c3_notify_sys,
};
-static const char banner[] __initdata =
- KERN_INFO PFX "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
+static const char banner[] __initdata = KERN_INFO PFX
+ "Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
static int __init watchdog_init(void)
{
@@ -219,6 +218,9 @@ module_init(watchdog_init);
module_exit(watchdog_exit);
MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>");
-MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. Note that there is no way to probe for this device -- so only use it if you are *sure* you are runnning on this specific SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
+MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. "
+ "Note that there is no way to probe for this device -- "
+ "so only use it if you are *sure* you are runnning on this specific "
+ "SBC system from Winsystems! It writes to IO ports 0x1ee and 0x1ef!");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 9e19a10a5bb9..e67b76c0526c 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -108,7 +108,9 @@ static int scx200_wdt_open(struct inode *inode, struct file *file)
static int scx200_wdt_release(struct inode *inode, struct file *file)
{
if (expect_close != 42)
- printk(KERN_WARNING NAME ": watchdog device closed unexpectedly, will not disable the watchdog timer\n");
+ printk(KERN_WARNING NAME
+ ": watchdog device closed unexpectedly, "
+ "will not disable the watchdog timer\n");
else if (!nowayout)
scx200_wdt_disable();
expect_close = 0;
@@ -163,7 +165,8 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd,
static const struct watchdog_info ident = {
.identity = "NatSemi SCx200 Watchdog",
.firmware_version = 1,
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
};
int new_margin;
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index cdc7138be301..a03f84e5ee1f 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -494,7 +494,9 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
-MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
+MODULE_PARM_DESC(clock_division_ratio,
+ "Clock division ratio. Valid ranges are from 0x5 (1.31ms) "
+ "to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat,
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index ebcc9cea5e99..833f49f43d43 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -71,7 +71,9 @@ static int soft_noboot = 0;
#endif /* ONLY_TESTING */
module_param(soft_noboot, int, 0);
-MODULE_PARM_DESC(soft_noboot, "Softdog action, set to 1 to ignore reboots, 0 to reboot (default depends on ONLY_TESTING)");
+MODULE_PARM_DESC(soft_noboot,
+ "Softdog action, set to 1 to ignore reboots, 0 to reboot "
+ "(default depends on ONLY_TESTING)");
/*
* Our timer
@@ -264,7 +266,8 @@ static struct notifier_block softdog_notifier = {
.notifier_call = softdog_notify_sys,
};
-static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n";
+static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 "
+ "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n";
static int __init watchdog_init(void)
{
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index a9c7f352fcbf..af08972de506 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -413,7 +413,8 @@ static int __init wdt_init(void)
w83697hf_init();
if (early_disable) {
if (wdt_running())
- printk(KERN_WARNING PFX "Stopping previously enabled watchdog until userland kicks in\n");
+ printk(KERN_WARNING PFX "Stopping previously enabled "
+ "watchdog until userland kicks in\n");
wdt_disable();
}
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a38fa4907c92..a4fe7a38d9b0 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -49,12 +49,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS_MISCDEV(TEMP_MINOR);
-#ifdef CONFIG_WATCHDOG_NOWAYOUT
-static int wdrtas_nowayout = 1;
-#else
-static int wdrtas_nowayout = 0;
-#endif
-
+static int wdrtas_nowayout = WATCHDOG_NOWAYOUT;
static atomic_t wdrtas_miscdev_open = ATOMIC_INIT(0);
static char wdrtas_expect_close;
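The wdrtas hunk above replaces the open-coded #ifdef with the WATCHDOG_NOWAYOUT helper from linux/watchdog.h, which (roughly, as defined in that era) encodes the same default:

	/* Editorial sketch of the helper the hunk switches to. */
	#ifdef CONFIG_WATCHDOG_NOWAYOUT
	#define WATCHDOG_NOWAYOUT 1
	#else
	#define WATCHDOG_NOWAYOUT 0
	#endif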