Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/acconfig.h | 2
-rw-r--r--  drivers/acpi/apei/Kconfig | 1
-rw-r--r--  drivers/acpi/apei/apei-base.c | 2
-rw-r--r--  drivers/base/power/clock_ops.c | 75
-rw-r--r--  drivers/base/regmap/regmap.c | 5
-rw-r--r--  drivers/block/floppy.c | 8
-rw-r--r--  drivers/block/xen-blkback/common.h | 2
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 6
-rw-r--r--  drivers/bluetooth/btusb.c | 6
-rw-r--r--  drivers/bluetooth/btwilink.c | 16
-rw-r--r--  drivers/char/tpm/Kconfig | 1
-rw-r--r--  drivers/char/tpm/tpm.c | 9
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 2
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 3
-rw-r--r--  drivers/dma/ste_dma40.c | 42
-rw-r--r--  drivers/firewire/ohci.c | 3
-rw-r--r--  drivers/gpio/gpio-generic.c | 15
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 51
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r--  drivers/hid/Kconfig | 29
-rw-r--r--  drivers/hid/Makefile | 3
-rw-r--r--  drivers/hid/hid-axff.c | 36
-rw-r--r--  drivers/hid/hid-core.c | 11
-rw-r--r--  drivers/hid/hid-ids.h | 10
-rw-r--r--  drivers/hid/hid-input.c | 11
-rw-r--r--  drivers/hid/hid-lg.c | 29
-rw-r--r--  drivers/hid/hid-lg.h | 4
-rw-r--r--  drivers/hid/hid-lg4ff.c | 403
-rw-r--r--  drivers/hid/hid-lgff.c | 13
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 922
-rw-r--r--  drivers/hid/hid-logitech-dj.h | 123
-rw-r--r--  drivers/hid/hid-magicmouse.c | 66
-rw-r--r--  drivers/hid/hid-multitouch.c | 93
-rw-r--r--  drivers/hid/hid-wacom.c | 24
-rw-r--r--  drivers/hid/hid-wiimote.c | 800
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwmon/coretemp.c | 216
-rw-r--r--  drivers/hwmon/ds620.c | 2
-rw-r--r--  drivers/hwmon/max16065.c | 2
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 9
-rw-r--r--  drivers/hwmon/pmbus/ucd9000.c | 6
-rw-r--r--  drivers/hwmon/pmbus/ucd9200.c | 6
-rw-r--r--  drivers/hwmon/w83791d.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pxa-pci.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 60
-rw-r--r--  drivers/ide/ide-disk.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 10
-rw-r--r--  drivers/input/keyboard/adp5588-keys.c | 1
-rw-r--r--  drivers/input/misc/cm109.c | 2
-rw-r--r--  drivers/input/mouse/bcm5974.c | 20
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 14
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 52
-rw-r--r--  drivers/input/touchscreen/wacom_w8001.c | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 18
-rw-r--r--  drivers/iommu/dmar.c | 2
-rw-r--r--  drivers/leds/ledtrig-timer.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 4
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-table.c | 32
-rw-r--r--  drivers/md/md.c | 34
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/multipath.c | 3
-rw-r--r--  drivers/md/raid1.c | 17
-rw-r--r--  drivers/md/raid10.c | 52
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/media/dvb/dvb-usb/vp7045.c | 26
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 45
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 1
-rw-r--r--  drivers/media/video/gspca/ov519.c | 22
-rw-r--r--  drivers/media/video/gspca/sonixj.c | 6
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 13
-rw-r--r--  drivers/media/video/omap3isp/ispccdc.c | 1
-rw-r--r--  drivers/media/video/pwc/pwc-v4l.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_driver.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_video.c | 10
-rw-r--r--  drivers/media/video/uvc/uvcvideo.h | 2
-rw-r--r--  drivers/media/video/v4l2-dev.c | 11
-rw-r--r--  drivers/media/video/v4l2-device.c | 2
-rw-r--r--  drivers/media/video/via-camera.c | 2
-rw-r--r--  drivers/mfd/jz4740-adc.c | 2
-rw-r--r--  drivers/mfd/max8997.c | 5
-rw-r--r--  drivers/mfd/omap-usb-host.c | 2
-rw-r--r--  drivers/mfd/tps65910-irq.c | 2
-rw-r--r--  drivers/mfd/twl4030-madc.c | 5
-rw-r--r--  drivers/mfd/wm8350-gpio.c | 4
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.c | 14
-rw-r--r--  drivers/misc/pti.c | 12
-rw-r--r--  drivers/mmc/card/block.c | 3
-rw-r--r--  drivers/mmc/core/core.c | 35
-rw-r--r--  drivers/mmc/core/host.c | 12
-rw-r--r--  drivers/mmc/core/host.h | 8
-rw-r--r--  drivers/mmc/core/sd.c | 81
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 2
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c | 4
-rw-r--r--  drivers/mtd/ubi/debug.h | 2
-rw-r--r--  drivers/net/Kconfig | 11
-rw-r--r--  drivers/net/arm/am79c961a.c | 3
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 124
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 27
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 48
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 46
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 178
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 19
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 7
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 3
-rw-r--r--  drivers/net/bonding/bond_alb.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 13
-rw-r--r--  drivers/net/can/ti_hecc.c | 1
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 23
-rw-r--r--  drivers/net/cxgb3/l2t.c | 15
-rw-r--r--  drivers/net/cxgb3/l2t.h | 16
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 3
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 6
-rw-r--r--  drivers/net/gianfar_ethtool.c | 8
-rw-r--r--  drivers/net/greth.c | 12
-rw-r--r--  drivers/net/greth.h | 1
-rw-r--r--  drivers/net/ibmveth.c | 52
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 4
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/netconsole.c | 8
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h | 12
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 346
-rw-r--r--  drivers/net/phy/dp83640.c | 4
-rw-r--r--  drivers/net/ppp_generic.c | 7
-rw-r--r--  drivers/net/pxa168_eth.c | 1
-rw-r--r--  drivers/net/r8169.c | 32
-rw-r--r--  drivers/net/sfc/efx.c | 18
-rw-r--r--  drivers/net/sfc/io.h | 6
-rw-r--r--  drivers/net/sfc/mcdi.c | 46
-rw-r--r--  drivers/net/sfc/nic.c | 7
-rw-r--r--  drivers/net/sfc/nic.h | 2
-rw-r--r--  drivers/net/sfc/siena.c | 25
-rw-r--r--  drivers/net/sfc/workarounds.h | 2
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/usb/ipheth.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 10
-rw-r--r--  drivers/net/wireless/b43/main.c | 3
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 21
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 39
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-3945-rs.c | 13
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-core.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-hcmd.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl-tx.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl3945-base.c | 8
-rw-r--r--  drivers/net/wireless/iwlegacy/iwl4965-base.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 30
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 47
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 8
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/trx.c | 11
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 1
-rw-r--r--  drivers/net/xen-netback/interface.c | 4
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 4
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/pci/probe.c | 59
-rw-r--r--  drivers/rtc/rtc-ep93xx.c | 16
-rw-r--r--  drivers/rtc/rtc-imxdi.c | 1
-rw-r--r--  drivers/rtc/rtc-lib.c | 2
-rw-r--r--  drivers/rtc/rtc-s3c.c | 26
-rw-r--r--  drivers/rtc/rtc-twl.c | 60
-rw-r--r--  drivers/s390/cio/cio.c | 8
-rw-r--r--  drivers/scsi/3w-9xxx.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 13
-rw-r--r--  drivers/scsi/hpsa.c | 57
-rw-r--r--  drivers/scsi/isci/host.c | 13
-rw-r--r--  drivers/scsi/isci/host.h | 3
-rw-r--r--  drivers/scsi/isci/init.c | 47
-rw-r--r--  drivers/scsi/isci/phy.c | 13
-rw-r--r--  drivers/scsi/isci/registers.h | 12
-rw-r--r--  drivers/scsi/isci/request.c | 30
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c | 2
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h | 2
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 59
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 11
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 11
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 36
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 282
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 109
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 25
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 39
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/Kconfig | 2
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 3
-rw-r--r--  drivers/spi/spi-imx.c | 4
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 93
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc.c | 4
-rw-r--r--  drivers/staging/zcache/zcache-main.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 270
-rw-r--r--  drivers/target/target_core_cdb.c | 35
-rw-r--r--  drivers/target/target_core_transport.c | 9
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 12
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 90
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 7
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 62
-rw-r--r--  drivers/tty/serial/crisv10.c | 4
-rw-r--r--  drivers/usb/host/xhci-hub.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 19
-rw-r--r--  drivers/video/backlight/backlight.c | 2
-rw-r--r--  drivers/watchdog/hpwdt.c | 9
-rw-r--r--  drivers/watchdog/lantiq_wdt.c | 8
-rw-r--r--  drivers/watchdog/sbc_epx_c3.c | 2
-rw-r--r--  drivers/watchdog/watchdog_dev.c | 14
-rw-r--r--  drivers/xen/events.c | 40
-rw-r--r--  drivers/zorro/zorro.c | 7
249 files changed, 5143 insertions, 2179 deletions
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index bc533dde16c4..f895a244ca7e 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -121,7 +121,7 @@
/* Maximum sleep allowed via Sleep() operator */
-#define ACPI_MAX_SLEEP 20000 /* Two seconds */
+#define ACPI_MAX_SLEEP 2000 /* Two seconds */
/******************************************************************************
*
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index c34aa51af4ee..e3f47872ec22 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -13,6 +13,7 @@ config ACPI_APEI_GHES
bool "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
+ select IRQ_WORK
select LLIST
select GENERIC_ALLOCATOR
help
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 8041248fce9b..61540360d5ce 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -618,7 +618,7 @@ int apei_osc_setup(void)
};
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
- capbuf[OSC_SUPPORT_TYPE] = 0;
+ capbuf[OSC_SUPPORT_TYPE] = 1;
capbuf[OSC_CONTROL_TYPE] = 0;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 2c18d584066d..b97294e2d95b 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
}
/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
+{
+ ce->clk = clk_get(dev, ce->con_id);
+ if (IS_ERR(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ } else {
+ ce->status = PCE_STATUS_ACQUIRED;
+ dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+ }
+}
+
+/**
* pm_clk_add - Start using a device clock for power management.
* @dev: Device whose clock is going to be used for power management.
* @con_id: Connection ID of the clock.
@@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id)
}
}
+ pm_clk_acquire(dev, ce);
+
spin_lock_irq(&pcd->lock);
list_add_tail(&ce->node, &pcd->clock_list);
spin_unlock_irq(&pcd->lock);
@@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id)
/**
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
- *
- * This routine must be called under the spinlock protecting the PM list of
- * clocks corresponding the the @ce's device.
*/
static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
- list_del(&ce->node);
-
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
clk_disable(ce->clk);
@@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id)
spin_lock_irq(&pcd->lock);
list_for_each_entry(ce, &pcd->clock_list, node) {
- if (!con_id && !ce->con_id) {
- __pm_clk_remove(ce);
- break;
- } else if (!con_id || !ce->con_id) {
+ if (!con_id && !ce->con_id)
+ goto remove;
+ else if (!con_id || !ce->con_id)
continue;
- } else if (!strcmp(con_id, ce->con_id)) {
- __pm_clk_remove(ce);
- break;
- }
+ else if (!strcmp(con_id, ce->con_id))
+ goto remove;
}
spin_unlock_irq(&pcd->lock);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ spin_unlock_irq(&pcd->lock);
+
+ __pm_clk_remove(ce);
}
/**
@@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce, *c;
+ struct list_head list;
if (!pcd)
return;
dev->power.subsys_data = NULL;
+ INIT_LIST_HEAD(&list);
spin_lock_irq(&pcd->lock);
list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
- __pm_clk_remove(ce);
+ list_move(&ce->node, &list);
spin_unlock_irq(&pcd->lock);
kfree(pcd);
+
+ list_for_each_entry_safe_reverse(ce, c, &list, node) {
+ list_del(&ce->node);
+ __pm_clk_remove(ce);
+ }
}
#endif /* CONFIG_PM */
@@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
/**
- * pm_clk_acquire - Acquire a device clock.
- * @dev: Device whose clock is to be acquired.
- * @con_id: Connection ID of the clock.
- */
-static void pm_clk_acquire(struct device *dev,
- struct pm_clock_entry *ce)
-{
- ce->clk = clk_get(dev, ce->con_id);
- if (IS_ERR(ce->clk)) {
- ce->status = PCE_STATUS_ERROR;
- } else {
- ce->status = PCE_STATUS_ACQUIRED;
- dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
- }
-}
-
-/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
*/
@@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev)
spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
- if (ce->status == PCE_STATUS_NONE)
- pm_clk_acquire(dev, ce);
-
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
@@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&pcd->lock, flags);
list_for_each_entry(ce, &pcd->clock_list, node) {
- if (ce->status == PCE_STATUS_NONE)
- pm_clk_acquire(dev, ce);
-
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
ce->status = PCE_STATUS_ENABLED;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0eef4da1ac61..20663f8dae45 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
- goto err_bus;
+ goto err_map;
}
return map;
-err_bus:
- module_put(map->bus->owner);
err_map:
kfree(map);
err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
void regmap_exit(struct regmap *map)
{
kfree(map->work_buf);
- module_put(map->bus->owner);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 98de8f418676..9955a53733b2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
- del_timer(&fd_timeout);
+ del_timer_sync(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
- del_timer(&fd_timeout);
+ del_timer_sync(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
- del_timer(&fd_timeout);
+ del_timer_sync(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
while (dr--) {
- del_timer(&motor_off_timer[dr]);
+ del_timer_sync(&motor_off_timer[dr]);
if (disks[dr]->queue)
blk_cleanup_queue(disks[dr]->queue);
put_disk(disks[dr]);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9e40b283a468..00c57c90e2d6 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -46,7 +46,7 @@
#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...) \
- pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
+ pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
__func__, __LINE__, ##args)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3f129b45451a..5fd2010f7d2b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
/*
* Enforce precondition before potential leak point.
- * blkif_disconnect() is idempotent.
+ * xen_blkif_disconnect() is idempotent.
*/
xen_blkif_disconnect(be->blkif);
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
break;
case XenbusStateClosing:
- xen_blkif_disconnect(be->blkif);
xenbus_switch_state(dev, XenbusStateClosing);
break;
case XenbusStateClosed:
+ xen_blkif_disconnect(be->blkif);
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
/* fall through if not online */
case XenbusStateUnknown:
- /* implies blkif_disconnect() via blkback_remove() */
+ /* implies xen_blkif_disconnect() via xen_blkbk_remove() */
device_unregister(&dev->dev);
break;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3ef476070baf..9cbac6b445e1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -72,9 +72,15 @@ static struct usb_device_id btusb_table[] = {
/* Apple MacBookAir3,1, MacBookAir3,2 */
{ USB_DEVICE(0x05ac, 0x821b) },
+ /* Apple MacBookAir4,1 */
+ { USB_DEVICE(0x05ac, 0x821f) },
+
/* Apple MacBookPro8,2 */
{ USB_DEVICE(0x05ac, 0x821a) },
+ /* Apple MacMini5,1 */
+ { USB_DEVICE(0x05ac, 0x8281) },
+
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 65d27aff553a..04d353f58d71 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -125,6 +125,13 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
/* protocol structure registered with shared transport */
static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
{
+ .chnl_id = HCI_EVENT_PKT, /* HCI Events */
+ .hdr_len = sizeof(struct hci_event_hdr),
+ .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
+ .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
+ .reserve = 8,
+ },
+ {
.chnl_id = HCI_ACLDATA_PKT, /* ACL */
.hdr_len = sizeof(struct hci_acl_hdr),
.offset_len_in_hdr = offsetof(struct hci_acl_hdr, dlen),
@@ -138,13 +145,6 @@ static struct st_proto_s ti_st_proto[MAX_BT_CHNL_IDS] = {
.len_size = 1, /* sizeof(dlen) in struct hci_sco_hdr */
.reserve = 8,
},
- {
- .chnl_id = HCI_EVENT_PKT, /* HCI Events */
- .hdr_len = sizeof(struct hci_event_hdr),
- .offset_len_in_hdr = offsetof(struct hci_event_hdr, plen),
- .len_size = 1, /* sizeof(plen) in struct hci_event_hdr */
- .reserve = 8,
- },
};
/* Called from HCI core to initialize the device */
@@ -240,7 +240,7 @@ static int ti_st_close(struct hci_dev *hdev)
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
- for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
+ for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
err = st_unregister(&ti_st_proto[i]);
if (err)
BT_ERR("st_unregister(%d) failed with error %d",
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595aba4f0f..fa567f1158c2 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -43,6 +43,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
+ depends on PPC64 || HAS_IOPORT
---help---
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index caf8012ef47c..9ca5c021d0b6 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
u32 count, ordinal;
unsigned long stop;
+ if (bufsiz > TPM_BUFSIZE)
+ bufsiz = TPM_BUFSIZE;
+
count = be32_to_cpu(*((__be32 *) (buf + 2)));
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (count == 0)
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
{
struct tpm_chip *chip = file->private_data;
ssize_t ret_size;
+ int rc;
del_singleshot_timer_sync(&chip->user_read_timer);
flush_work_sync(&chip->work);
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf,
ret_size = size;
mutex_lock(&chip->buffer_mutex);
- if (copy_to_user(buf, chip->data_buffer, ret_size))
+ rc = copy_to_user(buf, chip->data_buffer, ret_size);
+ memset(chip->data_buffer, 0, ret_size);
+ if (rc)
ret_size = -EFAULT;
+
mutex_unlock(&chip->buffer_mutex);
}
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 82facc9104c7..4d2464871ada 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
if (pdev) {
tpm_nsc_remove(&pdev->dev);
platform_device_unregister(pdev);
- kfree(pdev);
- pdev = NULL;
}
platform_driver_unregister(&nsc_drv);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 7b0603eb0129..cdc02ac8f41a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -261,6 +261,9 @@ static int pcc_get_offset(int cpu)
pr = per_cpu(processors, cpu);
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+ if (!pr)
+ return -ENODEV;
+
status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cd3a7c726bf8..467e4dcb20a0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -174,8 +174,10 @@ struct d40_base;
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
* transfer and call client callback.
* @client: Cliented owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
* @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
* @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
struct list_head pending_queue;
struct list_head active;
struct list_head queue;
+ struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
bool configured;
struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
return d;
}
+/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
+ d40_desc_remove(desc);
+ desc->is_in_client_list = false;
list_add_tail(&desc->node, &d40c->pending_queue);
}
@@ -803,6 +808,7 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
+ struct d40_desc *_d;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release descriptors in prepare queue */
+ if (!list_empty(&d40c->prepare_queue))
+ list_for_each_entry_safe(d40d, _d,
+ &d40c->prepare_queue, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
d40c->pending_tx = 0;
d40c->busy = false;
}
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
} else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
u32 event;
struct d40_phy_res *phy = d40c->phy_chan;
bool is_src;
- struct d40_desc *d;
- struct d40_desc *_d;
-
/* Terminate all queued and active transfers */
d40_term_all(d40c);
- /* Release client owned descriptors */
- if (!list_empty(&d40c->client))
- list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d40c, d);
- d40_desc_remove(d);
- d40_desc_free(d40c, d);
- }
-
if (phy == NULL) {
chan_err(d40c, "phy == null\n");
return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
goto err;
}
+ /*
+ * add descriptor to the prepare queue in order to be able
+ * to free them later in terminate_all
+ */
+ list_add_tail(&desc->node, &chan->prepare_queue);
+
spin_unlock_irqrestore(&chan->lock, flags);
return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->pending_queue);
INIT_LIST_HEAD(&d40c->client);
+ INIT_LIST_HEAD(&d40c->prepare_queue);
tasklet_init(&d40c->tasklet, dma_tasklet,
(unsigned long) d40c);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57cd3a406edf..fd7170a9ad2c 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,6 +290,9 @@ static const struct {
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
+ QUIRK_NO_MSI},
+
{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 231714def4d2..4e24436b0f82 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -351,7 +351,7 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc,
return 0;
}
-int __devexit bgpio_remove(struct bgpio_chip *bgc)
+int bgpio_remove(struct bgpio_chip *bgc)
{
int err = gpiochip_remove(&bgc->gc);
@@ -361,15 +361,10 @@ int __devexit bgpio_remove(struct bgpio_chip *bgc)
}
EXPORT_SYMBOL_GPL(bgpio_remove);
-int __devinit bgpio_init(struct bgpio_chip *bgc,
- struct device *dev,
- unsigned long sz,
- void __iomem *dat,
- void __iomem *set,
- void __iomem *clr,
- void __iomem *dirout,
- void __iomem *dirin,
- bool big_endian)
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+ unsigned long sz, void __iomem *dat, void __iomem *set,
+ void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+ bool big_endian)
{
int ret;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61ac3139..f7c6854eb4dd 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
{
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
- return 0;
}
EXPORT_SYMBOL(drm_fb_helper_panic);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ce045a8cf82c..f07e4252b708 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
-unsigned int i915_enable_fbc __read_mostly = 1;
+unsigned int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
- "(default: false)");
+ "(default: -1 (use per-chip default))");
unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 56a8554d9039..04411ad2e779 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
+ int enable_fbc;
DRM_DEBUG_KMS("\n");
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- if (!i915_enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
+ enable_fbc = i915_enable_fbc;
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+ if (INTEL_INFO(dev)->gen <= 5)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
bpc = 6; /* min is 18bpp */
break;
case 24:
- bpc = min((unsigned int)8, display_bpc);
+ bpc = 8;
break;
case 30:
- bpc = min((unsigned int)10, display_bpc);
+ bpc = 10;
break;
case 48:
- bpc = min((unsigned int)12, display_bpc);
+ bpc = 12;
break;
default:
DRM_DEBUG("unsupported depth, assuming 24 bits\n");
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
break;
}
+ display_bpc = min(display_bpc, bpc);
+
DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
bpc, display_bpc);
- *pipe_bpp = bpc * 3;
+ *pipe_bpp = display_bpc * 3;
return display_bpc != bpc;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0b2ee9d39980..fe1099d8817e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
-extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 30fe554d8936..6348c499616f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -92,6 +92,11 @@ struct intel_sdvo {
*/
uint16_t attached_output;
+ /*
+ * Hotplug activation bits for this device
+ */
+ uint8_t hotplug_active[2];
+
/**
* This is used to select the color range of RBG outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
return true;
}
-/* No use! */
-#if 0
-struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct intel_sdvo *iout = NULL;
- struct intel_sdvo *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_intel_sdvo(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
-
- sdvo = iout->dev_priv;
-
- if (sdvo->sdvo_reg == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->sdvo_reg == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
-}
-
-int intel_sdvo_supports_hotplug(struct drm_connector *connector)
+static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
u8 response[2];
- u8 status;
- struct intel_sdvo *intel_sdvo;
- DRM_DEBUG_KMS("\n");
-
- if (!connector)
- return 0;
-
- intel_sdvo = to_intel_sdvo(connector);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
}
-void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
- u8 response[2];
- u8 status;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
-
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
-
- if (on) {
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = intel_sdvo_read_response(intel_sdvo, &response, 2);
-
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- }
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- intel_sdvo_read_response(intel_sdvo, &response, 2);
+ intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
}
-#endif
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_sdvo->hotplug_active[0] |= 1 << device;
+ /* Some SDVO devices have one-shot hotplug interrupts.
+ * Ensure that they get re-enabled when an interrupt happens.
+ */
+ intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+ intel_sdvo_enable_hotplug(intel_encoder);
+ }
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
+ /* Set up hotplug command - note paranoia about contents of reply.
+ * We assume that the hardware is in a sane state, and only touch
+ * the bits we think we understand.
+ */
+ intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2);
+ intel_sdvo->hotplug_active[0] &= ~0x3;
+
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8d02d875376d..c919cfc8f2fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
- } else {
+ } else
+ if (USE_SEMA(dev)) {
/* map fence bo into channel's vm */
ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
&chan->fence.vma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c444cadbf849..2706cb3d871a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
return -ENOMEM;
nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
- if (!nvbe->ttm_alloced)
+ if (!nvbe->ttm_alloced) {
+ kfree(nvbe->pages);
+ nvbe->pages = NULL;
return -ENOMEM;
+ }
nvbe->nr_pages = 0;
while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
- dma_offset += NV_CTXDMA_PAGE_SIZE;
+ offset_l += NV_CTXDMA_PAGE_SIZE;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 118261d4927a..5e45398a9e2d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
- struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct drm_framebuffer *drm_fb;
+ struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
int ret;
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ NV_DEBUG_KMS(dev, "No FB bound\n");
+ return 0;
+ }
+
+
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that. (We don't pin; just
* assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
+ drm_fb = crtc->fb;
+ fb = nouveau_framebuffer(crtc->fb);
/* If not atomic, we can go ahead and pin, and unpin the
* old fb we were passed.
*/
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 46ad59ea2185..5d989073ba6e 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct drm_framebuffer *drm_fb;
+ struct nouveau_framebuffer *fb;
int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* no fb bound */
+ if (!atomic && !crtc->fb) {
+ NV_DEBUG_KMS(dev, "No FB bound\n");
+ return 0;
+ }
+
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that. (We don't pin; just
* assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
+ drm_fb = crtc->fb;
+ fb = nouveau_framebuffer(crtc->fb);
/* If not atomic, we can go ahead and pin, and unpin the
* old fb we were passed.
*/
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 7ad43c6b1db7..4da23889fea6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
+ unsigned retry;
if (send_bytes > 16)
return -1;
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
- while (1) {
+ for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
- break;
+ return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return -EIO;
}
- return send_bytes;
+ return -EIO;
}
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
int msg_bytes = 4;
u8 ack;
int ret;
+ unsigned retry;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
- while (1) {
+ for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
- if (ret == 0)
- return -EPROTO;
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
+ else if (ret == 0)
+ return -EPROTO;
else
return -EIO;
}
+
+ return -EIO;
}
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index dc0a5b56c81a..c4ffa14fb2f4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- WREG32(CP_RB_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
rdev->cp.rptr = RREG32(CP_RB_RPTR);
- rdev->cp.wptr = RREG32(CP_RB_WPTR);
evergreen_cp_start(rdev);
rdev->cp.ready = true;
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
-static void evergreen_program_channel_remap(struct radeon_device *rdev)
-{
- u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
- case 0:
- case 1:
- case 2:
- case 3:
- default:
- /* default mapping */
- mc_shared_chremap = 0x00fac688;
- break;
- }
-
- switch (rdev->family) {
- case CHIP_HEMLOCK:
- case CHIP_CYPRESS:
- case CHIP_BARTS:
- tcp_chan_steer_lo = 0x54763210;
- tcp_chan_steer_hi = 0x0000ba98;
- break;
- case CHIP_JUNIPER:
- case CHIP_REDWOOD:
- case CHIP_CEDAR:
- case CHIP_PALM:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- default:
- tcp_chan_steer_lo = 0x76543210;
- tcp_chan_steer_hi = 0x0000ba98;
- break;
- }
-
- WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
- WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- evergreen_program_channel_remap(rdev);
-
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
@@ -3171,21 +3127,23 @@ int evergreen_suspend(struct radeon_device *rdev)
}
int evergreen_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence)
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence)
{
int r;
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
- r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
evergreen_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cbf57d75d925..8c79ca97753d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
-static void cayman_program_channel_remap(struct radeon_device *rdev)
-{
- u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
-
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
- case 0:
- case 1:
- case 2:
- case 3:
- default:
- /* default mapping */
- mc_shared_chremap = 0x00fac688;
- break;
- }
-
- switch (rdev->family) {
- case CHIP_CAYMAN:
- default:
- //tcp_chan_steer_lo = 0x54763210
- tcp_chan_steer_lo = 0x76543210;
- tcp_chan_steer_hi = 0x0000ba98;
- break;
- }
-
- WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
- WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
u32 disable_mask_per_se,
u32 max_disable_mask_per_se,
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- cayman_program_channel_remap(rdev);
-
/* primary versions */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
@@ -1187,7 +1155,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- WREG32(CP_RB0_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB0_WPTR, rdev->cp.wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1207,7 +1176,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
- rdev->cp.wptr = RREG32(CP_RB0_WPTR);
/* ring1 - compute only */
/* Set ring buffer size */
@@ -1220,7 +1188,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- WREG32(CP_RB1_WPTR, 0);
+ rdev->cp1.wptr = 0;
+ WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1232,7 +1201,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
- rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
/* ring2 - compute only */
/* Set ring buffer size */
@@ -1245,7 +1213,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- WREG32(CP_RB2_WPTR, 0);
+ rdev->cp2.wptr = 0;
+ WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1257,7 +1226,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
- rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
/* start the rings */
cayman_cp_start(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f2204cb1ccdf..7fcdbbbf2979 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence)
{
uint32_t cur_pages;
- uint32_t stride_bytes = PAGE_SIZE;
+ uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
uint32_t stride_pixels;
unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
/* radeon pitch is /64 */
pitch = stride_bytes / 64;
stride_pixels = stride_bytes / 4;
- num_loops = DIV_ROUND_UP(num_pages, 8191);
+ num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
}
- while (num_pages > 0) {
- cur_pages = num_pages;
+ while (num_gpu_pages > 0) {
+ cur_pages = num_gpu_pages;
if (cur_pages > 8191) {
cur_pages = 8191;
}
- num_pages -= cur_pages;
+ num_gpu_pages -= cur_pages;
/* pages are in Y direction - height
page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, num_pages);
- radeon_ring_write(rdev, num_pages);
+ radeon_ring_write(rdev, num_gpu_pages);
+ radeon_ring_write(rdev, num_gpu_pages);
radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
}
radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
- WREG32(RADEON_CP_RB_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
- /* protect against crazy HW on resume */
- rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f24058300413..a1f3ba063c2d 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence)
{
uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
int r = 0;
/* radeon pitch is /64 */
- size = num_pages << PAGE_SHIFT;
+ size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aa5571b73aa0..720dd99163f8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- WREG32(CP_RB_WPTR, 0);
+ rdev->cp.wptr = 0;
+ WREG32(CP_RB_WPTR, rdev->cp.wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
rdev->cp.rptr = RREG32(CP_RB_RPTR);
- rdev->cp.wptr = RREG32(CP_RB_WPTR);
r600_cp_start(rdev);
rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence)
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence *fence)
{
int r;
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32807baf55e2..c1e056b35b29 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
struct radeon_gart {
dma_addr_t table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
int (*copy_dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
uint32_t (*get_engine_clock)(struct radeon_device *rdev);
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d7a0d7c6a9a..3dedaa07aac1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
- unsigned num_pages,
+ unsigned num_gpu_pages,
struct radeon_fence *fence);
void r200_set_safe_registers(struct radeon_device *rdev);
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence *fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4f0c1ecac72e..bce63fd329d4 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
int saved_dpms = connector->dpms;
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
- radeon_dp_needs_link_train(radeon_connector))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- else
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ else if (radeon_dp_needs_link_train(radeon_connector))
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
connector->dpms = saved_dpms;
}
}
@@ -1297,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
- } else {
- /* need to setup ddc on the bridge */
- if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ } else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+ /* DP bridges are always DP */
+ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ /* get the DPCD from the bridge */
+ radeon_dp_getdpcd(radeon_connector);
+
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ ret = connector_status_connected;
+ else {
+ /* need to setup ddc on the bridge */
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
+ if (radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe))
+ ret = connector_status_connected;
}
+
+ if ((ret == connector_status_disconnected) &&
+ radeon_connector->dac_load_detect) {
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ if (encoder) {
+ encoder_funcs = encoder->helper_private;
+ ret = encoder_funcs->detect(encoder, connector);
+ }
+ }
+ } else {
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
@@ -1318,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
}
}
-
- if ((ret == connector_status_disconnected) &&
- radeon_connector->dac_load_detect) {
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- struct drm_encoder_helper_funcs *encoder_funcs;
- if (encoder) {
- encoder_funcs = encoder->helper_private;
- ret = encoder_funcs->detect(encoder, connector);
- }
- }
}
radeon_connector_update_scratch_regs(connector, ret);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3189a7efb2e9..fde25c0d65a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;
- if (x < 0)
- xorigin = -x + 1;
- if (y < 0)
- yorigin = -y + 1;
- if (xorigin >= CURSOR_WIDTH)
- xorigin = CURSOR_WIDTH - 1;
- if (yorigin >= CURSOR_HEIGHT)
- yorigin = CURSOR_HEIGHT - 1;
-
if (ASIC_IS_AVIVO(rdev)) {
- int i = 0;
- struct drm_crtc *crtc_p;
-
/* avivo cursor are offset into the total surface */
x += crtc->x;
y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+ }
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ if (x < 0) {
+ xorigin = min(-x, CURSOR_WIDTH - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, CURSOR_HEIGHT - 1);
+ y = 0;
+ }
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ int i = 0;
+ struct drm_crtc *crtc_p;
/* avivo cursor image can't end on 128 pixel boundary or
* go past the end of the frame if both crtcs are enabled
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
radeon_lock_cursor(crtc, true);
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
- ((xorigin ? 0 : x) << 16) |
- (yorigin ? 0 : y));
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
- ((xorigin ? 0 : x) << 16) |
- (yorigin ? 0 : y));
+ WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
| yorigin));
WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
(RADEON_CUR_LOCK
- | ((xorigin ? 0 : x) << 16)
- | (yorigin ? 0 : y)));
+ | (x << 16)
+ | y));
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
(yorigin * 256)));
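The cursor change above folds any negative cursor coordinate into the hotspot registers and clamps the position registers at zero. A minimal standalone sketch of that clamping, with illustrative constants and a hypothetical helper name (not part of the radeon driver):

/* Sketch of the clamping done in radeon_crtc_cursor_move() above;
 * the CURSOR_WIDTH/HEIGHT values and clamp_cursor() are illustrative only. */
#include <stdio.h>

#define CURSOR_WIDTH  64
#define CURSOR_HEIGHT 64
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void clamp_cursor(int x, int y)
{
	int xorigin = 0, yorigin = 0;

	if (x < 0) {
		xorigin = MIN(-x, CURSOR_WIDTH - 1); /* overshoot goes into the hotspot */
		x = 0;                               /* position register stays non-negative */
	}
	if (y < 0) {
		yorigin = MIN(-y, CURSOR_HEIGHT - 1);
		y = 0;
	}
	printf("pos=(%d,%d) hotspot=(%d,%d)\n", x, y, xorigin, yorigin);
}

int main(void)
{
	clamp_cursor(-5, 10); /* prints pos=(0,10) hotspot=(5,0) */
	return 0;
}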
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1a858944e4f3..6adb3e58affd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -473,8 +473,8 @@ pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
unlock_free:
- drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
radeon_fence_unref(&work->fence);
kfree(work);
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
radeon_router_select_ddc_port(radeon_connector);
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+ radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
- }
- if (!radeon_connector->ddc_bus)
- return -1;
- if (!radeon_connector->edid) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else {
+ if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
}
if (!radeon_connector->edid) {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 319d85d7e759..13690f3eb4a4 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ /* workaround for DVOOutputControl on some RS690 systems */
+ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+ u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ WREG32(RADEON_BIOS_3_SCRATCH, reg);
+ } else
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9b86fb0e4122..0b5468bfaf54 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Trying to move memory with CP turned off.\n");
return -EINVAL;
}
- r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+ BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+ r = radeon_copy(rdev, old_start, new_start,
+ new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+ fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
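The radeon_move_blit() change above converts TTM's CPU-page count into GPU pages before calling radeon_copy(); with 4 KiB CPU pages the factor is 1, while a 64 KiB-page configuration multiplies by 16. A standalone sketch of just that arithmetic (PAGE_SIZE_BYTES and the helper name are illustrative, not kernel symbols):

/* CPU-page to GPU-page conversion as used above; PAGE_SIZE_BYTES stands
 * in for the kernel's PAGE_SIZE. */
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define PAGE_SIZE_BYTES      4096 /* would be 65536 on a 64 KiB-page kernel */

static unsigned int cpu_pages_to_gpu_pages(unsigned int num_cpu_pages)
{
	return num_cpu_pages * (PAGE_SIZE_BYTES / RADEON_GPU_PAGE_SIZE);
}

int main(void)
{
	printf("%u GPU pages\n", cpu_pages_to_gpu_pages(8)); /* 8 with 4 KiB pages */
	return 0;
}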
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4720d000d440..b13c2eedc321 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
return backend_map;
}
-static void rv770_program_channel_remap(struct radeon_device *rdev)
-{
- u32 tcp_chan_steer, mc_shared_chremap, tmp;
- bool force_no_swizzle;
-
- switch (rdev->family) {
- case CHIP_RV770:
- case CHIP_RV730:
- force_no_swizzle = false;
- break;
- case CHIP_RV710:
- case CHIP_RV740:
- default:
- force_no_swizzle = true;
- break;
- }
-
- tmp = RREG32(MC_SHARED_CHMAP);
- switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
- case 0:
- case 1:
- default:
- /* default mapping */
- mc_shared_chremap = 0x00fac688;
- break;
- case 2:
- case 3:
- if (force_no_swizzle)
- mc_shared_chremap = 0x00fac688;
- else
- mc_shared_chremap = 0x00bbc298;
- break;
- }
-
- if (rdev->family == CHIP_RV740)
- tcp_chan_steer = 0x00ef2a60;
- else
- tcp_chan_steer = 0x00fac688;
-
- /* RV770 CE has special chremap setup */
- if (rdev->pdev->device == 0x944e) {
- tcp_chan_steer = 0x00b08b08;
- mc_shared_chremap = 0x00b08b08;
- }
-
- WREG32(TCP_CHAN_STEER, tcp_chan_steer);
- WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
-}
-
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
- rv770_program_channel_remap(rdev);
-
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a4d38d85909a..ef06194c5aa6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
if (bo->ttm == NULL) {
- ret = ttm_bo_add_ttm(bo, false);
+ bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+ ret = ttm_bo_add_ttm(bo, zero);
if (ret)
goto out_err;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1130a8987125..2b006bfa0935 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -69,7 +69,7 @@ config HID_ACRUX
Say Y here if you want to enable support for ACRUX game controllers.
config HID_ACRUX_FF
- tristate "ACRUX force feedback support"
+ bool "ACRUX force feedback support"
depends on HID_ACRUX
select INPUT_FF_MEMLESS
---help---
@@ -245,6 +245,15 @@ config HID_LOGITECH
---help---
Support for Logitech devices that are not fully compliant with HID standard.
+config HID_LOGITECH_DJ
+ tristate "Logitech Unifying receivers full support"
+ depends on HID_LOGITECH
+ default m
+ ---help---
+ Say Y if you want support for Logitech Unifying receivers and devices.
+ Unifying receivers are capable of pairing up to 6 Logitech compliant
+ devices to the same receiver.
+
config LOGITECH_FF
bool "Logitech force feedback support"
depends on HID_LOGITECH
@@ -278,13 +287,21 @@ config LOGIG940_FF
Say Y here if you want to enable force feedback support for Logitech
Flight System G940 devices.
-config LOGIWII_FF
- bool "Logitech Speed Force Wireless force feedback support"
+config LOGIWHEELS_FF
+ bool "Logitech wheels configuration and force feedback support"
depends on HID_LOGITECH
select INPUT_FF_MEMLESS
+ default LOGITECH_FF
help
- Say Y here if you want to enable force feedback support for Logitech
- Speed Force Wireless (Wii) devices.
+ Say Y here if you want to enable force feedback and range setting
+ support for the following Logitech wheels:
+ - Logitech Driving Force
+ - Logitech Driving Force Pro
+ - Logitech Driving Force GT
+ - Logitech G25
+ - Logitech G27
+ - Logitech MOMO/MOMO 2
+ - Logitech Formula Force EX
config HID_MAGICMOUSE
tristate "Apple MagicMouse multi-touch support"
@@ -328,6 +345,7 @@ config HID_MULTITOUCH
- Hanvon dual touch panels
- Ilitek dual touch panels
- IrTouch Infrared USB panels
+ - LG Display panels (Dell ST2220Tc)
- Lumio CrystalTouch panels
- MosArt dual-touch panels
- PenMount dual touch panels
@@ -590,6 +608,7 @@ config HID_WIIMOTE
tristate "Nintendo Wii Remote support"
depends on BT_HIDP
depends on LEDS_CLASS
+ select POWER_SUPPLY
---help---
Support for the Nintendo Wii Remote bluetooth device.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0a0a38e9fd28..b7ddabb0b34c 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -21,7 +21,7 @@ endif
ifdef CONFIG_LOGIG940_FF
hid-logitech-y += hid-lg3ff.o
endif
-ifdef CONFIG_LOGIWII_FF
+ifdef CONFIG_LOGIWHEELS_FF
hid-logitech-y += hid-lg4ff.o
endif
@@ -43,6 +43,7 @@ obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
+obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index 121514149e0b..3bdb4500f95e 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -6,7 +6,7 @@
* Xbox 360 controller.
*
* 1a34:0802 "ACRUX USB GAMEPAD 8116"
- * - tested with a EXEQ EQ-PCU-02090 game controller.
+ * - tested with an EXEQ EQ-PCU-02090 game controller.
*
* Copyright (c) 2010 Sergei Kolzun <x0r@dv-life.ru>
*/
@@ -45,7 +45,10 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
{
struct hid_device *hid = input_get_drvdata(dev);
struct axff_device *axff = data;
+ struct hid_report *report = axff->report;
+ int field_count = 0;
int left, right;
+ int i, j;
left = effect->u.rumble.strong_magnitude;
right = effect->u.rumble.weak_magnitude;
@@ -55,10 +58,14 @@ static int axff_play(struct input_dev *dev, void *data, struct ff_effect *effect
left = left * 0xff / 0xffff;
right = right * 0xff / 0xffff;
- axff->report->field[0]->value[0] = left;
- axff->report->field[1]->value[0] = right;
- axff->report->field[2]->value[0] = left;
- axff->report->field[3]->value[0] = right;
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] =
+ field_count % 2 ? right : left;
+ field_count++;
+ }
+ }
+
dbg_hid("running with 0x%02x 0x%02x", left, right);
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
@@ -72,6 +79,8 @@ static int axff_init(struct hid_device *hid)
struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
+ int field_count = 0;
+ int i, j;
int error;
if (list_empty(report_list)) {
@@ -80,9 +89,16 @@ static int axff_init(struct hid_device *hid)
}
report = list_first_entry(report_list, struct hid_report, list);
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->report_count; j++) {
+ report->field[i]->value[j] = 0x00;
+ field_count++;
+ }
+ }
- if (report->maxfield < 4) {
- hid_err(hid, "no fields in the report: %d\n", report->maxfield);
+ if (field_count < 4) {
+ hid_err(hid, "not enough fields in the report: %d\n",
+ field_count);
return -ENODEV;
}
@@ -97,13 +113,9 @@ static int axff_init(struct hid_device *hid)
goto err_free_mem;
axff->report = report;
- axff->report->field[0]->value[0] = 0x00;
- axff->report->field[1]->value[0] = 0x00;
- axff->report->field[2]->value[0] = 0x00;
- axff->report->field[3]->value[0] = 0x00;
usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
- hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
+ hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun <x0r@dv-life.ru>\n");
return 0;
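The axff_play()/axff_init() changes above stop assuming four single-value output fields and instead walk whatever field/value layout the report declares, alternating left/right magnitudes per value. A minimal sketch of that traversal over a mock report (the mock_* structs are simplified stand-ins for struct hid_report/struct hid_field):

/* Simplified stand-ins for hid_report/hid_field, only to illustrate the
 * alternating left/right fill used by axff_play() above. */
#include <stdio.h>

struct mock_field { int report_count; int value[4]; };
struct mock_report { int maxfield; struct mock_field field[4]; };

static void fill_rumble(struct mock_report *report, int left, int right)
{
	int field_count = 0;
	int i, j;

	for (i = 0; i < report->maxfield; i++)
		for (j = 0; j < report->field[i].report_count; j++)
			report->field[i].value[j] = field_count++ % 2 ? right : left;
}

int main(void)
{
	/* One field carrying four values, as some pads describe the report. */
	struct mock_report r = { .maxfield = 1, .field = { { .report_count = 4 } } };

	fill_rumble(&r, 0x40, 0x80);
	printf("%02x %02x %02x %02x\n", r.field[0].value[0], r.field[0].value[1],
	       r.field[0].value[2], r.field[0].value[3]); /* 40 80 40 80 */
	return 0;
}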
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 242353df3dc4..c5606724b77a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1212,6 +1212,12 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
connect_mask & HID_CONNECT_HIDINPUT_FORCE))
hdev->claimed |= HID_CLAIMED_INPUT;
+ if (hdev->quirks & HID_QUIRK_MULTITOUCH) {
+ /* this device should be handled by hid-multitouch, skip it */
+ hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
+ return -ENODEV;
+ }
+
if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
!hdev->hiddev_connect(hdev,
connect_mask & HID_CONNECT_HIDDEV_FORCE))
@@ -1391,6 +1397,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
@@ -1399,6 +1406,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1420,8 +1428,11 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7d27d2b0445a..d5dbb1a4f76b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -277,6 +277,7 @@
#define USB_DEVICE_ID_PENPOWER 0x00f4
#define USB_VENDOR_ID_GREENASIA 0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013
#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
@@ -350,6 +351,9 @@
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
+#define USB_VENDOR_ID_IDEACOM 0x1cb6
+#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
+
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -422,6 +426,9 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+#define USB_VENDOR_ID_LG 0x1fd2
+#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
+
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
@@ -439,6 +446,7 @@
#define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295
#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298
#define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299
+#define USB_DEVICE_ID_LOGITECH_DFGT_WHEEL 0xc29a
#define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
#define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
#define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
@@ -446,6 +454,8 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 6559e2e3364e..f333139d1a48 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -474,6 +474,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_key_clear(BTN_STYLUS2);
break;
+ case 0x51: /* ContactID */
+ device->quirks |= HID_QUIRK_MULTITOUCH;
+ goto unknown;
+
default: goto unknown;
}
break;
@@ -978,6 +982,13 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
}
+ if (hid->quirks & HID_QUIRK_MULTITOUCH) {
+ /* generic hid does not know how to handle multitouch devices */
+ if (hidinput)
+ goto out_cleanup;
+ goto out_unwind;
+ }
+
if (hidinput && input_register_device(hidinput->input))
goto out_cleanup;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index a7f916e8fc32..e7a7bd1eb34a 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -363,7 +363,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
- if (quirks & (LG_FF | LG_FF2 | LG_FF3))
+ if (quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4))
connect_mask &= ~HID_CONNECT_FF;
ret = hid_hw_start(hdev, connect_mask);
@@ -372,7 +372,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
- if (quirks & LG_FF4) {
+ /* Set up the wireless link with the Logitech Wii wheel */
+ if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT);
@@ -405,6 +406,15 @@ err_free:
return ret;
}
+static void lg_remove(struct hid_device *hdev)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ if (quirks & LG_FF4)
+ lg4ff_deinit(hdev);
+
+ hid_hw_stop(hdev);
+}
+
static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = LG_RDESC | LG_WIRELESS },
@@ -431,7 +441,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D),
.driver_data = LG_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL),
- .driver_data = LG_NOGET | LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD),
.driver_data = LG_FF2 },
@@ -444,15 +454,17 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
- .driver_data = LG_NOGET | LG_FF },
+ .driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
@@ -478,6 +490,7 @@ static struct hid_driver lg_driver = {
.input_mapped = lg_input_mapped,
.event = lg_event,
.probe = lg_probe,
+ .remove = lg_remove,
};
static int __init lg_init(void)
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index b0100ba2ae0b..4b097286dc78 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -19,10 +19,12 @@ int lg3ff_init(struct hid_device *hdev);
static inline int lg3ff_init(struct hid_device *hdev) { return -1; }
#endif
-#ifdef CONFIG_LOGIWII_FF
+#ifdef CONFIG_LOGIWHEELS_FF
int lg4ff_init(struct hid_device *hdev);
+int lg4ff_deinit(struct hid_device *hdev);
#else
static inline int lg4ff_init(struct hid_device *hdev) { return -1; }
+static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; }
#endif
#endif
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index fa550c8e1d1b..103f30d93f76 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -29,19 +29,108 @@
#include "usbhid/usbhid.h"
#include "hid-lg.h"
+#include "hid-ids.h"
-struct lg4ff_device {
- struct hid_report *report;
+#define DFGT_REV_MAJ 0x13
+#define DFGT_REV_MIN 0x22
+#define DFP_REV_MAJ 0x11
+#define DFP_REV_MIN 0x06
+#define FFEX_REV_MAJ 0x21
+#define FFEX_REV_MIN 0x00
+#define G25_REV_MAJ 0x12
+#define G25_REV_MIN 0x22
+#define G27_REV_MAJ 0x12
+#define G27_REV_MIN 0x38
+
+#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
+
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
+
+static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store);
+
+static bool list_inited;
+
+struct lg4ff_device_entry {
+ char *device_id; /* Kobject name of the associated HID device, used as the ID */
+ __u16 range;
+ __u16 min_range;
+ __u16 max_range;
+ __u8 leds;
+ struct list_head list;
+ void (*set_range)(struct hid_device *hid, u16 range);
};
-static const signed short ff4_wheel_ac[] = {
+static struct lg4ff_device_entry device_list;
+
+static const signed short lg4ff_wheel_effects[] = {
FF_CONSTANT,
FF_AUTOCENTER,
-1
};
-static int hid_lg4ff_play(struct input_dev *dev, void *data,
- struct ff_effect *effect)
+struct lg4ff_wheel {
+ const __u32 product_id;
+ const signed short *ff_effects;
+ const __u16 min_range;
+ const __u16 max_range;
+ void (*set_range)(struct hid_device *hid, u16 range);
+};
+
+static const struct lg4ff_wheel lg4ff_devices[] = {
+ {USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp},
+ {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
+ {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
+ {USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
+};
+
+struct lg4ff_native_cmd {
+ const __u8 cmd_num; /* Number of commands to send */
+ const __u8 cmd[];
+};
+
+struct lg4ff_usb_revision {
+ const __u16 rev_maj;
+ const __u16 rev_min;
+ const struct lg4ff_native_cmd *command;
+};
+
+static const struct lg4ff_native_cmd native_dfp = {
+ 1,
+ {0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_dfgt = {
+ 2,
+ {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
+ 0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
+};
+
+static const struct lg4ff_native_cmd native_g25 = {
+ 1,
+ {0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
+};
+
+static const struct lg4ff_native_cmd native_g27 = {
+ 2,
+ {0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
+ 0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
+};
+
+static const struct lg4ff_usb_revision lg4ff_revs[] = {
+ {DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt}, /* Driving Force GT */
+ {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
+ {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
+ {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
+};
+
+static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -55,13 +144,12 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
CLAMP(x);
report->field[0]->value[0] = 0x11; /* Slot 1 */
- report->field[0]->value[1] = 0x10;
+ report->field[0]->value[1] = 0x08;
report->field[0]->value[2] = x;
- report->field[0]->value[3] = 0x00;
+ report->field[0]->value[3] = 0x80;
report->field[0]->value[4] = 0x00;
- report->field[0]->value[5] = 0x08;
+ report->field[0]->value[5] = 0x00;
report->field[0]->value[6] = 0x00;
- dbg_hid("Autocenter, x=0x%02X\n", x);
usbhid_submit_report(hid, report, USB_DIR_OUT);
break;
@@ -69,24 +157,184 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data,
return 0;
}
-static void hid_lg4ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+/* Sends default autocentering command compatible with
+ * all wheels except Formula Force EX */
+static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- __s32 *value = report->field[0]->value;
- *value++ = 0xfe;
- *value++ = 0x0d;
- *value++ = 0x07;
- *value++ = 0x07;
- *value++ = (magnitude >> 8) & 0xff;
- *value++ = 0x00;
- *value = 0x00;
+ report->field[0]->value[0] = 0xfe;
+ report->field[0]->value[1] = 0x0d;
+ report->field[0]->value[2] = magnitude >> 13;
+ report->field[0]->value[3] = magnitude >> 13;
+ report->field[0]->value[4] = magnitude >> 8;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends autocentering command compatible with Formula Force EX */
+static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ magnitude = magnitude * 90 / 65535;
+
+ report->field[0]->value[0] = 0xfe;
+ report->field[0]->value[1] = 0x03;
+ report->field[0]->value[2] = magnitude >> 14;
+ report->field[0]->value[3] = magnitude >> 14;
+ report->field[0]->value[4] = magnitude;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends command to set range compatible with G25/G27/Driving Force GT */
+static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
+
+ report->field[0]->value[0] = 0xf8;
+ report->field[0]->value[1] = 0x81;
+ report->field[0]->value[2] = range & 0x00ff;
+ report->field[0]->value[3] = (range & 0xff00) >> 8;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+}
+
+/* Sends commands to set range compatible with Driving Force Pro wheel */
+static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ int start_left, start_right, full_range;
+ dbg_hid("Driving Force Pro: setting range to %u\n", range);
+
+ /* Prepare "coarse" limit command */
+ report->field[0]->value[0] = 0xf8;
+ report->field[0]->value[1] = 0x00; /* Set later */
+ report->field[0]->value[2] = 0x00;
+ report->field[0]->value[3] = 0x00;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ if (range > 200) {
+ report->field[0]->value[1] = 0x03;
+ full_range = 900;
+ } else {
+ report->field[0]->value[1] = 0x02;
+ full_range = 200;
+ }
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+
+ /* Prepare "fine" limit command */
+ report->field[0]->value[0] = 0x81;
+ report->field[0]->value[1] = 0x0b;
+ report->field[0]->value[2] = 0x00;
+ report->field[0]->value[3] = 0x00;
+ report->field[0]->value[4] = 0x00;
+ report->field[0]->value[5] = 0x00;
+ report->field[0]->value[6] = 0x00;
+
+ if (range == 200 || range == 900) { /* Do not apply any fine limit */
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ return;
+ }
+
+ /* Construct fine limit command */
+ start_left = (((full_range - range + 1) * 2047) / full_range);
+ start_right = 0xfff - start_left;
+
+ report->field[0]->value[2] = start_left >> 4;
+ report->field[0]->value[3] = start_right >> 4;
+ report->field[0]->value[4] = 0xff;
+ report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
+ report->field[0]->value[6] = 0xff;
usbhid_submit_report(hid, report, USB_DIR_OUT);
}
+static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd)
+{
+ struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
+ __u8 i, j;
+
+ j = 0;
+ while (j < 7*cmd->cmd_num) {
+ for (i = 0; i < 7; i++)
+ report->field[0]->value[i] = cmd->cmd[j++];
+
+ usbhid_submit_report(hid, report, USB_DIR_OUT);
+ }
+}
+
+/* Show the currently set range via the sysfs "range" attribute */
+static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct lg4ff_device_entry *uninitialized_var(entry);
+ struct list_head *h;
+ struct hid_device *hid = to_hid_device(dev);
+ size_t count;
+
+ list_for_each(h, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+ break;
+ }
+ if (h == &device_list.list) {
+ dbg_hid("Device not found!");
+ return 0;
+ }
+
+ count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range);
+ return count;
+}
+
+/* Set the range to the user-specified value, calling the appropriate
+ * function for the type of the wheel */
+static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct lg4ff_device_entry *uninitialized_var(entry);
+ struct list_head *h;
+ struct hid_device *hid = to_hid_device(dev);
+ __u16 range = simple_strtoul(buf, NULL, 10);
+
+ list_for_each(h, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0)
+ break;
+ }
+ if (h == &device_list.list) {
+ dbg_hid("Device not found!");
+ return count;
+ }
+
+ if (range == 0)
+ range = entry->max_range;
+
+ /* Check if the wheel supports range setting
+ * and that the range is within limits for the wheel */
+ if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) {
+ entry->set_range(hid, range);
+ entry->range = range;
+ }
+
+ return count;
+}
int lg4ff_init(struct hid_device *hid)
{
@@ -95,9 +343,10 @@ int lg4ff_init(struct hid_device *hid)
struct input_dev *dev = hidinput->input;
struct hid_report *report;
struct hid_field *field;
- const signed short *ff_bits = ff4_wheel_ac;
- int error;
- int i;
+ struct lg4ff_device_entry *entry;
+ struct usb_device_descriptor *udesc;
+ int error, i, j;
+ __u16 bcdDevice, rev_maj, rev_min;
/* Find the report to use */
if (list_empty(report_list)) {
@@ -118,18 +367,122 @@ int lg4ff_init(struct hid_device *hid)
return -1;
}
- for (i = 0; ff_bits[i] >= 0; i++)
- set_bit(ff_bits[i], dev->ffbit);
+ /* Check what wheel has been connected */
+ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+ if (hid->product == lg4ff_devices[i].product_id) {
+ dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(lg4ff_devices)) {
+ hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to"
+ "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n");
+ return -1;
+ }
+
+ /* Attempt to switch wheel to native mode when applicable */
+ udesc = &(hid_to_usb_dev(hid)->descriptor);
+ if (!udesc) {
+ hid_err(hid, "NULL USB device descriptor\n");
+ return -1;
+ }
+ bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ rev_maj = bcdDevice >> 8;
+ rev_min = bcdDevice & 0xff;
+
+ if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) {
+ dbg_hid("Generic wheel detected, can it do native?\n");
+ dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min);
+
+ for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) {
+ if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) {
+ hid_lg4ff_switch_native(hid, lg4ff_revs[j].command);
+ hid_info(hid, "Switched to native mode\n");
+ }
+ }
+ }
+
+ /* Set supported force feedback capabilities */
+ for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
+ set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);
error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
if (error)
return error;
- if (test_bit(FF_AUTOCENTER, dev->ffbit))
- dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
+ /* Check if autocentering is available and
+ * set the centering force to zero by default */
+ if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
+ if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects a different autocentering command */
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
+ else
+ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
+
+ dev->ff->set_autocenter(dev, 0);
+ }
+
+ /* Initialize device_list if this is the first device handled by lg4ff */
+ if (!list_inited) {
+ INIT_LIST_HEAD(&device_list.list);
+ list_inited = 1;
+ }
+
+ /* Add the device to device_list */
+ entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+ if (!entry) {
+ hid_err(hid, "Cannot add device, insufficient memory.\n");
+ return -ENOMEM;
+ }
+ entry->device_id = kstrdup((&hid->dev)->kobj.name, GFP_KERNEL);
+ if (!entry->device_id) {
+ hid_err(hid, "Cannot set device_id, insufficient memory.\n");
+ kfree(entry);
+ return -ENOMEM;
+ }
+ entry->min_range = lg4ff_devices[i].min_range;
+ entry->max_range = lg4ff_devices[i].max_range;
+ entry->set_range = lg4ff_devices[i].set_range;
+ list_add(&entry->list, &device_list.list);
+
+ /* Create sysfs interface */
+ error = device_create_file(&hid->dev, &dev_attr_range);
+ if (error)
+ return error;
+ dbg_hid("sysfs interface created\n");
+
+ /* Set the maximum range to start with */
+ entry->range = entry->max_range;
+ if (entry->set_range != NULL)
+ entry->set_range(hid, entry->range);
hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");
return 0;
}
+int lg4ff_deinit(struct hid_device *hid)
+{
+ bool found = 0;
+ struct lg4ff_device_entry *entry;
+ struct list_head *h, *g;
+ list_for_each_safe(h, g, &device_list.list) {
+ entry = list_entry(h, struct lg4ff_device_entry, list);
+ if (strcmp(entry->device_id, (&hid->dev)->kobj.name) == 0) {
+ list_del(h);
+ kfree(entry->device_id);
+ kfree(entry);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dbg_hid("Device entry not found!\n");
+ return -1;
+ }
+
+ device_remove_file(&hid->dev, &dev_attr_range);
+ dbg_hid("Device successfully unregistered\n");
+ return 0;
+}
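hid_lg4ff_set_range_dfp() above first selects a coarse 200 or 900 degree mode and then narrows it with a fine limit computed from the requested range; the new "range" sysfs attribute created in lg4ff_init() feeds that value in. A standalone sketch of only the fine-limit arithmetic (dfp_fine_limits() is a hypothetical helper, not part of the driver):

/* Fine-limit arithmetic from hid_lg4ff_set_range_dfp(), isolated for
 * illustration; no HID report plumbing is included. */
#include <stdio.h>

static void dfp_fine_limits(unsigned int range)
{
	unsigned int full_range = range > 200 ? 900 : 200;
	unsigned int start_left, start_right;

	if (range == 200 || range == 900) {
		printf("range %u: coarse mode only, no fine limit\n", range);
		return;
	}

	start_left = ((full_range - range + 1) * 2047) / full_range;
	start_right = 0xfff - start_left;

	printf("range %u: full_range %u, start_left 0x%03x, start_right 0x%03x\n",
	       range, full_range, start_left, start_right);
}

int main(void)
{
	dfp_fine_limits(540); /* a typical value between 200 and 900 degrees */
	dfp_fine_limits(900);
	return 0;
}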
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 088f85049290..27bc54f92f44 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -58,12 +58,6 @@ static const signed short ff_joystick_ac[] = {
-1
};
-static const signed short ff_wheel[] = {
- FF_CONSTANT,
- FF_AUTOCENTER,
- -1
-};
-
static const struct dev_type devices[] = {
{ 0x046d, 0xc211, ff_rumble },
{ 0x046d, 0xc219, ff_rumble },
@@ -71,14 +65,7 @@ static const struct dev_type devices[] = {
{ 0x046d, 0xc286, ff_joystick_ac },
{ 0x046d, 0xc287, ff_joystick_ac },
{ 0x046d, 0xc293, ff_joystick },
- { 0x046d, 0xc294, ff_wheel },
- { 0x046d, 0xc298, ff_wheel },
- { 0x046d, 0xc299, ff_wheel },
- { 0x046d, 0xc29b, ff_wheel },
{ 0x046d, 0xc295, ff_joystick },
- { 0x046d, 0xc298, ff_wheel },
- { 0x046d, 0xc299, ff_wheel },
- { 0x046d, 0xca03, ff_wheel },
};
static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
new file mode 100644
index 000000000000..38b12e45780c
--- /dev/null
+++ b/drivers/hid/hid-logitech-dj.c
@@ -0,0 +1,922 @@
+/*
+ * HID driver for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+#include "hid-logitech-dj.h"
+
+/* Keyboard descriptor (1) */
+static const char kbd_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (generic Desktop) */
+ 0x09, 0x06, /* USAGE (Keyboard) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x01, /* REPORT_ID (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0xE0, /* USAGE_MINIMUM (Left Control) */
+ 0x29, 0xE7, /* USAGE_MAXIMUM (Right GUI) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
+ 0x95, 0x05, /* REPORT COUNT (5) */
+ 0x05, 0x08, /* USAGE PAGE (LED page) */
+ 0x19, 0x01, /* USAGE MINIMUM (1) */
+ 0x29, 0x05, /* USAGE MAXIMUM (5) */
+ 0x91, 0x02, /* OUTPUT (Data, Variable, Absolute) */
+ 0x95, 0x01, /* REPORT COUNT (1) */
+ 0x75, 0x03, /* REPORT SIZE (3) */
+ 0x91, 0x01, /* OUTPUT (Constant) */
+ 0x95, 0x06, /* REPORT_COUNT (6) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x26, 0xFF, 0x00, /* LOGICAL_MAXIMUM (255) */
+ 0x05, 0x07, /* USAGE_PAGE (Keyboard) */
+ 0x19, 0x00, /* USAGE_MINIMUM (no event) */
+ 0x2A, 0xFF, 0x00, /* USAGE_MAXIMUM (reserved) */
+ 0x81, 0x00, /* INPUT (Data,Ary,Abs) */
+ 0xC0
+};
+
+/* Mouse descriptor (2) */
+static const char mse_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x10, /* USAGE_MAX (16) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x10, /* REPORT_COUNT (16) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Consumer Control descriptor (3) */
+static const char consumer_descriptor[] = {
+ 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
+ 0x09, 0x01, /* USAGE (Consumer Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x03, /* REPORT_ID = 3 */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x26, 0x8C, 0x02, /* LOGICAL_MAX (652) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x2A, 0x8C, 0x02, /* USAGE_MAX (652) */
+ 0x81, 0x00, /* INPUT (Data Ary Abs) */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* System control descriptor (4) */
+static const char syscontrol_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x80, /* USAGE (System Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x04, /* REPORT_ID = 4 */
+ 0x75, 0x02, /* REPORT_SIZE (2) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x25, 0x03, /* LOGICAL_MAX (3) */
+ 0x09, 0x82, /* USAGE (System Sleep) */
+ 0x09, 0x81, /* USAGE (System Power Down) */
+ 0x09, 0x83, /* USAGE (System Wake Up) */
+ 0x81, 0x60, /* INPUT (Data Ary Abs NPrf Null) */
+ 0x75, 0x06, /* REPORT_SIZE (6) */
+ 0x81, 0x03, /* INPUT (Cnst Var Abs) */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Media descriptor (8) */
+static const char media_descriptor[] = {
+ 0x06, 0xbc, 0xff, /* Usage Page 0xffbc */
+ 0x09, 0x88, /* Usage 0x0088 */
+ 0xa1, 0x01, /* BeginCollection */
+ 0x85, 0x08, /* Report ID 8 */
+ 0x19, 0x01, /* Usage Min 0x0001 */
+ 0x29, 0xff, /* Usage Max 0x00ff */
+ 0x15, 0x01, /* Logical Min 1 */
+ 0x26, 0xff, 0x00, /* Logical Max 255 */
+ 0x75, 0x08, /* Report Size 8 */
+ 0x95, 0x01, /* Report Count 1 */
+ 0x81, 0x00, /* Input */
+ 0xc0, /* EndCollection */
+};
+
+/* Maximum size of all defined hid reports in bytes (including report id) */
+#define MAX_REPORT_SIZE 8
+
+/* Number of possible hid report types that can be created by this driver.
+ *
+ * Right now, RF report types have the same report ids as the hid
+ * reports created from those RF reports. In the future this doesn't
+ * have to be true.
+ *
+ * For instance, RF report type 0x01, which has a size of 8 bytes,
+ * corresponds to hid report id 0x01, the standard keyboard. The same
+ * applies to mouse reports, consumer control, etc. If a new RF report is
+ * created, it doesn't have to have the same report id as its
+ * corresponding hid report, so a translation may have to take place for
+ * future report types.
+ */
+#define NUMBER_OF_HID_REPORTS 32
+static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
+ [1] = 8, /* Standard keyboard */
+ [2] = 8, /* Standard mouse */
+ [3] = 5, /* Consumer control */
+ [4] = 2, /* System control */
+ [8] = 2, /* Media Center */
+};
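+/* Example: when an RF report of type 0x02 (standard mouse) arrives,
+ * logi_dj_recv_forward_report() below hands hid_reportid_size_map[0x02] == 8
+ * bytes of it to hid_input_report() on the paired dj_device. */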
+
+
+#define LOGITECH_DJ_INTERFACE_NUMBER 0x02
+
+static struct hid_ll_driver logi_dj_ll_driver;
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type);
+
+static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* Called in delayed work context */
+ struct dj_device *dj_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ if (dj_dev != NULL) {
+ hid_destroy_device(dj_dev->hdev);
+ kfree(dj_dev);
+ } else {
+ dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+ __func__);
+ }
+}
+
+static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* Called in delayed work context */
+ struct hid_device *djrcv_hdev = djrcv_dev->hdev;
+ struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct hid_device *dj_hiddev;
+ struct dj_device *dj_dev;
+
+ /* Device index goes from 1 to 6, we need 3 bytes to store the
+ * colon, the index, and a null terminator
+ */
+ unsigned char tmpstr[3];
+
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ dbg_hid("%s: device list is empty\n", __func__);
+ return;
+ }
+
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+ dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return;
+ }
+
+ dj_hiddev = hid_allocate_device();
+ if (IS_ERR(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
+ __func__);
+ return;
+ }
+
+ dj_hiddev->ll_driver = &logi_dj_ll_driver;
+ dj_hiddev->hid_output_raw_report = logi_dj_output_hidraw_report;
+
+ dj_hiddev->dev.parent = &djrcv_hdev->dev;
+ dj_hiddev->bus = BUS_USB;
+ dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
+ dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct);
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Unifying Device. Wireless PID:%02x%02x",
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB],
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]);
+
+ usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
+ snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+ strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
+
+ dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
+
+ if (!dj_dev) {
+ dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
+ __func__);
+ goto dj_device_allocate_fail;
+ }
+
+ dj_dev->reports_supported = le32_to_cpu(
+ dj_report->report_params[DEVICE_PAIRED_RF_REPORT_TYPE]);
+ dj_dev->hdev = dj_hiddev;
+ dj_dev->dj_receiver_dev = djrcv_dev;
+ dj_dev->device_index = dj_report->device_index;
+ dj_hiddev->driver_data = dj_dev;
+
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+
+ if (hid_add_device(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
+ __func__);
+ goto hid_add_device_fail;
+ }
+
+ return;
+
+hid_add_device_fail:
+ djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ kfree(dj_dev);
+dj_device_allocate_fail:
+ hid_destroy_device(dj_hiddev);
+}
+
+static void delayedwork_callback(struct work_struct *work)
+{
+ struct dj_receiver_dev *djrcv_dev =
+ container_of(work, struct dj_receiver_dev, work);
+
+ struct dj_report dj_report;
+ unsigned long flags;
+ int count;
+
+ dbg_hid("%s\n", __func__);
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+
+ count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
+ sizeof(struct dj_report));
+
+ if (count != sizeof(struct dj_report)) {
+ dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
+ "notifications available\n", __func__);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
+ }
+
+ if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was "
+ "already queued\n", __func__);
+ }
+ }
+
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ switch (dj_report.report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+ break;
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ break;
+ default:
+ dbg_hid("%s: unexpected report type\n", __func__);
+ }
+}
+
+static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
+}
+
+static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ unsigned int i;
+ u8 reportbuffer[MAX_REPORT_SIZE];
+ struct dj_device *djdev;
+
+ djdev = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+ if (!djdev) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
+ return;
+ }
+
+ memset(reportbuffer, 0, sizeof(reportbuffer));
+
+ for (i = 0; i < NUMBER_OF_HID_REPORTS; i++) {
+ if (djdev->reports_supported & (1 << i)) {
+ reportbuffer[0] = i;
+ if (hid_input_report(djdev->hdev,
+ HID_INPUT_REPORT,
+ reportbuffer,
+ hid_reportid_size_map[i], 1)) {
+ dbg_hid("hid_input_report error sending null "
+ "report\n");
+ }
+ }
+ }
+}
+
+static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_device *dj_device;
+
+ dj_device = djrcv_dev->paired_dj_devices[dj_report->device_index];
+
+ if (dj_device == NULL) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
+ return;
+ }
+
+ if ((dj_report->report_type > ARRAY_SIZE(hid_reportid_size_map) - 1) ||
+ (hid_reportid_size_map[dj_report->report_type] == 0)) {
+ dbg_hid("invalid report type:%x\n", dj_report->report_type);
+ return;
+ }
+
+ if (hid_input_report(dj_device->hdev,
+ HID_INPUT_REPORT, &dj_report->report_type,
+ hid_reportid_size_map[dj_report->report_type], 1)) {
+ dbg_hid("hid_input_report error\n");
+ }
+}
+
+
+static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+{
+ struct hid_device *hdev = djrcv_dev->hdev;
+ int sent_bytes;
+
+ if (!hdev->hid_output_raw_report) {
+ dev_err(&hdev->dev, "%s: "
+ "hid_output_raw_report is null\n", __func__);
+ return -ENODEV;
+ }
+
+ sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report,
+ sizeof(struct dj_report),
+ HID_OUTPUT_REPORT);
+
+ return (sent_bytes < 0) ? sent_bytes : 0;
+}
+
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
+{
+ struct dj_report dj_report;
+
+ memset(&dj_report, 0, sizeof(dj_report));
+ dj_report.report_id = REPORT_ID_DJ_SHORT;
+ dj_report.device_index = 0xFF;
+ dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+ return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ unsigned timeout)
+{
+ struct dj_report dj_report;
+
+ memset(&dj_report, 0, sizeof(dj_report));
+ dj_report.report_id = REPORT_ID_DJ_SHORT;
+ dj_report.device_index = 0xFF;
+ dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F;
+ dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+}
+
+
+static int logi_dj_ll_open(struct hid_device *hid)
+{
+ dbg_hid("%s:%s\n", __func__, hid->phys);
+ return 0;
+}
+
+static void logi_dj_ll_close(struct hid_device *hid)
+{
+ dbg_hid("%s:%s\n", __func__, hid->phys);
+}
+
+static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 *buf,
+ size_t count,
+ unsigned char report_type)
+{
+ /* Called by hid raw to send data */
+ dbg_hid("%s\n", __func__);
+
+ return 0;
+}
+
+static int logi_dj_ll_parse(struct hid_device *hid)
+{
+ struct dj_device *djdev = hid->driver_data;
+ int retval;
+
+ dbg_hid("%s\n", __func__);
+
+ djdev->hdev->version = 0x0111;
+ djdev->hdev->country = 0x00;
+
+ if (djdev->reports_supported & STD_KEYBOARD) {
+ dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) kbd_descriptor,
+ sizeof(kbd_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a kbd descriptor, hid_parse failed"
+ " error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & STD_MOUSE) {
+ dbg_hid("%s: sending a mouse descriptor, reports_supported: "
+ "%x\n", __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) mse_descriptor,
+ sizeof(mse_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a mouse descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & MULTIMEDIA) {
+ dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) consumer_descriptor,
+ sizeof(consumer_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a consumer_descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & POWER_KEYS) {
+ dbg_hid("%s: sending a power keys report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) syscontrol_descriptor,
+ sizeof(syscontrol_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a syscontrol_descriptor, "
+ "hid_parse failed error: %d\n",
+ __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & MEDIA_CENTER) {
+ dbg_hid("%s: sending a media center report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ retval = hid_parse_report(hid,
+ (u8 *) media_descriptor,
+ sizeof(media_descriptor));
+ if (retval) {
+ dbg_hid("%s: sending a media_descriptor, hid_parse "
+ "failed error: %d\n", __func__, retval);
+ return retval;
+ }
+ }
+
+ if (djdev->reports_supported & KBD_LEDS) {
+ dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+ __func__, djdev->reports_supported);
+ }
+
+ return 0;
+}
+
+static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ /* Sent by the input layer to handle leds and Force Feedback */
+ struct hid_device *dj_hiddev = input_get_drvdata(dev);
+ struct dj_device *dj_dev = dj_hiddev->driver_data;
+
+ struct dj_receiver_dev *djrcv_dev =
+ dev_get_drvdata(dj_hiddev->dev.parent);
+ struct hid_device *dj_rcv_hiddev = djrcv_dev->hdev;
+ struct hid_report_enum *output_report_enum;
+
+ struct hid_field *field;
+ struct hid_report *report;
+ unsigned char data[8];
+ int offset;
+
+ dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
+ __func__, dev->phys, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(dj_hiddev, type, code, &field);
+
+ if (offset == -1) {
+ dev_warn(&dev->dev, "event field not found\n");
+ return -1;
+ }
+ hid_set_field(field, offset, value);
+ hid_output_report(field->report, &data[0]);
+
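+	/*
+	 * Wrap the generated LED data into a DJ short output report addressed
+	 * to this device's index and send it through the receiver.
+	 */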
+ output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
+ report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
+ hid_set_field(report->field[0], 0, dj_dev->device_index);
+ hid_set_field(report->field[0], 1, REPORT_TYPE_LEDS);
+ hid_set_field(report->field[0], 2, data[1]);
+
+ usbhid_submit_report(dj_rcv_hiddev, report, USB_DIR_OUT);
+
+ return 0;
+}
+
+static int logi_dj_ll_start(struct hid_device *hid)
+{
+ dbg_hid("%s\n", __func__);
+ return 0;
+}
+
+static void logi_dj_ll_stop(struct hid_device *hid)
+{
+ dbg_hid("%s\n", __func__);
+}
+
+
+static struct hid_ll_driver logi_dj_ll_driver = {
+ .parse = logi_dj_ll_parse,
+ .start = logi_dj_ll_start,
+ .stop = logi_dj_ll_stop,
+ .open = logi_dj_ll_open,
+ .close = logi_dj_ll_close,
+ .hidinput_input_event = logi_dj_ll_input_event,
+};
+
+
+static int logi_dj_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data,
+ int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_report *dj_report = (struct dj_report *) data;
+ unsigned long flags;
+ bool report_processed = false;
+
+ dbg_hid("%s, size:%d\n", __func__, size);
+
+ /* Here we receive all data coming from iface 2, there are 4 cases:
+ *
+ * 1) Data should continue its normal processing i.e. data does not
+ * come from the DJ collection, in which case we do nothing and
+ * return 0, so hid-core can continue normal processing (will forward
+ * to associated hidraw device)
+ *
+ * 2) Data is from DJ collection, and is intended for this driver, i.e.
+ * data contains arrival, departure, etc notifications, in which case
+ * we queue them for delayed processing by the work queue. We return 1
+ * to hid-core as no further processing is required from it.
+ *
+ * 3) Data is from DJ collection, and signals a connection change;
+ * if the change means rf link loss, then we must send a null report
+ * to the upper layer to discard potentially pressed keys that may be
+ * repeated forever by the input layer. Return 1 to hid-core as no
+ * further processing is required.
+ *
+ * 4) Data is from DJ collection and is an actual input event from
+ * a paired DJ device in which case we forward it to the correct hid
+ * device (via hid_input_report() ) and return 1 so hid-core does not do
+ * anything else with it.
+ */
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
+ switch (dj_report->report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ logi_dj_recv_queue_notification(djrcv_dev, dj_report);
+ break;
+ case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
+ if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
+ STATUS_LINKLOSS) {
+ logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
+ }
+ break;
+ default:
+ logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ }
+ report_processed = true;
+ }
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ return report_processed;
+}
+
+static int logi_dj_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct dj_receiver_dev *djrcv_dev;
+ int retval;
+
+ if (is_dj_device((struct dj_device *)hdev->driver_data))
+ return -ENODEV;
+
+ dbg_hid("%s called for ifnum %d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+
+ /* Ignore interfaces 0 and 1: they do not carry any data, so don't
+ * create any hid_device for them */
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ LOGITECH_DJ_INTERFACE_NUMBER) {
+ dbg_hid("%s: ignoring ifnum %d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+ return -ENODEV;
+ }
+
+ /* Handle interface 2 */
+
+ djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+ if (!djrcv_dev) {
+ dev_err(&hdev->dev,
+ "%s:failed allocating dj_receiver_dev\n", __func__);
+ return -ENOMEM;
+ }
+ djrcv_dev->hdev = hdev;
+ INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+ spin_lock_init(&djrcv_dev->lock);
+ if (kfifo_alloc(&djrcv_dev->notif_fifo,
+ DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
+ GFP_KERNEL)) {
+ dev_err(&hdev->dev,
+ "%s:failed allocating notif_fifo\n", __func__);
+ kfree(djrcv_dev);
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, djrcv_dev);
+
+ /* Call into usbhid to fetch the HID descriptors of interface 2, then
+ * have hid-core parse them; this in turn leads to the creation of the
+ * hidraw and hiddev nodes for interface 2 of the receiver */
+ retval = hid_parse(hdev);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "%s:parse of interface 2 failed\n", __func__);
+ goto hid_parse_fail;
+ }
+
+ /* Starts the usb device and connects to upper interfaces hiddev and
+ * hidraw */
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ dev_err(&hdev->dev,
+ "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
+
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ dev_err(&hdev->dev,
+ "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ goto switch_to_dj_mode_fail;
+ }
+
+ /* This enables the polling urb on the IN endpoint */
+ retval = hdev->ll_driver->open(hdev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned "
+ "error:%d\n", __func__, retval);
+ goto llopen_failed;
+ }
+
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (retval < 0) {
+ dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
+ "error:%d\n", __func__, retval);
+ goto logi_dj_recv_query_paired_devices_failed;
+ }
+
+ return retval;
+
+logi_dj_recv_query_paired_devices_failed:
+ hdev->ll_driver->close(hdev);
+
+llopen_failed:
+switch_to_dj_mode_fail:
+ hid_hw_stop(hdev);
+
+hid_hw_start_fail:
+hid_parse_fail:
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+ hid_set_drvdata(hdev, NULL);
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int logi_dj_reset_resume(struct hid_device *hdev)
+{
+ int retval;
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ dev_err(&hdev->dev,
+ "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ }
+
+ return 0;
+}
+#endif
+
+static void logi_dj_remove(struct hid_device *hdev)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_device *dj_dev;
+ int i;
+
+ dbg_hid("%s\n", __func__);
+
+ cancel_work_sync(&djrcv_dev->work);
+
+ hdev->ll_driver->close(hdev);
+ hid_hw_stop(hdev);
+
+ /* At this point the only context that can access djrcv_dev is this
+ * thread: the work item is guaranteed to have finished and no more
+ * raw_event callbacks should arrive after the remove callback was
+ * triggered, so no locks are taken around the code below */
+ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ dj_dev = djrcv_dev->paired_dj_devices[i];
+ if (dj_dev != NULL) {
+ hid_destroy_device(dj_dev->hdev);
+ kfree(dj_dev);
+ djrcv_dev->paired_dj_devices[i] = NULL;
+ }
+ }
+
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+ hid_set_drvdata(hdev, NULL);
+}
+
+static int logi_djdevice_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct dj_device *dj_dev = hdev->driver_data;
+
+ if (!is_dj_device(dj_dev))
+ return -ENODEV;
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ return ret;
+}
+
+static const struct hid_device_id logi_dj_receivers[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, logi_dj_receivers);
+
+static struct hid_driver logi_djreceiver_driver = {
+ .name = "logitech-djreceiver",
+ .id_table = logi_dj_receivers,
+ .probe = logi_dj_probe,
+ .remove = logi_dj_remove,
+ .raw_event = logi_dj_raw_event,
+#ifdef CONFIG_PM
+ .reset_resume = logi_dj_reset_resume,
+#endif
+};
+
+
+static const struct hid_device_id logi_dj_devices[] = {
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ {}
+};
+
+static struct hid_driver logi_djdevice_driver = {
+ .name = "logitech-djdevice",
+ .id_table = logi_dj_devices,
+ .probe = logi_djdevice_probe,
+};
+
+
+static int __init logi_dj_init(void)
+{
+ int retval;
+
+ dbg_hid("Logitech-DJ:%s\n", __func__);
+
+ retval = hid_register_driver(&logi_djreceiver_driver);
+ if (retval)
+ return retval;
+
+ retval = hid_register_driver(&logi_djdevice_driver);
+ if (retval)
+ hid_unregister_driver(&logi_djreceiver_driver);
+
+ return retval;
+}
+
+static void __exit logi_dj_exit(void)
+{
+ dbg_hid("Logitech-DJ:%s\n", __func__);
+
+ hid_unregister_driver(&logi_djdevice_driver);
+ hid_unregister_driver(&logi_djreceiver_driver);
+}
+
+module_init(logi_dj_init);
+module_exit(logi_dj_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Logitech");
+MODULE_AUTHOR("Nestor Lopez Casado");
+MODULE_AUTHOR("nlopezcasad@logitech.com");
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
new file mode 100644
index 000000000000..fd28a5e0ca3b
--- /dev/null
+++ b/drivers/hid/hid-logitech-dj.h
@@ -0,0 +1,123 @@
+#ifndef __HID_LOGITECH_DJ_H
+#define __HID_LOGITECH_DJ_H
+
+/*
+ * HID driver for Logitech Unifying receivers
+ *
+ * Copyright (c) 2011 Logitech
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kfifo.h>
+
+#define DJ_MAX_PAIRED_DEVICES 6
+#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_DEVICE_INDEX_MIN 1
+#define DJ_DEVICE_INDEX_MAX 6
+
+#define DJREPORT_SHORT_LENGTH 15
+#define DJREPORT_LONG_LENGTH 32
+
+#define REPORT_ID_DJ_SHORT 0x20
+#define REPORT_ID_DJ_LONG 0x21
+
+#define REPORT_TYPE_RFREPORT_FIRST 0x01
+#define REPORT_TYPE_RFREPORT_LAST 0x1F
+
+/* Command Switch to DJ mode */
+#define REPORT_TYPE_CMD_SWITCH 0x80
+#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
+#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
+#define TIMEOUT_NO_KEEPALIVE 0x00
+
+/* Command to Get the list of Paired devices */
+#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
+
+/* Device Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
+#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
+#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
+#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
+#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
+#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
+
+/* Device Un-Paired Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
+
+
+/* Connection Status Notification */
+#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
+#define CONNECTION_STATUS_PARAM_STATUS 0x00
+#define STATUS_LINKLOSS 0x01
+
+/* Error Notification */
+#define REPORT_TYPE_NOTIF_ERROR 0x7F
+#define NOTIF_ERROR_PARAM_ETYPE 0x00
+#define ETYPE_KEEPALIVE_TIMEOUT 0x01
+
+/* supported DJ HID && RF report types */
+#define REPORT_TYPE_KEYBOARD 0x01
+#define REPORT_TYPE_MOUSE 0x02
+#define REPORT_TYPE_CONSUMER_CONTROL 0x03
+#define REPORT_TYPE_SYSTEM_CONTROL 0x04
+#define REPORT_TYPE_MEDIA_CENTER 0x08
+#define REPORT_TYPE_LEDS 0x0E
+
+/* RF Report types bitfield */
+#define STD_KEYBOARD 0x00000002
+#define STD_MOUSE 0x00000004
+#define MULTIMEDIA 0x00000008
+#define POWER_KEYS 0x00000010
+#define MEDIA_CENTER 0x00000100
+#define KBD_LEDS 0x00004000
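+/* Each of these bits is (1 << corresponding RF report type above) */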
+
+struct dj_report {
+ u8 report_id;
+ u8 device_index;
+ u8 report_type;
+ u8 report_params[DJREPORT_SHORT_LENGTH - 3];
+};
+
+struct dj_receiver_dev {
+ struct hid_device *hdev;
+ struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
+ DJ_DEVICE_INDEX_MIN];
+ struct work_struct work;
+ struct kfifo notif_fifo;
+ spinlock_t lock;
+};
+
+struct dj_device {
+ struct hid_device *hdev;
+ struct dj_receiver_dev *dj_receiver_dev;
+ u32 reports_supported;
+ u8 device_index;
+};
+
+/**
+ * is_dj_device - tell whether the given dj_device is not the receiver itself.
+ * @dj_dev: the dj device to test
+ *
+ * This macro tests whether a struct dj_device pointer refers to a device
+ * created by the bus enumerator, i.e. whether its parent device is the
+ * receiver's hid_device.
+ */
+#define is_dj_device(dj_dev) \
+ (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent)
+
+#endif
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c18a421..f0fbd7bd239e 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define NO_TOUCHES -1
#define SINGLE_TOUCH_UP -2
+/* Touch surface information. Dimension is in hundredths of a mm, min and max
+ * are in device units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1258
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+ ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+ ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
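+/* The *_RES_* values above are thus expressed in units per millimetre,
+ * which is what input_abs_set_res() expects for the X/Y axes. */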
+
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
- input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
- 1358, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
- 2047, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ MOUSE_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ MOUSE_RES_Y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
- input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
- input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
- 3167, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
- 2565, 4, 0);
+ input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+ TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+ TRACKPAD_MAX_Y, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ TRACKPAD_RES_Y);
}
input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ /*
+ * Some devices respond with 'invalid report id' when the feature
+ * report that switches them into multitouch mode is sent to them.
+ *
+ * This results in -EIO from the _raw low-level transport callback,
+ * but there seems to be no other way of switching the mode.
+ * Thus the super-ugly hacky success check below.
+ */
ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
HID_FEATURE_REPORT);
- if (ret != sizeof(feature)) {
+ if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 58d0e7aaf088..fa5d7a1ffa9e 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -47,10 +47,11 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_SLOT_IS_CONTACTID (1 << 1)
#define MT_QUIRK_CYPRESS (1 << 2)
#define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3)
-#define MT_QUIRK_VALID_IS_INRANGE (1 << 4)
-#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 6)
-#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 7)
+#define MT_QUIRK_ALWAYS_VALID (1 << 4)
+#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
+#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
+#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 7)
+#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
struct mt_slot {
__s32 x, y, p, w, h;
@@ -86,11 +87,12 @@ struct mt_class {
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
-#define MT_CLS_CONFIDENCE 0x0002
-#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0003
-#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0005
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0006
+#define MT_CLS_SERIAL 0x0002
+#define MT_CLS_CONFIDENCE 0x0003
+#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0004
+#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
+#define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -134,6 +136,8 @@ static int find_slot_from_contactid(struct mt_device *td)
struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
+ { .name = MT_CLS_SERIAL,
+ .quirks = MT_QUIRK_ALWAYS_VALID},
{ .name = MT_CLS_CONFIDENCE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
{ .name = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -213,6 +217,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_class *cls = td->mtclass;
__s32 quirks = cls->quirks;
+ /* Only map fields from TouchScreen or TouchPad collections.
+ * We need to ignore fields that belong to other collections
+ * such as Mouse that might have the same GenericDesktop usages. */
+ if (field->application == HID_DG_TOUCHSCREEN)
+ set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
+ else if (field->application == HID_DG_TOUCHPAD)
+ set_bit(INPUT_PROP_POINTER, hi->input->propbit);
+ else
+ return 0;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
@@ -277,6 +291,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
td->last_mt_collection = usage->collection_index;
+ hdev->quirks &= ~HID_QUIRK_MULTITOUCH;
return 1;
case HID_DG_WIDTH:
hid_map_usage(hi, usage, bit, max,
@@ -435,7 +450,9 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
switch (usage->hid) {
case HID_DG_INRANGE:
- if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+ if (quirks & MT_QUIRK_ALWAYS_VALID)
+ td->curvalid = true;
+ else if (quirks & MT_QUIRK_VALID_IS_INRANGE)
td->curvalid = value;
break;
case HID_DG_TIPSWITCH:
@@ -513,12 +530,44 @@ static void mt_set_input_mode(struct hid_device *hdev)
}
}
+/* a list of devices for which there is a specialized multitouch driver */
+static const struct hid_device_id mt_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { }
+};
+
+static bool mt_match_one_id(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ return id->bus == hdev->bus &&
+ (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
+ (id->product == HID_ANY_ID || id->product == hdev->product);
+}
+
+static const struct hid_device_id *mt_match_id(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ for (; id->bus; id++)
+ if (mt_match_one_id(hdev, id))
+ return id;
+
+ return NULL;
+}
+
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret, i;
struct mt_device *td;
struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+ if (mt_match_id(hdev, mt_have_special_driver))
+ return -ENODEV;
+
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
mtclass = &(mt_classes[i]);
@@ -526,10 +575,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- /* This allows the driver to correctly support devices
- * that emit events over several HID messages.
- */
- hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
@@ -545,10 +590,16 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret != 0)
goto fail;
+ hdev->quirks |= HID_QUIRK_MULTITOUCH;
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
goto fail;
+ /* This allows the driver to correctly support devices
+ * that emit events over several HID messages.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -662,6 +713,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
+ /* Ideacom panel */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
+ USB_DEVICE_ID_IDEACOM_IDC6650) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -672,6 +728,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS,
USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
+ /* LG Display panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_LG,
+ USB_DEVICE_ID_LG_MULTITOUCH) },
+
/* Lumio panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
@@ -732,6 +793,10 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
+ /* Rest of the world */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 06888323828c..72ca689b6474 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
if (ret) {
hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
ret);
- /*
- * battery attribute is not critical for the tablet, but if it
- * failed then there is no need to create ac attribute
- */
- goto move_on;
+ goto err_battery;
}
wdata->ac.properties = wacom_ac_props;
@@ -371,18 +367,14 @@ static int wacom_probe(struct hid_device *hdev,
if (ret) {
hid_warn(hdev,
"can't create ac battery attribute, err: %d\n", ret);
- /*
- * ac attribute is not critical for the tablet, but if it
- * failed then we don't want to battery attribute to exist
- */
- power_supply_unregister(&wdata->battery);
+ goto err_ac;
}
-
-move_on:
#endif
hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
input = hidinput->input;
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+
/* Basics */
input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
@@ -416,6 +408,13 @@ move_on:
return 0;
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+ power_supply_unregister(&wdata->battery);
+err_battery:
+ device_remove_file(&hdev->dev, &dev_attr_speed);
+ hid_hw_stop(hdev);
+#endif
err_free:
kfree(wdata);
return ret;
@@ -426,6 +425,7 @@ static void wacom_remove(struct hid_device *hdev)
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
struct wacom_data *wdata = hid_get_drvdata(hdev);
#endif
+ device_remove_file(&hdev->dev, &dev_attr_speed);
hid_hw_stop(hdev);
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
index 85a02e5f9fe8..76739c07fa3c 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote.c
@@ -10,15 +10,18 @@
* any later version.
*/
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
-#define WIIMOTE_VERSION "0.1"
+#define WIIMOTE_VERSION "0.2"
#define WIIMOTE_NAME "Nintendo Wii Remote"
#define WIIMOTE_BUFSIZE 32
@@ -30,12 +33,26 @@ struct wiimote_buf {
struct wiimote_state {
spinlock_t lock;
__u8 flags;
+ __u8 accel_split[2];
+
+ /* synchronous cmd requests */
+ struct mutex sync;
+ struct completion ready;
+ int cmd;
+ __u32 opt;
+
+ /* results of synchronous requests */
+ __u8 cmd_battery;
+ __u8 cmd_err;
};
struct wiimote_data {
struct hid_device *hdev;
struct input_dev *input;
struct led_classdev *leds[4];
+ struct input_dev *accel;
+ struct input_dev *ir;
+ struct power_supply battery;
spinlock_t qlock;
__u8 head;
@@ -46,23 +63,47 @@ struct wiimote_data {
struct wiimote_state state;
};
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_RUMBLE 0x10
+#define WIIPROTO_FLAG_ACCEL 0x20
+#define WIIPROTO_FLAG_IR_BASIC 0x40
+#define WIIPROTO_FLAG_IR_EXT 0x80
+#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+ WIIPROTO_FLAG_IR_FULL)
/* return flag for led \num */
#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
enum wiiproto_reqs {
WIIPROTO_REQ_NULL = 0x0,
+ WIIPROTO_REQ_RUMBLE = 0x10,
WIIPROTO_REQ_LED = 0x11,
WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_IR1 = 0x13,
+ WIIPROTO_REQ_SREQ = 0x15,
+ WIIPROTO_REQ_WMEM = 0x16,
+ WIIPROTO_REQ_RMEM = 0x17,
+ WIIPROTO_REQ_IR2 = 0x1a,
WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_DATA = 0x21,
WIIPROTO_REQ_RETURN = 0x22,
WIIPROTO_REQ_DRM_K = 0x30,
+ WIIPROTO_REQ_DRM_KA = 0x31,
+ WIIPROTO_REQ_DRM_KE = 0x32,
+ WIIPROTO_REQ_DRM_KAI = 0x33,
+ WIIPROTO_REQ_DRM_KEE = 0x34,
+ WIIPROTO_REQ_DRM_KAE = 0x35,
+ WIIPROTO_REQ_DRM_KIE = 0x36,
+ WIIPROTO_REQ_DRM_KAIE = 0x37,
+ WIIPROTO_REQ_DRM_E = 0x3d,
+ WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+ WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
};
enum wiiproto_keys {
@@ -94,6 +135,56 @@ static __u16 wiiproto_keymap[] = {
BTN_MODE, /* WIIPROTO_KEY_HOME */
};
+static enum power_supply_property wiimote_battery_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY
+};
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+ wdata->state.cmd = WIIPROTO_REQ_NULL;
+ complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+ return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ INIT_COMPLETION(wdata->state.ready);
+ wdata->state.cmd = cmd;
+ wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+ mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ else if (ret == 0)
+ return -EIO;
+ else
+ return 0;
+}
+
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -172,6 +263,39 @@ static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
spin_unlock_irqrestore(&wdata->qlock, flags);
}
+/*
+ * This sets the rumble bit on the given output report if rumble is
+ * currently enabled.
+ * \cmd1 must point to the second byte in the output report => &cmd[1]
+ * This must be called on nearly every output report before passing it
+ * into the output queue!
+ */
+static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1)
+{
+ if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE)
+ *cmd1 |= 0x01;
+}
+
+static void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble)
+{
+ __u8 cmd[2];
+
+ rumble = !!rumble;
+ if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE))
+ return;
+
+ if (rumble)
+ wdata->state.flags |= WIIPROTO_FLAG_RUMBLE;
+ else
+ wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE;
+
+ cmd[0] = WIIPROTO_REQ_RUMBLE;
+ cmd[1] = 0;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
{
__u8 cmd[2];
@@ -193,6 +317,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
if (leds & WIIPROTO_FLAG_LED4)
cmd[1] |= 0x80;
+ wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
@@ -203,7 +328,23 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
*/
static __u8 select_drm(struct wiimote_data *wdata)
{
- return WIIPROTO_REQ_DRM_K;
+ __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+
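+ /* Pick a DRM that carries at least the currently enabled data
+ * sources: keys are always reported, accel and IR only when they
+ * have been requested. */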
+ if (ir == WIIPROTO_FLAG_IR_BASIC) {
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+ return WIIPROTO_REQ_DRM_KAIE;
+ else
+ return WIIPROTO_REQ_DRM_KIE;
+ } else if (ir == WIIPROTO_FLAG_IR_EXT) {
+ return WIIPROTO_REQ_DRM_KAI;
+ } else if (ir == WIIPROTO_FLAG_IR_FULL) {
+ return WIIPROTO_REQ_DRM_SKAI1;
+ } else {
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
+ return WIIPROTO_REQ_DRM_KA;
+ else
+ return WIIPROTO_REQ_DRM_K;
+ }
}
static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
@@ -217,9 +358,256 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
cmd[1] = 0;
cmd[2] = drm;
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_status(struct wiimote_data *wdata)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_SREQ;
+ cmd[1] = 0;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel)
+{
+ accel = !!accel;
+ if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+ return;
+
+ if (accel)
+ wdata->state.flags |= WIIPROTO_FLAG_ACCEL;
+ else
+ wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL;
+
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+}
+
+static void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_IR1;
+ cmd[1] = flags;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+static void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags)
+{
+ __u8 cmd[2];
+
+ cmd[0] = WIIPROTO_REQ_IR2;
+ cmd[1] = flags;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiiproto_req_wreg(wdata, os, buf, sz) \
+ wiiproto_req_wmem((wdata), false, (os), (buf), (sz))
+
+#define wiiproto_req_weeprom(wdata, os, buf, sz) \
+ wiiproto_req_wmem((wdata), true, (os), (buf), (sz))
+
+static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
+ __u32 offset, const __u8 *buf, __u8 size)
+{
+ __u8 cmd[22];
+
+ if (size > 16 || size == 0) {
+ hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size);
+ return;
+ }
+
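+	/*
+	 * Output report layout: [0] report id, [1] flags (rumble bit, 0x04
+	 * selects register space instead of eeprom), [2..4] 24-bit
+	 * big-endian offset, [5] size, [6..21] payload.
+	 */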
+ memset(cmd, 0, sizeof(cmd));
+ cmd[0] = WIIPROTO_REQ_WMEM;
+ cmd[2] = (offset >> 16) & 0xff;
+ cmd[3] = (offset >> 8) & 0xff;
+ cmd[4] = offset & 0xff;
+ cmd[5] = size;
+ memcpy(&cmd[6], buf, size);
+
+ if (!eeprom)
+ cmd[1] |= 0x04;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
+/* requires the cmd-mutex to be held */
+static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+ const __u8 *wmem, __u8 size)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0);
+ wiiproto_req_wreg(wdata, offset, wmem, size);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (!ret && wdata->state.cmd_err)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int wiimote_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wiimote_data *wdata = container_of(psy,
+ struct wiimote_data, battery);
+ int ret = 0, state;
+ unsigned long flags;
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
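+	/*
+	 * The battery level is delivered in the status report, so issue a
+	 * status request tagged as a pending command and wait until
+	 * handler_status() completes it with the battery byte.
+	 */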
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0);
+ wiiproto_req_status(wdata);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ state = wdata->state.cmd_battery;
+ wiimote_cmd_release(wdata);
+
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = state * 100 / 255;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int wiimote_init_ir(struct wiimote_data *wdata, __u16 mode)
+{
+ int ret;
+ unsigned long flags;
+ __u8 format = 0;
+ static const __u8 data_enable[] = { 0x01 };
+ static const __u8 data_sens1[] = { 0x02, 0x00, 0x00, 0x71, 0x01,
+ 0x00, 0xaa, 0x00, 0x64 };
+ static const __u8 data_sens2[] = { 0x63, 0x03 };
+ static const __u8 data_fin[] = { 0x08 };
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+
+ if (mode == (wdata->state.flags & WIIPROTO_FLAGS_IR)) {
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ return 0;
+ }
+
+ if (mode == 0) {
+ wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+ wiiproto_req_ir1(wdata, 0);
+ wiiproto_req_ir2(wdata, 0);
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ /* send PIXEL CLOCK ENABLE cmd first */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_IR1, 0);
+ wiiproto_req_ir1(wdata, 0x06);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (ret)
+ goto unlock;
+ if (wdata->state.cmd_err) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* enable IR LOGIC */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_IR2, 0);
+ wiiproto_req_ir2(wdata, 0x06);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (ret)
+ goto unlock;
+ if (wdata->state.cmd_err) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* enable the IR cam but do not make it send data yet */
+ ret = wiimote_cmd_write(wdata, 0xb00030, data_enable,
+ sizeof(data_enable));
+ if (ret)
+ goto unlock;
+
+ /* write first sensitivity block */
+ ret = wiimote_cmd_write(wdata, 0xb00000, data_sens1,
+ sizeof(data_sens1));
+ if (ret)
+ goto unlock;
+
+ /* write second sensitivity block */
+ ret = wiimote_cmd_write(wdata, 0xb0001a, data_sens2,
+ sizeof(data_sens2));
+ if (ret)
+ goto unlock;
+
+ /* put IR cam into desired state */
+ switch (mode) {
+ case WIIPROTO_FLAG_IR_FULL:
+ format = 5;
+ break;
+ case WIIPROTO_FLAG_IR_EXT:
+ format = 3;
+ break;
+ case WIIPROTO_FLAG_IR_BASIC:
+ format = 1;
+ break;
+ }
+ ret = wiimote_cmd_write(wdata, 0xb00033, &format, sizeof(format));
+ if (ret)
+ goto unlock;
+
+ /* make IR cam send data */
+ ret = wiimote_cmd_write(wdata, 0xb00030, data_fin, sizeof(data_fin));
+ if (ret)
+ goto unlock;
+
+ /* request new DRM mode compatible to IR mode */
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAGS_IR;
+ wdata->state.flags |= mode & WIIPROTO_FLAGS_IR;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+unlock:
+ wiimote_cmd_release(wdata);
+ return ret;
+}
+
static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev)
{
struct wiimote_data *wdata;
@@ -268,9 +656,28 @@ static void wiimote_leds_set(struct led_classdev *led_dev,
}
}
-static int wiimote_input_event(struct input_dev *dev, unsigned int type,
- unsigned int code, int value)
+static int wiimote_ff_play(struct input_dev *dev, void *data,
+ struct ff_effect *eff)
{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ __u8 value;
+ unsigned long flags;
+
+ /*
+ * The wiimote supports only a single rumble motor so if any magnitude
+ * is set to non-zero then we start the rumble motor. If both are set to
+ * zero, we stop the rumble motor.
+ */
+
+ if (eff->u.rumble.strong_magnitude || eff->u.rumble.weak_magnitude)
+ value = 1;
+ else
+ value = 0;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_rumble(wdata, value);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
return 0;
}
@@ -288,6 +695,61 @@ static void wiimote_input_close(struct input_dev *dev)
hid_hw_close(wdata->hdev);
}
+static int wiimote_accel_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ int ret;
+ unsigned long flags;
+
+ ret = hid_hw_open(wdata->hdev);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_accel(wdata, true);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimote_accel_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wiiproto_req_accel(wdata, false);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ hid_hw_close(wdata->hdev);
+}
+
+static int wiimote_ir_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(wdata->hdev);
+ if (ret)
+ return ret;
+
+ ret = wiimote_init_ir(wdata, WIIPROTO_FLAG_IR_BASIC);
+ if (ret) {
+ hid_hw_close(wdata->hdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void wiimote_ir_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+
+ wiimote_init_ir(wdata, 0);
+ hid_hw_close(wdata->hdev);
+}
+
static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
{
input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
@@ -315,12 +777,100 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
input_sync(wdata->input);
}
+static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u16 x, y, z;
+
+ if (!(wdata->state.flags & WIIPROTO_FLAG_ACCEL))
+ return;
+
+ /*
+ * payload is: BB BB XX YY ZZ
+ * Accelerometer data is encoded into three 10-bit values. XX, YY and ZZ
+ * contain the upper 8 bits of each value. The lower 2 bits are
+ * contained in the buttons data BB BB.
+ * Bits 6 and 7 of the first buttons byte BB are the lower 2 bits of the
+ * X accel value. Bit 5 of the second buttons byte is the 2nd bit of the
+ * Y accel value and bit 6 is the 2nd bit of the Z value.
+ * The first bit of the Y and Z values is not available and is always 0.
+ * 0x200 is returned on no movement.
+ */
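+	/*
+	 * Example (illustrative values only): payload 00 00 80 80 80 decodes
+	 * to x = y = z = 0x200, i.e. 0 after the 0x200 offset is subtracted
+	 * below: no movement on any axis.
+	 */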
+
+ x = payload[2] << 2;
+ y = payload[3] << 2;
+ z = payload[4] << 2;
+
+ x |= (payload[0] >> 5) & 0x3;
+ y |= (payload[1] >> 4) & 0x2;
+ z |= (payload[1] >> 5) & 0x2;
+
+ input_report_abs(wdata->accel, ABS_RX, x - 0x200);
+ input_report_abs(wdata->accel, ABS_RY, y - 0x200);
+ input_report_abs(wdata->accel, ABS_RZ, z - 0x200);
+ input_sync(wdata->accel);
+}
+
+#define ir_to_input0(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT0X, ABS_HAT0Y)
+#define ir_to_input1(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT1X, ABS_HAT1Y)
+#define ir_to_input2(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT2X, ABS_HAT2Y)
+#define ir_to_input3(wdata, ir, packed) __ir_to_input((wdata), (ir), (packed), \
+ ABS_HAT3X, ABS_HAT3Y)
+
+static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+ bool packed, __u8 xid, __u8 yid)
+{
+ __u16 x, y;
+
+ if (!(wdata->state.flags & WIIPROTO_FLAGS_IR))
+ return;
+
+ /*
+ * Basic IR data is encoded into 3 bytes. The first two bytes are the
+ * upper 8 bits of the X/Y data, the 3rd byte contains the lower 2 bits
+ * of both.
+ * If the data is packed, then the 3rd byte is put first and slightly
+ * reordered. This allows interleaving packed and non-packed data so
+ * that two IR sets fit in 5 bytes instead of 6.
+ * The resulting 10-bit X/Y values are passed to the ABS_HATXY input dev.
+ */
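+	/*
+	 * Example (illustrative values only): the non-packed bytes 80 40 00
+	 * decode to X = 0x200 and Y = 0x100; packed, the same coordinates
+	 * would arrive as 00 80 40.
+	 */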
+
+ if (packed) {
+ x = ir[1] << 2;
+ y = ir[2] << 2;
+
+ x |= ir[0] & 0x3;
+ y |= (ir[0] >> 2) & 0x3;
+ } else {
+ x = ir[0] << 2;
+ y = ir[1] << 2;
+
+ x |= (ir[2] >> 4) & 0x3;
+ y |= (ir[2] >> 6) & 0x3;
+ }
+
+ input_report_abs(wdata->ir, xid, x);
+ input_report_abs(wdata->ir, yid, y);
+}
+
static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+
+ if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
+ wdata->state.cmd_battery = payload[5];
+ wiimote_cmd_complete(wdata);
+ }
+}
+
+static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -330,9 +880,105 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
handler_keys(wdata, payload);
- if (err)
+ if (wiimote_cmd_pending(wdata, cmd, 0)) {
+ wdata->state.cmd_err = err;
+ wiimote_cmd_complete(wdata);
+ } else if (err) {
hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
cmd);
+ }
+}
+
+static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+}
+
+static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+}
+
+static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+ ir_to_input0(wdata, &payload[5], false);
+ ir_to_input1(wdata, &payload[8], false);
+ ir_to_input2(wdata, &payload[11], false);
+ ir_to_input3(wdata, &payload[14], false);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+}
+
+static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ ir_to_input0(wdata, &payload[2], false);
+ ir_to_input1(wdata, &payload[4], true);
+ ir_to_input2(wdata, &payload[7], false);
+ ir_to_input3(wdata, &payload[9], true);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+}
+
+static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+ handler_accel(wdata, payload);
+ ir_to_input0(wdata, &payload[5], false);
+ ir_to_input1(wdata, &payload[7], true);
+ ir_to_input2(wdata, &payload[10], false);
+ ir_to_input3(wdata, &payload[12], true);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
+{
+}
+
+static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
+{
+ handler_keys(wdata, payload);
+
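+	/*
+	 * Interleaved DRM: this report carries the X accel byte, SKAI2
+	 * carries Y, and the Z byte is reassembled from bits stashed in the
+	 * button bytes of both reports.
+	 */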
+ wdata->state.accel_split[0] = payload[2];
+ wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
+ wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);
+
+ ir_to_input0(wdata, &payload[3], false);
+ ir_to_input1(wdata, &payload[12], false);
+ input_sync(wdata->ir);
+}
+
+static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
+{
+ __u8 buf[5];
+
+ handler_keys(wdata, payload);
+
+ wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
+ wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);
+
+ buf[0] = 0;
+ buf[1] = 0;
+ buf[2] = wdata->state.accel_split[0];
+ buf[3] = payload[2];
+ buf[4] = wdata->state.accel_split[1];
+ handler_accel(wdata, buf);
+
+ ir_to_input2(wdata, &payload[3], false);
+ ir_to_input3(wdata, &payload[12], false);
+ input_sync(wdata->ir);
}
struct wiiproto_handler {
@@ -343,8 +989,19 @@ struct wiiproto_handler {
static struct wiiproto_handler handlers[] = {
{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
+ { .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
{ .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+ { .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
+ { .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
+ { .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func = handler_drm_KAI },
+ { .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
+ { .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
+ { .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
+ { .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
+ { .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
+ { .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
+ { .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
{ .id = 0 }
};
@@ -355,6 +1012,7 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
struct wiiproto_handler *h;
int i;
unsigned long flags;
+ bool handled = false;
if (size < 1)
return -EINVAL;
@@ -363,10 +1021,16 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
for (i = 0; handlers[i].id; ++i) {
h = &handlers[i];
- if (h->id == raw_data[0] && h->size < size)
+ if (h->id == raw_data[0] && h->size < size) {
h->func(wdata, &raw_data[1]);
+ handled = true;
+ }
}
+ if (!handled)
+ hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
+ size);
+
spin_unlock_irqrestore(&wdata->state.lock, flags);
return 0;
@@ -434,16 +1098,13 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
return NULL;
wdata->input = input_allocate_device();
- if (!wdata->input) {
- kfree(wdata);
- return NULL;
- }
+ if (!wdata->input)
+ goto err;
wdata->hdev = hdev;
hid_set_drvdata(hdev, wdata);
input_set_drvdata(wdata->input, wdata);
- wdata->input->event = wiimote_input_event;
wdata->input->open = wiimote_input_open;
wdata->input->close = wiimote_input_close;
wdata->input->dev.parent = &wdata->hdev->dev;
@@ -457,18 +1118,89 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
set_bit(wiiproto_keymap[i], wdata->input->keybit);
+ set_bit(FF_RUMBLE, wdata->input->ffbit);
+ if (input_ff_create_memless(wdata->input, NULL, wiimote_ff_play))
+ goto err_input;
+
+ wdata->accel = input_allocate_device();
+ if (!wdata->accel)
+ goto err_input;
+
+ input_set_drvdata(wdata->accel, wdata);
+ wdata->accel->open = wiimote_accel_open;
+ wdata->accel->close = wiimote_accel_close;
+ wdata->accel->dev.parent = &wdata->hdev->dev;
+ wdata->accel->id.bustype = wdata->hdev->bus;
+ wdata->accel->id.vendor = wdata->hdev->vendor;
+ wdata->accel->id.product = wdata->hdev->product;
+ wdata->accel->id.version = wdata->hdev->version;
+ wdata->accel->name = WIIMOTE_NAME " Accelerometer";
+
+ set_bit(EV_ABS, wdata->accel->evbit);
+ set_bit(ABS_RX, wdata->accel->absbit);
+ set_bit(ABS_RY, wdata->accel->absbit);
+ set_bit(ABS_RZ, wdata->accel->absbit);
+ input_set_abs_params(wdata->accel, ABS_RX, -500, 500, 2, 4);
+ input_set_abs_params(wdata->accel, ABS_RY, -500, 500, 2, 4);
+ input_set_abs_params(wdata->accel, ABS_RZ, -500, 500, 2, 4);
+
+ wdata->ir = input_allocate_device();
+ if (!wdata->ir)
+ goto err_ir;
+
+ input_set_drvdata(wdata->ir, wdata);
+ wdata->ir->open = wiimote_ir_open;
+ wdata->ir->close = wiimote_ir_close;
+ wdata->ir->dev.parent = &wdata->hdev->dev;
+ wdata->ir->id.bustype = wdata->hdev->bus;
+ wdata->ir->id.vendor = wdata->hdev->vendor;
+ wdata->ir->id.product = wdata->hdev->product;
+ wdata->ir->id.version = wdata->hdev->version;
+ wdata->ir->name = WIIMOTE_NAME " IR";
+
+ set_bit(EV_ABS, wdata->ir->evbit);
+ set_bit(ABS_HAT0X, wdata->ir->absbit);
+ set_bit(ABS_HAT0Y, wdata->ir->absbit);
+ set_bit(ABS_HAT1X, wdata->ir->absbit);
+ set_bit(ABS_HAT1Y, wdata->ir->absbit);
+ set_bit(ABS_HAT2X, wdata->ir->absbit);
+ set_bit(ABS_HAT2Y, wdata->ir->absbit);
+ set_bit(ABS_HAT3X, wdata->ir->absbit);
+ set_bit(ABS_HAT3Y, wdata->ir->absbit);
+ input_set_abs_params(wdata->ir, ABS_HAT0X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT0Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT1X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT1Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT2X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT2Y, 0, 767, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT3X, 0, 1023, 2, 4);
+ input_set_abs_params(wdata->ir, ABS_HAT3Y, 0, 767, 2, 4);
+
spin_lock_init(&wdata->qlock);
INIT_WORK(&wdata->worker, wiimote_worker);
spin_lock_init(&wdata->state.lock);
+ init_completion(&wdata->state.ready);
+ mutex_init(&wdata->state.sync);
return wdata;
+
+err_ir:
+ input_free_device(wdata->accel);
+err_input:
+ input_free_device(wdata->input);
+err:
+ kfree(wdata);
+ return NULL;
}
static void wiimote_destroy(struct wiimote_data *wdata)
{
wiimote_leds_destroy(wdata);
+ power_supply_unregister(&wdata->battery);
+ input_unregister_device(wdata->accel);
+ input_unregister_device(wdata->ir);
input_unregister_device(wdata->input);
cancel_work_sync(&wdata->worker);
hid_hw_stop(wdata->hdev);
@@ -500,12 +1232,37 @@ static int wiimote_hid_probe(struct hid_device *hdev,
goto err;
}
- ret = input_register_device(wdata->input);
+ ret = input_register_device(wdata->accel);
if (ret) {
hid_err(hdev, "Cannot register input device\n");
goto err_stop;
}
+ ret = input_register_device(wdata->ir);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_ir;
+ }
+
+ ret = input_register_device(wdata->input);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_input;
+ }
+
+ wdata->battery.properties = wiimote_battery_props;
+ wdata->battery.num_properties = ARRAY_SIZE(wiimote_battery_props);
+ wdata->battery.get_property = wiimote_battery_get_property;
+ wdata->battery.name = "wiimote_battery";
+ wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ wdata->battery.use_for_apm = 0;
+
+ ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
+ if (ret) {
+ hid_err(hdev, "Cannot register battery device\n");
+ goto err_battery;
+ }
+
ret = wiimote_leds_create(wdata);
if (ret)
goto err_free;
@@ -523,9 +1280,20 @@ err_free:
wiimote_destroy(wdata);
return ret;
+err_battery:
+ input_unregister_device(wdata->input);
+ wdata->input = NULL;
+err_input:
+ input_unregister_device(wdata->ir);
+ wdata->ir = NULL;
+err_ir:
+ input_unregister_device(wdata->accel);
+ wdata->accel = NULL;
err_stop:
hid_hw_stop(hdev);
err:
+ input_free_device(wdata->ir);
+ input_free_device(wdata->accel);
input_free_device(wdata->input);
kfree(wdata);
return ret;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 4bdb5d46c52c..3146fdcda272 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 59d83e83da7f..932383786642 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,17 +36,25 @@
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
+#include <linux/moduleparam.h>
#include <asm/msr.h>
#include <asm/processor.h>
#define DRVNAME "coretemp"
+/*
+ * force_tjmax only matters when TjMax can't be read from the CPU itself.
+ * When set, it replaces the driver's suboptimal heuristic.
+ */
+static int force_tjmax;
+module_param_named(tjmax, force_tjmax, int, 0444);
+MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
-#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
-#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
+#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#ifdef CONFIG_SMP
@@ -69,8 +77,6 @@
* This value is passed as "id" field to rdmsr/wrmsr functions.
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
- * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
- * from where the thresholds are read.
* @attr_size: Total number of per-core attrs displayed in the sysfs.
* @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
* Otherwise, temp_data holds coretemp data.
@@ -79,13 +85,11 @@
struct temp_data {
int temp;
int ttarget;
- int tmin;
int tjmax;
unsigned long last_updated;
unsigned int cpu;
u32 cpu_core_id;
u32 status_reg;
- u32 intrpt_reg;
int attr_size;
bool is_pkg_data;
bool valid;
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev,
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
-static ssize_t show_max_alarm(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- u32 eax, edx;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct temp_data *tdata = pdata->core_data[attr->index];
-
- rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
-
- return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
-}
-
static ssize_t show_tjmax(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev,
return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}
-static ssize_t store_ttarget(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct temp_data *tdata = pdata->core_data[attr->index];
- u32 eax, edx;
- unsigned long val;
- int diff;
-
- if (strict_strtoul(buf, 10, &val))
- return -EINVAL;
-
- /*
- * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
- * of milli degree celsius. Hence don't accept val > (127 * 1000)
- */
- if (val > tdata->tjmax || val > 127000)
- return -EINVAL;
-
- diff = (tdata->tjmax - val) / 1000;
-
- mutex_lock(&tdata->update_lock);
- rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
- eax = (eax & ~THERM_MASK_THRESHOLD1) |
- (diff << THERM_SHIFT_THRESHOLD1);
- wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
- tdata->ttarget = val;
- mutex_unlock(&tdata->update_lock);
-
- return count;
-}
-
-static ssize_t show_tmin(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct platform_data *pdata = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
-}
-
-static ssize_t store_tmin(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct platform_data *pdata = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct temp_data *tdata = pdata->core_data[attr->index];
- u32 eax, edx;
- unsigned long val;
- int diff;
-
- if (strict_strtoul(buf, 10, &val))
- return -EINVAL;
-
- /*
- * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
- * of milli degree celsius. Hence don't accept val > (127 * 1000)
- */
- if (val > tdata->tjmax || val > 127000)
- return -EINVAL;
-
- diff = (tdata->tjmax - val) / 1000;
-
- mutex_lock(&tdata->update_lock);
- rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
- eax = (eax & ~THERM_MASK_THRESHOLD0) |
- (diff << THERM_SHIFT_THRESHOLD0);
- wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
- tdata->tmin = val;
- mutex_unlock(&tdata->update_lock);
-
- return count;
-}
-
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
- /* The 100C is default for both mobile and non mobile CPUs */
int err;
u32 eax, edx;
u32 val;
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err) {
- dev_warn(dev, "Unable to read TjMax from CPU.\n");
+ if (c->x86_model > 0xe && c->x86_model != 0x1c)
+ dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
val = (eax >> 16) & 0xff;
/*
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* will be used
*/
if (val) {
- dev_info(dev, "TjMax is %d C.\n", val);
+ dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
}
+ if (force_tjmax) {
+ dev_notice(dev, "TjMax forced to %d degrees C by user\n",
+ force_tjmax);
+ return force_tjmax * 1000;
+ }
+
/*
* An assumption is made for early CPUs and unreadable MSR.
* NOTE: the calculated value may not be correct.
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx)
rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx);
}
-static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
-{
- int err;
- u32 eax, edx, val;
-
- err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (!err) {
- val = (eax >> 16) & 0xff;
- if (val)
- return val * 1000;
- }
- dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
- return 100000; /* Default TjMax: 100 degree celsius */
-}
-
static int create_name_attr(struct platform_data *pdata, struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
int err, i;
- static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
+ static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
show_label, show_crit_alarm, show_temp, show_tjmax,
- show_max_alarm, show_ttarget, show_tmin };
- static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count) = { NULL, NULL, NULL, NULL, NULL,
- store_ttarget, store_tmin };
- static const char *names[TOTAL_ATTRS] = {
+ show_ttarget };
+ static const char *const names[TOTAL_ATTRS] = {
"temp%d_label", "temp%d_crit_alarm",
"temp%d_input", "temp%d_crit",
- "temp%d_max_alarm", "temp%d_max",
- "temp%d_max_hyst" };
+ "temp%d_max" };
for (i = 0; i < tdata->attr_size; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
- if (rw_ptr[i]) {
- tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
- tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
- }
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
tdata->sd_attrs[i].index = attr_no;
err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
@@ -481,9 +377,9 @@ exit_free:
}
-static int __devinit chk_ucode_version(struct platform_device *pdev)
+static int __cpuinit chk_ucode_version(unsigned int cpu)
{
- struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
int err;
u32 edx;
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev)
*/
if (c->x86_model == 0xe && c->x86_mask < 0xc) {
/* check for microcode update */
- err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu,
+ err = smp_call_function_single(cpu, get_ucode_rev_on_cpu,
&edx, 1);
if (err) {
- dev_err(&pdev->dev,
- "Cannot determine microcode revision of "
- "CPU#%u (%d)!\n", pdev->id, err);
+ pr_err("Cannot determine microcode revision of "
+ "CPU#%u (%d)!\n", cpu, err);
return -ENODEV;
} else if (edx < 0x39) {
- dev_err(&pdev->dev,
- "Errata AE18 not fixed, update BIOS or "
- "microcode of the CPU!\n");
+ pr_err("Errata AE18 not fixed, update BIOS or "
+ "microcode of the CPU!\n");
return -ENODEV;
}
}
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
- tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
- MSR_IA32_THERM_INTERRUPT;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
return tdata;
}
-static int create_core_data(struct platform_data *pdata,
- struct platform_device *pdev,
+static int create_core_data(struct platform_device *pdev,
unsigned int cpu, int pkg_flag)
{
struct temp_data *tdata;
+ struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
int err, attr_no;
@@ -588,20 +480,21 @@ static int create_core_data(struct platform_data *pdata,
goto exit_free;
/* We can access status register. Get Critical Temperature */
- if (pkg_flag)
- tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev);
- else
- tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
+ tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
/*
- * Test if we can access the intrpt register. If so, increase the
- * 'size' enough to have ttarget/tmin/max_alarm interfaces.
- * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
+ * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET.
+ * The target temperature is available on older CPUs but not in this
+ * register. Atoms don't have the register at all.
*/
- err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
- if (!err) {
- tdata->attr_size += MAX_THRESH_ATTRS;
- tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+ if (c->x86_model > 0xe && c->x86_model != 0x1c) {
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET,
+ &eax, &edx);
+ if (!err) {
+ tdata->ttarget
+ = tdata->tjmax - ((eax >> 8) & 0xff) * 1000;
+ tdata->attr_size++;
+ }
}
pdata->core_data[attr_no] = tdata;
@@ -613,22 +506,20 @@ static int create_core_data(struct platform_data *pdata,
return 0;
exit_free:
+ pdata->core_data[attr_no] = NULL;
kfree(tdata);
return err;
}
static void coretemp_add_core(unsigned int cpu, int pkg_flag)
{
- struct platform_data *pdata;
struct platform_device *pdev = coretemp_get_pdev(cpu);
int err;
if (!pdev)
return;
- pdata = platform_get_drvdata(pdev);
-
- err = create_core_data(pdata, pdev, cpu, pkg_flag);
+ err = create_core_data(pdev, cpu, pkg_flag);
if (err)
dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
}
@@ -652,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
struct platform_data *pdata;
int err;
- /* Check the microcode version of the CPU */
- err = chk_ucode_version(pdev);
- if (err)
- return err;
-
/* Initialize the per-package data structures */
pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL);
if (!pdata)
@@ -666,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
if (err)
goto exit_free;
- pdata->phys_proc_id = TO_PHYS_ID(pdev->id);
+ pdata->phys_proc_id = pdev->id;
platform_set_drvdata(pdev, pdata);
pdata->hwmon_dev = hwmon_device_register(&pdev->dev);
@@ -718,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
mutex_lock(&pdev_list_mutex);
- pdev = platform_device_alloc(DRVNAME, cpu);
+ pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -738,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
}
pdev_entry->pdev = pdev;
- pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
+ pdev_entry->phys_proc_id = pdev->id;
list_add_tail(&pdev_entry->list, &pdev_list);
mutex_unlock(&pdev_list_mutex);
@@ -799,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu)
return;
if (!pdev) {
+ /* Check the microcode version of the CPU */
+ if (chk_ucode_version(cpu))
+ return;
+
/*
* Alright, we have DTS support.
* We are bringing the _first_ core in this pkg
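The coretemp changes above read TjMax from bits 23:16 of MSR_IA32_TEMPERATURE_TARGET and fall back to the new "tjmax" module parameter when that read yields nothing. A minimal userspace sketch of that lookup order, assuming a hypothetical read_target_msr_eax() in place of rdmsr_safe_on_cpu():

#include <stdio.h>

static unsigned int read_target_msr_eax(void)
{
	/* Pretend the MSR read succeeded with TjMax = 100 C in bits 23:16. */
	return 100u << 16;
}

static int tjmax_millidegrees(int force_tjmax)
{
	unsigned int eax = read_target_msr_eax();
	unsigned int val = (eax >> 16) & 0xff;	/* bits 23:16: TjMax in degrees C */

	if (val)
		return (int)val * 1000;		/* report millidegrees, as hwmon does */
	if (force_tjmax)
		return force_tjmax * 1000;	/* user-supplied override */
	return 100000;				/* placeholder; the driver uses a heuristic here */
}

int main(void)
{
	printf("TjMax = %d mC\n", tjmax_millidegrees(0));
	return 0;
}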
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 257957c69d92..4f7c3fc40a89 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -72,7 +72,7 @@ struct ds620_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- u16 temp[3]; /* Register values, word */
+ s16 temp[3]; /* Register values, word */
};
/*
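The ds620 change swaps the cached register type from u16 to s16 because the chip reports temperature as a signed word. A standalone sketch of the failure mode, using an illustrative 1/256-degree scale rather than the chip's exact format:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int16_t  raw_signed   = -1280;           /* e.g. -5.0 C at 1/256 C per LSB */
	uint16_t raw_unsigned = (uint16_t)-1280; /* same register bits, wrong type */

	printf("signed cache:   %d milli-C\n", raw_signed   * 1000 / 256);
	printf("unsigned cache: %d milli-C\n", raw_unsigned * 1000 / 256);
	return 0;
}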
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24fdf4ba..dd2d7b9620c2 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
static inline int ADC_TO_CURR(int adc, int gain)
{
- return adc * 1400000 / gain * 255;
+ return adc * 1400000 / (gain * 255);
}
/*
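The max16065 fix is pure operator precedence: adc * 1400000 / gain * 255 divides by gain and then multiplies by 255, while the intended divisor is (gain * 255). A quick standalone check with arbitrary small values:

#include <stdio.h>

int main(void)
{
	int adc = 10, gain = 4;

	printf("buggy: %d\n", adc * 1400000 / gain * 255);   /* (adc*1400000/gain) * 255 */
	printf("fixed: %d\n", adc * 1400000 / (gain * 255)); /* adc*1400000 / (gain*255) */
	return 0;
}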
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a561c3a0e916..397fc59b5682 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -978,6 +978,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
struct pmbus_limit_attr {
u16 reg; /* Limit register */
bool update; /* True if register needs updates */
+ bool low; /* True if low limit; for limits with compare
+ functions only */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
u32 sbit; /* Alarm attribute status bit */
@@ -1029,7 +1031,8 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
if (attr->compare) {
pmbus_add_boolean_cmp(data, name,
l->alarm, index,
- cbase, cindex,
+ l->low ? cindex : cbase,
+ l->low ? cbase : cindex,
attr->sbase + page, l->sbit);
} else {
pmbus_add_boolean_reg(data, name,
@@ -1366,11 +1369,13 @@ static const struct pmbus_sensor_attr power_attributes[] = {
static const struct pmbus_limit_attr temp_limit_attrs[] = {
{
.reg = PMBUS_UT_WARN_LIMIT,
+ .low = true,
.attr = "min",
.alarm = "min_alarm",
.sbit = PB_TEMP_UT_WARNING,
}, {
.reg = PMBUS_UT_FAULT_LIMIT,
+ .low = true,
.attr = "lcrit",
.alarm = "lcrit_alarm",
.sbit = PB_TEMP_UT_FAULT,
@@ -1399,11 +1404,13 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
static const struct pmbus_limit_attr temp_limit_attrs23[] = {
{
.reg = PMBUS_UT_WARN_LIMIT,
+ .low = true,
.attr = "min",
.alarm = "min_alarm",
.sbit = PB_TEMP_UT_WARNING,
}, {
.reg = PMBUS_UT_FAULT_LIMIT,
+ .low = true,
.attr = "lcrit",
.alarm = "lcrit_alarm",
.sbit = PB_TEMP_UT_FAULT,
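The pmbus_core additions record which limits are low limits so the compare-based alarm can swap its operands: an under-limit alarm fires when the reading drops below the limit, an over-limit alarm when the limit drops below the reading. A simplified sketch of that idea only, with plain values in place of the driver's register indices:

#include <stdbool.h>
#include <stdio.h>

static bool is_less(int a, int b)
{
	return a < b;		/* stand-in for the boolean-compare attribute */
}

static bool limit_alarm(int reading, int limit, bool low)
{
	/* low limit: alarm when reading < limit; high limit: when limit < reading */
	return low ? is_less(reading, limit) : is_less(limit, reading);
}

int main(void)
{
	printf("under min_alarm: %d\n", limit_alarm(20, 25, true));  /* 1 */
	printf("over  max_alarm: %d\n", limit_alarm(20, 25, false)); /* 0 */
	return 0;
}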
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c7319734..d0ddb60155c9 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
block_buffer[ret] = '\0';
dev_info(&client->dev, "Device ID %s\n", block_buffer);
- mid = NULL;
- for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
- mid = &ucd9000_id[i];
+ for (mid = ucd9000_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
- if (!mid || !strlen(mid->name)) {
+ if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
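Both ucd9000 and ucd9200 switch from indexed iteration to walking the id table until its empty-name sentinel. A minimal sketch of that loop shape, with a simplified id structure and a made-up device ID string:

#include <stdio.h>
#include <string.h>
#include <strings.h>

struct id { const char *name; };

static const struct id ids[] = {
	{ "ucd90120" },
	{ "ucd90124" },
	{ "" },			/* sentinel: empty name terminates the table */
};

int main(void)
{
	const char *block_buffer = "UCD90124|2.3.4";	/* illustrative ID string */
	const struct id *mid;

	for (mid = ids; mid->name[0]; mid++)
		if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
			break;

	if (!mid->name[0])
		printf("Unsupported device\n");
	else
		printf("Matched %s\n", mid->name);
	return 0;
}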
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d..c65e9da707cc 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
block_buffer[ret] = '\0';
dev_info(&client->dev, "Device ID %s\n", block_buffer);
- mid = NULL;
- for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
- mid = &ucd9200_id[i];
+ for (mid = ucd9200_id; mid->name[0]; mid++) {
if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
break;
}
- if (!mid || !strlen(mid->name)) {
+ if (!mid->name[0]) {
dev_err(&client->dev, "Unsupported device\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 17cf1ab95521..8c2844e5691c 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
-static int w83791d_read(struct i2c_client *client, u8 register);
-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
+static int w83791d_read(struct i2c_client *client, u8 reg);
+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
static struct w83791d_data *w83791d_update_device(struct device *dev);
#ifdef DEBUG
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d269b841..b73da6cd6f91 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
return -EINVAL;
}
sds = kzalloc(sizeof(*sds), GFP_KERNEL);
- if (!sds)
+ if (!sds) {
+ ret = -ENOMEM;
goto err_mem;
+ }
for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
sds->pdev[i] = add_i2c_device(dev, i);
if (IS_ERR(sds->pdev[i])) {
+ ret = PTR_ERR(sds->pdev[i]);
while (--i >= 0)
platform_device_unregister(sds->pdev[i]);
goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2440b7411978..3c94c4a81a55 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
/* Rounds down to not include partial word at the end of buf */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
- if (words_to_transfer > tx_fifo_avail)
- words_to_transfer = tx_fifo_avail;
- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
- buf += words_to_transfer * BYTES_PER_FIFO_WORD;
- buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
- tx_fifo_avail -= words_to_transfer;
+ /* It's very common to have < 4 bytes, so optimize that case. */
+ if (words_to_transfer) {
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ /*
+ * Update state before writing to FIFO. If this causes us
+ * to finish writing all bytes (AKA buf_remaining goes to 0) we
+ * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+ * not maskable). We need to make sure that the isr sees
+ * buf_remaining as 0 and doesn't call us back re-entrantly.
+ */
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf +
+ words_to_transfer * BYTES_PER_FIFO_WORD;
+ barrier();
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ }
/*
* If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
if (tx_fifo_avail > 0 && buf_remaining > 0) {
BUG_ON(buf_remaining > 3);
memcpy(&val, buf, buf_remaining);
+
+ /* Again, update state before writing to the FIFO so the isr sees it. */
+ i2c_dev->msg_buf_remaining = 0;
+ i2c_dev->msg_buf = NULL;
+ barrier();
+
i2c_writel(i2c_dev, val, I2C_TX_FIFO);
- buf_remaining = 0;
- tx_fifo_avail--;
}
- BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
- i2c_dev->msg_buf_remaining = buf_remaining;
- i2c_dev->msg_buf = buf;
return 0;
}
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
}
- if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
- !i2c_dev->msg_buf_remaining)
+ if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+ BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
+ }
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
}
#endif
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+ { .compatible = "nvidia,tegra20-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
+ .of_match_table = tegra_i2c_of_match,
},
};
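The tegra i2c rework follows a general pattern: publish the bookkeeping the ISR reads before the FIFO write that may raise the interrupt, separated by a compiler barrier. A minimal userspace sketch of that ordering, with push_to_fifo() as a hypothetical stand-in for i2c_writesl() and an illustrative 4-byte FIFO word:

#include <stddef.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

struct xfer {
	volatile size_t remaining;	/* also read by the interrupt handler */
	const unsigned char *buf;
};

/* Hypothetical stand-in for i2c_writesl(): this write is what can complete
 * the transfer and make the (non-maskable) completion interrupt fire. */
static void push_to_fifo(const unsigned char *buf, size_t words)
{
	(void)buf;
	(void)words;
}

static void fill_fifo(struct xfer *x, size_t words)
{
	size_t bytes = words * 4;	/* illustrative 4-byte FIFO word */

	/* Publish the state the ISR looks at before touching the FIFO... */
	x->remaining -= bytes;
	x->buf += bytes;
	barrier();			/* ...and stop the compiler reordering it */

	push_to_fifo(x->buf - bytes, words);
}

int main(void)
{
	unsigned char data[16] = { 0 };
	struct xfer x = { sizeof(data), data };

	fill_fifo(&x, 4);		/* 4 words == 16 bytes, remaining hits 0 */
	return (int)x.remaining;
}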
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 274798068a54..16f69be820c7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
if (!(rq->cmd_flags & REQ_FLUSH))
return BLKPREP_OK;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (rq->special) {
+ cmd = rq->special;
+ memset(cmd, 0, sizeof(*cmd));
+ } else {
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ }
/* FIXME: map struct ide_taskfile on rq->cmd[] */
BUG_ON(cmd == NULL);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 17bf9d95463c..6cd642aaa4de 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref)
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
}
kfree(ep);
}
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
release_tid(ep->com.tdev, GET_TID(rpl), NULL);
cxgb3_free_atid(ep->com.tdev, ep->atid);
dst_release(ep->dst);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
put_ep(&ep->com);
return CPL_RET_BUF_DONE;
}
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
if (!child_ep) {
printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
- l2t_release(L2DATA(tdev), l2t);
+ l2t_release(tdev, l2t);
dst_release(dst);
goto reject;
}
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!err)
goto out;
- l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
+ l2t_release(h->rdev.t3cdev_p, ep->l2t);
fail4:
dst_release(ep->dst);
fail3:
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
l2t);
dst_hold(new);
- l2t_release(L2DATA(ep->com.tdev), ep->l2t);
+ l2t_release(ep->com.tdev, ep->l2t);
ep->l2t = l2t;
dst_release(old);
ep->dst = new;
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 7b404e5443ed..e34eeb8ae371 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -668,4 +668,3 @@ module_exit(adp5588_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP5588/87 Keypad driver");
-MODULE_ALIAS("platform:adp5588-keys");
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index b09c7d127219..ab860511f016 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -475,7 +475,7 @@ static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on)
le16_to_cpu(dev->ctl_req->wIndex),
dev->ctl_data,
USB_PKT_LEN, USB_CTRL_SET_TIMEOUT);
- if (error && error != EINTR)
+ if (error < 0 && error != -EINTR)
err("%s: usb_control_msg() failed %d", __func__, error);
}
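The cm109 fix matches the usb_control_msg() convention that negative returns are -errno codes and non-negative returns are the transferred length, so both the sign test and the EINTR comparison need the minus. A tiny standalone illustration of the corrected check:

#include <errno.h>
#include <stdio.h>

static void check(int error)
{
	if (error < 0 && error != -EINTR)
		printf("usb_control_msg() failed %d\n", error);
	else
		printf("ok or interrupted (%d)\n", error);
}

int main(void)
{
	check(8);		/* positive: number of bytes transferred */
	check(-EINTR);		/* interrupted: deliberately not reported */
	check(-EPIPE);		/* real error: reported */
	return 0;
}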
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index da280189ef07..5ec617e28f7e 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -67,6 +67,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245
#define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246
#define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247
+/* MacbookAir4,1 (unibody, July 2011) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
+#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
/* MacbookAir4,2 (unibody, July 2011) */
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
@@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
+ /* MacbookAir4,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
/* MacbookAir4,2 */
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
@@ -334,6 +342,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4750, 5280 },
{ DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
{}
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index d27c9d91630b..958b4eb6369d 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -229,13 +229,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
get_unaligned_le16(&report[i + 3]);
i += 4;
}
- } else if (usage == WCM_DIGITIZER) {
- /* max pressure isn't reported
- features->pressure_max = (unsigned short)
- (report[i+4] << 8 | report[i + 3]);
- */
- features->pressure_max = 255;
- i += 4;
}
break;
@@ -291,13 +284,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
pen = 1;
i++;
break;
-
- case HID_USAGE_UNDEFINED:
- if (usage == WCM_DESKTOP && finger) /* capacity */
- features->pressure_max =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- break;
}
break;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c1c2f7b28d89..9dea71849f40 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -800,25 +800,26 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
int i;
for (i = 0; i < 2; i++) {
- int p = data[9 * i + 2];
- bool touch = p && !wacom->shared->stylus_in_proximity;
+ int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
+ bool touch = data[offset + 3] & 0x80;
- input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
/*
* Touch events need to be disabled while stylus is
* in proximity because user's hand is resting on touchpad
* and sending unwanted events. User expects tablet buttons
* to continue working though.
*/
+ touch = touch && !wacom->shared->stylus_in_proximity;
+
+ input_mt_slot(input, i);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
if (touch) {
- int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
- int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
+ int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
+ int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
x <<= 5;
y <<= 5;
}
- input_report_abs(input, ABS_MT_PRESSURE, p);
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
}
@@ -1056,10 +1057,11 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
features->x_fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
features->y_fuzz, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
- features->pressure_fuzz, 0);
if (features->device_type == BTN_TOOL_PEN) {
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
+ features->pressure_fuzz, 0);
+
/* penabled devices have fixed resolution for each model */
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
@@ -1098,6 +1100,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case WACOM_21UX2:
@@ -1120,12 +1124,12 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
for (i = 0; i < 8; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
- if (wacom_wac->features.type != WACOM_21UX2) {
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
- }
-
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
wacom_setup_cintiq(wacom_wac);
break;
@@ -1150,6 +1154,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case INTUOS:
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
wacom_setup_intuos(wacom_wac);
break;
@@ -1165,6 +1171,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
wacom_setup_intuos(wacom_wac);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case TABLETPC2FG:
@@ -1183,26 +1191,40 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
case TABLETPC:
__clear_bit(ABS_MISC, input_dev->absbit);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
if (features->device_type != BTN_TOOL_PEN)
break; /* no need to process stylus stuff */
/* fall through */
case PL:
- case PTU:
case DTU:
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
__set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ break;
+
+ case PTU:
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
/* fall through */
case PENPARTNER:
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
case BAMBOO_PT:
__clear_bit(ABS_MISC, input_dev->absbit);
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
if (features->device_type == BTN_TOOL_DOUBLETAP) {
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_FORWARD, input_dev->keybit);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index c14412ef4648..9941d39df43d 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -383,6 +383,8 @@ static int w8001_setup(struct w8001 *w8001)
dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
+ __set_bit(INPUT_PROP_DIRECT, dev->propbit);
+
/* penabled? */
error = w8001_command(w8001, W8001_CMD_QUERY, true);
if (!error) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc23462..0e4227f457af 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
* Writes the command to the IOMMUs command buffer and informs the
* hardware about the new command.
*/
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ bool sync)
{
u32 left, tail, head, next_tail;
unsigned long flags;
@@ -639,13 +641,18 @@ again:
copy_cmd_to_buffer(iommu, cmd, tail);
/* We need to sync now to make sure all commands are processed */
- iommu->need_sync = true;
+ iommu->need_sync = sync;
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+ return iommu_queue_command_sync(iommu, cmd, true);
+}
+
/*
* This function queues a completion wait command into the command
* buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
build_completion_wait(&cmd, (u64)&sem);
- ret = iommu_queue_command(iommu, &cmd);
+ ret = iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
return ret;
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
static void domain_flush_devices(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(dev_data, &domain->dev_list, list)
device_flush_dte(dev_data);
-
- spin_unlock_irqrestore(&domain->lock, flags);
}
/****************************************************************************
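The amd_iommu change is the usual "explicit flag plus thin wrapper" refactor: the full function takes a sync argument and the old entry point becomes a wrapper that keeps the previous always-sync behaviour, while the completion wait opts out. A minimal sketch with hypothetical queue_command()/queue_command_sync() names:

#include <stdbool.h>
#include <stdio.h>

struct cmd_queue { bool need_sync; };

static int queue_command_sync(struct cmd_queue *q, int cmd, bool sync)
{
	printf("queued command %d\n", cmd);
	q->need_sync = sync;	/* only request a completion sync when asked */
	return 0;
}

static int queue_command(struct cmd_queue *q, int cmd)
{
	return queue_command_sync(q, cmd, true);	/* old default behaviour */
}

int main(void)
{
	struct cmd_queue q = { false };

	queue_command(&q, 1);			/* regular command: sync wanted */
	queue_command_sync(&q, 2, false);	/* completion-wait: skip the extra sync */
	return q.need_sync ? 1 : 0;
}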
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5aec..6dcc7e2d54de 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
return ret;
}
- ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
+ ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
if (ret)
printk(KERN_ERR "IOMMU: can't request irq\n");
return ret;
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index d87c9d02f786..328c64c0841c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -41,6 +41,7 @@ static ssize_t led_delay_on_store(struct device *dev,
if (count == size) {
led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+ led_cdev->blink_delay_on = state;
ret = count;
}
@@ -69,6 +70,7 @@ static ssize_t led_delay_off_store(struct device *dev,
if (count == size) {
led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+ led_cdev->blink_delay_off = state;
ret = count;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 49da55c1528a..8c2a000cf3f5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
+
return 0;
bad:
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89f73ca22cfa..f84c08029b21 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
- if (!argc)
+ if (!argc) {
ti->error = "Feature corrupt_bio_byte requires parameters";
+ return -EINVAL;
+ }
r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
if (r)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a002dd85db1e..86df8b2cf927 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "write_mostly option is only valid for RAID1";
return -EINVAL;
}
- if (value > rs->md.raid_disks) {
+ if (value >= rs->md.raid_disks) {
rs->ti->error = "Invalid write_mostly drive index given";
return -EINVAL;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 986b8754bb08..bc04518e9d8b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
return;
template_disk = dm_table_get_integrity_disk(t, true);
- if (!template_disk &&
- blk_integrity_is_initialized(dm_disk(t->md))) {
+ if (template_disk)
+ blk_integrity_register(dm_disk(t->md),
+ blk_get_integrity(template_disk));
+ else if (blk_integrity_is_initialized(dm_disk(t->md)))
DMWARN("%s: device no longer has a valid integrity profile",
dm_device_name(t->md));
- return;
- }
- blk_integrity_register(dm_disk(t->md),
- blk_get_integrity(template_disk));
+ else
+ DMWARN("%s: unable to establish an integrity profile",
+ dm_device_name(t->md));
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
return 0;
}
+static bool dm_table_discard_zeroes_data(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /* Ensure that all targets support discard_zeroes_data. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (ti->discard_zeroes_data_unsupported)
+ return 0;
+ }
+
+ return 1;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_flush(q, flush);
+ if (!dm_table_discard_zeroes_data(t))
+ q->limits.discard_zeroes_data = 0;
+
dm_table_set_integrity(t);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3742ce8b0acf..5c95ccb59500 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -61,6 +61,11 @@
static void autostart_arrays(int part);
#endif
+/* pers_list is a list of registered personalities protected
+ * by pers_lock.
+ * pers_lock also serves to protect accesses to
+ * mddev->thread when the mutex cannot be held.
+ */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
+ /* As we've dropped the mutex we need a spinlock to
+ * make sure the thread doesn't disappear
+ */
+ spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
+ spin_unlock(&pers_lock);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@@ -1138,8 +1148,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = 0;
}
rdev->sectors = rdev->sb_start;
+ /* Limit to 4TB as metadata cannot record more than that */
+ if (rdev->sectors >= (2ULL << 32))
+ rdev->sectors = (2ULL << 32) - 2;
- if (rdev->sectors < sb->size * 2 && sb->level > 1)
+ if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
/* "this cannot possibly happen" ... */
ret = -EINVAL;
@@ -1173,7 +1186,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
- mddev->dev_sectors = sb->size * 2;
+ mddev->dev_sectors = ((sector_t)sb->size) * 2;
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1428,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
rdev->sb_start = calc_dev_sboffset(rdev);
if (!num_sectors || num_sectors > rdev->sb_start)
num_sectors = rdev->sb_start;
+ /* Limit to 4TB as metadata cannot record more than that.
+ * 4TB == 2^32 KB, or 2*2^32 sectors.
+ */
+ if (num_sectors >= (2ULL << 32))
+ num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
md_super_wait(rdev->mddev);
@@ -6421,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
return thread;
}
-void md_unregister_thread(mdk_thread_t *thread)
+void md_unregister_thread(mdk_thread_t **threadp)
{
+ mdk_thread_t *thread = *threadp;
if (!thread)
return;
dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
+ /* Locking ensures that mddev_unlock does not wake_up a
+ * non-existent thread
+ */
+ spin_lock(&pers_lock);
+ *threadp = NULL;
+ spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
@@ -7332,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
mdk_rdev_t *rdev;
/* resync has finished, collect result */
- md_unregister_thread(mddev->sync_thread);
- mddev->sync_thread = NULL;
+ md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/
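md_unregister_thread() now takes the address of the caller's thread pointer so it can clear that pointer under pers_lock before stopping the thread; a concurrent md_wakeup_thread() then never sees a thread that is being torn down. A small pthreads sketch of the same pattern, with illustrative names standing in for the md types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays pers_lock */

struct worker { int awake; };

static void worker_wakeup(struct worker *w)
{
	if (w)			/* callers read the field while holding reg_lock */
		w->awake = 1;
}

static void worker_unregister(struct worker **wp)
{
	struct worker *w = *wp;

	if (!w)
		return;

	pthread_mutex_lock(&reg_lock);
	*wp = NULL;		/* nobody can wake it up any more... */
	pthread_mutex_unlock(&reg_lock);

	free(w);		/* ...so tearing it down is now safe */
}

int main(void)
{
	struct worker *thread = calloc(1, sizeof(*thread));

	pthread_mutex_lock(&reg_lock);
	worker_wakeup(thread);	/* mirrors md_wakeup_thread() under pers_lock */
	pthread_mutex_unlock(&reg_lock);

	worker_unregister(&thread);
	printf("thread pointer is %s\n", thread ? "still set" : "NULL");
	return 0;
}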
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1e586bb4452e..0a309dc29b45 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 3535c23af288..d5b5fb300171 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
{
multipath_conf_t *conf = mddev->private;
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
mempool_destroy(conf->pool);
kfree(conf->multipaths);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd89..d9587dffe533 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1099,12 +1099,11 @@ read_again:
bio_list_add(&conf->pending_bio_list, mbio);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
- r1_bio_write_done(r1_bio);
-
- /* In case raid1d snuck in to freeze_array */
- wake_up(&conf->wait_barrier);
-
+ /* Mustn't call r1_bio_write_done before this next test,
+ * as it could result in the bio being freed.
+ */
if (sectors_handled < (bio->bi_size >> 9)) {
+ r1_bio_write_done(r1_bio);
/* We need another r1_bio. It has already been counted
* in bio->bi_phys_segments
*/
@@ -1117,6 +1116,11 @@ read_again:
goto retry_write;
}
+ r1_bio_write_done(r1_bio);
+
+ /* In case raid1d snuck in to freeze_array */
+ wake_up(&conf->wait_barrier);
+
if (do_sync || !bitmap || !plugged)
md_wakeup_thread(mddev->thread);
@@ -2558,8 +2562,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf);
lower_barrier(conf);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c8..0cd9672cf9cb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
md_write_end(r10_bio->mddev);
}
+static void one_write_done(r10bio_t *r10_bio)
+{
+ if (atomic_dec_and_test(&r10_bio->remaining)) {
+ if (test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else {
+ close_write(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ raid_end_bio_io(r10_bio);
+ }
+ }
+}
+
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
* Let's see if all mirrored write operations have finished
* already.
*/
- if (atomic_dec_and_test(&r10_bio->remaining)) {
- if (test_bit(R10BIO_WriteError, &r10_bio->state))
- reschedule_retry(r10_bio);
- else {
- close_write(r10_bio);
- if (test_bit(R10BIO_MadeGood, &r10_bio->state))
- reschedule_retry(r10_bio);
- else
- raid_end_bio_io(r10_bio);
- }
- }
+ one_write_done(r10_bio);
if (dec_rdev)
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
@@ -1127,20 +1132,12 @@ retry_write:
spin_unlock_irqrestore(&conf->device_lock, flags);
}
- if (atomic_dec_and_test(&r10_bio->remaining)) {
- /* This matches the end of raid10_end_write_request() */
- bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
- r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- 0);
- md_write_end(mddev);
- raid_end_bio_io(r10_bio);
- }
-
- /* In case raid10d snuck in to freeze_array */
- wake_up(&conf->wait_barrier);
+ /* Don't remove the bias on 'remaining' (one_write_done) until
+ * after checking if we need to go around again.
+ */
if (sectors_handled < (bio->bi_size >> 9)) {
+ one_write_done(r10_bio);
/* We need another r10_bio. It has already been counted
* in bio->bi_phys_segments.
*/
@@ -1154,6 +1151,10 @@ retry_write:
r10_bio->state = 0;
goto retry_write;
}
+ one_write_done(r10_bio);
+
+ /* In case raid10d snuck in to freeze_array */
+ wake_up(&conf->wait_barrier);
if (do_sync || !mddev->bitmap || !plugged)
md_wakeup_thread(mddev->thread);
@@ -2954,7 +2955,7 @@ static int run(mddev_t *mddev)
return 0;
out_free_conf:
- md_unregister_thread(mddev->thread);
+ md_unregister_thread(&mddev->thread);
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
@@ -2972,8 +2973,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf, 0);
lower_barrier(conf);
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 43709fa6b6df..ac5e8b57e50f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
return 0;
abort:
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (conf) {
print_raid5_conf(conf);
free_conf(conf);
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
{
raid5_conf_t *conf = mddev->private;
- md_unregister_thread(mddev->thread);
- mddev->thread = NULL;
+ md_unregister_thread(&mddev->thread);
if (mddev->queue)
mddev->queue->backing_dev_info.congested_fn = NULL;
free_conf(conf);
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 3db89e3cb0bb..536c16c943bd 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -224,26 +224,8 @@ static struct dvb_usb_device_properties vp7045_properties;
static int vp7045_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- int ret = dvb_usb_device_init(intf, &vp7045_properties,
- THIS_MODULE, &d, adapter_nr);
- if (ret)
- return ret;
-
- d->priv = kmalloc(20, GFP_KERNEL);
- if (!d->priv) {
- dvb_usb_device_exit(intf);
- return -ENOMEM;
- }
-
- return ret;
-}
-
-static void vp7045_usb_disconnect(struct usb_interface *intf)
-{
- struct dvb_usb_device *d = usb_get_intfdata(intf);
- kfree(d->priv);
- dvb_usb_device_exit(intf);
+ return dvb_usb_device_init(intf, &vp7045_properties,
+ THIS_MODULE, NULL, adapter_nr);
}
static struct usb_device_id vp7045_usb_table [] = {
@@ -258,7 +240,7 @@ MODULE_DEVICE_TABLE(usb, vp7045_usb_table);
static struct dvb_usb_device_properties vp7045_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-vp7045-01.fw",
- .size_of_priv = sizeof(u8 *),
+ .size_of_priv = 20,
.num_adapters = 1,
.adapter = {
@@ -305,7 +287,7 @@ static struct dvb_usb_device_properties vp7045_properties = {
static struct usb_driver vp7045_usb_driver = {
.name = "dvb_usb_vp7045",
.probe = vp7045_usb_probe,
- .disconnect = vp7045_usb_disconnect,
+ .disconnect = dvb_usb_device_exit,
.id_table = vp7045_usb_table,
};
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index eae05b500476..144f3f55d765 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -618,7 +618,6 @@ static void nvt_dump_rx_buf(struct nvt_dev *nvt)
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
DEFINE_IR_RAW_EVENT(rawir);
- unsigned int count;
u32 carrier;
u8 sample;
int i;
@@ -631,65 +630,38 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
if (nvt->carrier_detect_enabled)
carrier = nvt_rx_carrier_detect(nvt);
- count = nvt->pkts;
- nvt_dbg_verbose("Processing buffer of len %d", count);
+ nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
init_ir_raw_event(&rawir);
- for (i = 0; i < count; i++) {
- nvt->pkts--;
+ for (i = 0; i < nvt->pkts; i++) {
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
* SAMPLE_PERIOD);
- if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
- if (nvt->rawir.pulse == rawir.pulse)
- nvt->rawir.duration += rawir.duration;
- else {
- nvt->rawir.duration = rawir.duration;
- nvt->rawir.pulse = rawir.pulse;
- }
- continue;
- }
-
- rawir.duration += nvt->rawir.duration;
+ nvt_dbg("Storing %s with duration %d",
+ rawir.pulse ? "pulse" : "space", rawir.duration);
- init_ir_raw_event(&nvt->rawir);
- nvt->rawir.duration = 0;
- nvt->rawir.pulse = rawir.pulse;
-
- if (sample == BUF_PULSE_BIT)
- rawir.pulse = false;
-
- if (rawir.duration) {
- nvt_dbg("Storing %s with duration %d",
- rawir.pulse ? "pulse" : "space",
- rawir.duration);
-
- ir_raw_event_store_with_filter(nvt->rdev, &rawir);
- }
+ ir_raw_event_store_with_filter(nvt->rdev, &rawir);
/*
* BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
* indicates end of IR signal, but new data incoming. In both
* cases, it means we're ready to call ir_raw_event_handle
*/
- if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
+ if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) {
nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
ir_raw_event_handle(nvt->rdev);
}
}
+ nvt->pkts = 0;
+
nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
ir_raw_event_handle(nvt->rdev);
- if (nvt->pkts) {
- nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
- nvt->pkts = 0;
- }
-
nvt_dbg_verbose("%s done", __func__);
}
@@ -1048,7 +1020,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
spin_lock_init(&nvt->nvt_lock);
spin_lock_init(&nvt->tx.lock);
- init_ir_raw_event(&nvt->rawir);
ret = -EBUSY;
/* now claim resources */
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1241fc89a36c..0d5e0872a2ea 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -67,7 +67,6 @@ static int debug;
struct nvt_dev {
struct pnp_dev *pdev;
struct rc_dev *rdev;
- struct ir_raw_event rawir;
spinlock_t nvt_lock;
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 0800433b2092..18305c89083c 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -2858,7 +2858,6 @@ static void ov7xx0_configure(struct sd *sd)
case 0x60:
PDEBUG(D_PROBE, "Sensor is a OV7660");
sd->sensor = SEN_OV7660;
- sd->invert_led = 0;
break;
default:
PDEBUG(D_PROBE, "Unknown sensor: 0x76%x", low);
@@ -3337,7 +3336,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
case BRIDGE_OV519:
cam->cam_mode = ov519_vga_mode;
cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
- sd->invert_led = !sd->invert_led;
break;
case BRIDGE_OVFX2:
cam->cam_mode = ov519_vga_mode;
@@ -5005,24 +5003,24 @@ static const struct sd_desc sd_desc = {
/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
- {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x405f),
+ {USB_DEVICE(0x041e, 0x4052),
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4064),
- .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4068),
+ {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x045e, 0x028c),
.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
- {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x054c, 0x0155),
- .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
- {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0519),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x05a9, 0x0530),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 81b8a600783b..c477ad11f103 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -2386,7 +2386,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w1(gspca_dev, 0x01, 0x22);
msleep(100);
reg01 = SCL_SEL_OD | S_PDN_INV;
- reg17 &= MCK_SIZE_MASK;
+ reg17 &= ~MCK_SIZE_MASK;
reg17 |= 0x04; /* clock / 4 */
break;
}
@@ -2532,6 +2532,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (!mode) { /* if 640x480 */
reg17 &= ~MCK_SIZE_MASK;
reg17 |= 0x04; /* clock / 4 */
+ } else {
+ reg01 &= ~SYS_SEL_48M; /* clk 24 MHz */
+ reg17 &= ~MCK_SIZE_MASK;
+ reg17 |= 0x02; /* clock / 2 */
}
break;
case SENSOR_OV7630:
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index b5ef36222440..b3a5ecdb33ac 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev)
"'%s' Display already enabled\n",
def_display->name);
}
- /* set the update mode */
- if (def_display->caps &
- OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- if (dssdrv->enable_te)
- dssdrv->enable_te(def_display, 0);
- if (dssdrv->set_update_mode)
- dssdrv->set_update_mode(def_display,
- OMAP_DSS_UPDATE_MANUAL);
- } else {
- if (dssdrv->set_update_mode)
- dssdrv->set_update_mode(def_display,
- OMAP_DSS_UPDATE_AUTO);
- }
}
}
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 9d3459de04b2..80796eb0c53e 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <media/v4l2-event.h>
#include "isp.h"
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index e9a0e94b9995..8c70e64444e7 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -338,7 +338,7 @@ int pwc_init_controls(struct pwc_device *pdev)
if (pdev->restore_factory)
pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
- if (!pdev->features & FEATURE_MOTOR_PANTILT)
+ if (!(pdev->features & FEATURE_MOTOR_PANTILT))
return hdl->error;
/* Motor pan / tilt / reset */
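The pwc fix is the classic "! binds tighter than &" pitfall: !pdev->features & FEATURE_MOTOR_PANTILT negates the whole feature word first, so the missing-feature early return is almost never taken. Quick standalone demonstration with an illustrative flag value:

#include <stdio.h>

#define FEATURE_MOTOR_PANTILT 0x0001	/* illustrative value, not pwc's define */

int main(void)
{
	unsigned int features = 0x0002;	/* some other feature bit, no pan/tilt */

	/* (!features) & flag evaluates to 0, so the early return is skipped */
	printf("buggy check: %d\n", !features & FEATURE_MOTOR_PANTILT);
	/* !(features & flag) evaluates to 1: the feature is correctly missing */
	printf("fixed check: %d\n", !(features & FEATURE_MOTOR_PANTILT));
	return 0;
}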
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index d29f9c2d0854..e4100b1f68df 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset)
list_for_each_entry(stream, &dev->streams, list) {
if (stream->intf == intf)
- return uvc_video_resume(stream);
+ return uvc_video_resume(stream, reset);
}
uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface "
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index 48fea373c25a..29e239911d0e 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
if (remote == NULL)
return -EINVAL;
- source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+ source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
? (remote->vdev ? &remote->vdev->entity : NULL)
: &remote->subdev.entity;
if (source == NULL)
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 8244167c8915..ffd1158628b6 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream)
* buffers, making sure userspace applications are notified of the problem
* instead of waiting forever.
*/
-int uvc_video_resume(struct uvc_streaming *stream)
+int uvc_video_resume(struct uvc_streaming *stream, int reset)
{
int ret;
+ /* If the bus has been reset on resume, set the alternate setting to 0.
+ * This should be the default value, but some devices crash or otherwise
+ * misbehave if they don't receive a SET_INTERFACE request before any
+ * other video control request.
+ */
+ if (reset)
+ usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+
stream->frozen = 0;
ret = uvc_commit_video(stream, &stream->ctrl);
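For context, this is how the new reset flag is typically driven from the USB layer; a minimal sketch, assuming the usual resume/reset_resume pair of struct usb_driver callbacks (the wrapper names below are illustrative, not shown in this patch):

static int uvc_resume(struct usb_interface *intf)
{
	/* ordinary resume: the device kept its state, no SET_INTERFACE needed */
	return __uvc_resume(intf, 0);
}

static int uvc_reset_resume(struct usb_interface *intf)
{
	/* the bus was reset: force SET_INTERFACE(0) before any video request */
	return __uvc_resume(intf, 1);
}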
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index df32a43ca86a..cbdd49bf8b67 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
/* Video */
extern int uvc_video_init(struct uvc_streaming *stream);
extern int uvc_video_suspend(struct uvc_streaming *stream);
-extern int uvc_video_resume(struct uvc_streaming *stream);
+extern int uvc_video_resume(struct uvc_streaming *stream, int reset);
extern int uvc_video_enable(struct uvc_streaming *stream, int enable);
extern int uvc_probe_video(struct uvc_streaming *stream,
struct uvc_streaming_control *probe);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 06f14008b346..d72156517726 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd)
media_device_unregister_entity(&vdev->entity);
#endif
+ /* Do not call v4l2_device_put if there is no release callback set.
+ * Drivers that have no v4l2_device release callback might free the
+ * v4l2_dev instance in the video_device release callback below, so we
+ * must perform this check here.
+ *
+ * TODO: In the long run all drivers that use v4l2_device should use the
+ * v4l2_device release callback. This check will then be unnecessary.
+ */
+ if (v4l2_dev->release == NULL)
+ v4l2_dev = NULL;
+
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);
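The check above only skips the put when no v4l2_device release callback exists; a minimal sketch of the layout it expects from drivers that do provide one (struct and function names here are illustrative, not taken from any driver):

struct my_drv {
	struct v4l2_device v4l2_dev;	/* has .release = my_v4l2_release */
	struct video_device vdev;
};

static void my_v4l2_release(struct v4l2_device *v4l2_dev)
{
	/* called only after the final v4l2_device_put(), which in the code
	 * above runs after vdev->release(vdev) has returned */
	kfree(container_of(v4l2_dev, struct my_drv, v4l2_dev));
}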
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index c72856c41434..e6a2c3b302d4 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
mutex_init(&v4l2_dev->ioctl_lock);
v4l2_prio_init(&v4l2_dev->prio);
kref_init(&v4l2_dev->ref);
+ get_device(dev);
v4l2_dev->dev = dev;
if (dev == NULL) {
/* If dev == NULL, then name must be filled in by the caller */
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
dev_set_drvdata(v4l2_dev->dev, NULL);
+ put_device(v4l2_dev->dev);
v4l2_dev->dev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
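The two hunks above simply pin the parent device for the lifetime of the v4l2_device; the invariant they maintain, as a condensed sketch:

/* v4l2_device_register():   get_device(dev);           v4l2_dev->dev = dev;
 * v4l2_device_disconnect(): put_device(v4l2_dev->dev); v4l2_dev->dev = NULL;
 *
 * Every successful register is balanced by exactly one disconnect, so the
 * parent (usually a PCI/USB struct device) cannot be released while the
 * v4l2_device still dereferences it, for example in dev_set_drvdata() above.
 */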
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 85d3048c1d67..bb7f17f2a33c 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -1332,6 +1332,8 @@ static __devinit bool viacam_serial_is_enabled(void)
struct pci_bus *pbus = pci_find_bus(0, 0);
u8 cbyte;
+ if (!pbus)
+ return false;
pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN,
VIACAM_SERIAL_CREG, &cbyte);
if ((cbyte & VIACAM_SERIAL_BIT) == 0)
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 21131c7b0f1e..563654c9b19e 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
ct->regs.ack = JZ_REG_ADC_STATUS;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5d1fca0277ef..f83103b8970d 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -135,10 +135,13 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
max8997->dev = &i2c->dev;
max8997->i2c = i2c;
max8997->type = id->driver_data;
+ max8997->irq = i2c->irq;
if (!pdata)
goto err;
+ max8997->irq_base = pdata->irq_base;
+ max8997->ono = pdata->ono;
max8997->wakeup = pdata->wakeup;
mutex_init(&max8997->iolock);
@@ -152,6 +155,8 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
pm_runtime_set_active(max8997->dev);
+ max8997_irq_init(max8997);
+
mfd_add_devices(max8997->dev, -1, max8997_devs,
ARRAY_SIZE(max8997_devs),
NULL, 0);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 29601e7d606d..86e14583a082 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -676,7 +677,6 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
| OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
- reg |= (1 << (i + 1));
} else
continue;
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index 2bfad5c86cc7..a56be931551c 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -178,8 +178,10 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->irq_num = TPS65910_NUM_IRQ;
+ break;
case TPS65911:
tps65910->irq_num = TPS65911_NUM_IRQ;
+ break;
}
/* Register with genirq */
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index b5d598c3aa71..7cbf2aa9e64f 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
u8 ch_msb, ch_lsb;
int ret;
- if (!req)
+ if (!req || !twl4030_madc)
return -EINVAL;
+
mutex_lock(&twl4030_madc->lock);
if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) {
ret = -EINVAL;
@@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev)
if (!madc)
return -ENOMEM;
+ madc->dev = &pdev->dev;
+
/*
* Phoenix provides 2 interrupt lines. The first one is connected to
* the OMAP. The other one can be connected to the other processor such
diff --git a/drivers/mfd/wm8350-gpio.c b/drivers/mfd/wm8350-gpio.c
index ebf99bef392f..d584f6b4d6e2 100644
--- a/drivers/mfd/wm8350-gpio.c
+++ b/drivers/mfd/wm8350-gpio.c
@@ -37,7 +37,7 @@ static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir)
return ret;
}
-static int gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
+static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db)
{
if (db == WM8350_GPIO_DEBOUNCE_ON)
return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE,
@@ -210,7 +210,7 @@ int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
goto err;
if (gpio_set_polarity(wm8350, gpio, pol))
goto err;
- if (gpio_set_debounce(wm8350, gpio, debounce))
+ if (wm8350_gpio_set_debounce(wm8350, gpio, debounce))
goto err;
if (gpio_set_dir(wm8350, gpio, dir))
goto err;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index b928bc14e97b..8b51cd62d067 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
* both have been read. So the value read will always be correct.
* Set BOOT bit to refresh factory tuning values.
*/
- lis3->read(lis3, CTRL_REG2, &reg);
- if (lis3->whoami == WAI_12B)
- reg |= CTRL2_BDU | CTRL2_BOOT;
- else
- reg |= CTRL2_BOOT_8B;
- lis3->write(lis3, CTRL_REG2, reg);
+ if (lis3->pdata) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ if (lis3->whoami == WAI_12B)
+ reg |= CTRL2_BDU | CTRL2_BOOT;
+ else
+ reg |= CTRL2_BOOT_8B;
+ lis3->write(lis3, CTRL_REG2, reg);
+ }
/* LIS3 power on delay is quite long */
msleep(lis3->pwron_delay / lis3lv02d_get_odr());
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index 06df1877ad0f..0b56e3f43573 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -165,6 +165,11 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
const char *thread_name)
{
+ /*
+ * Since we access the comm member in current's task_struct, we only
+ * need to be as large as what 'comm' in that structure is.
+ */
+ char comm[TASK_COMM_LEN];
struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
.channel = 0};
const char *thread_name_p;
@@ -172,13 +177,6 @@ static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
u8 control_frame[CONTROL_FRAME_LEN];
if (!thread_name) {
- /*
- * Since we access the comm member in current's task_struct,
- * we only need to be as large as what 'comm' in that
- * structure is.
- */
- char comm[TASK_COMM_LEN];
-
if (!in_interrupt())
get_task_comm(comm, current);
else
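Hoisting the buffer matters because a pointer taken inside the if block (presumably thread_name_p in the code above) can be used after that block ends; a generic C illustration of the lifetime bug this avoids, unrelated to the driver itself:

#include <stdio.h>

const char *broken(void)
{
	const char *p;

	if (1) {
		char buf[16];			/* storage ends at the closing brace */
		snprintf(buf, sizeof(buf), "name");
		p = buf;
	}
	return p;	/* undefined behaviour: buf no longer exists */
}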
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1ff5486213fb..4c1a648d00fc 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
/*
* Reliable writes are used to implement Forced Unit Access and
* REQ_META accesses, and are supported only on MMCs.
+ *
+ * XXX: this really needs a good explanation of why REQ_META
+ * is treated specially.
*/
bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
(req->cmd_flags & REQ_META)) &&
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 91a0a7460ebb..b27b94078c21 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
- mmc_host_clk_gate(host);
+ mmc_host_clk_release(host);
}
}
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
- mmc_host_clk_ungate(host);
+ mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
host->ops->request(host, mrq);
}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
+ mmc_host_clk_hold(host);
host->ios.chip_select = mode;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz < host->f_min);
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+ mmc_host_clk_hold(host);
+ __mmc_set_clock(host, hz);
+ mmc_host_clk_release(host);
+}
+
#ifdef CONFIG_MMC_CLKGATE
/*
* This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
if (host->clk_old) {
BUG_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
- mmc_set_clock(host, host->clk_old);
+ __mmc_set_clock(host, host->clk_old);
}
}
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
+ mmc_host_clk_hold(host);
host->ios.bus_mode = mode;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
+ mmc_host_clk_hold(host);
host->ios.bus_width = width;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
ocr &= 3 << bit;
+ mmc_host_clk_hold(host);
host->ios.vdd = bit;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
} else {
pr_warning("%s: host doesn't support card's voltages\n",
mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
+ mmc_host_clk_hold(host);
host->ios.timing = timing;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
+ mmc_host_clk_hold(host);
host->ios.drv_type = drv_type;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
{
int bit;
+ mmc_host_clk_hold(host);
+
/* If ocr is set, we use it */
if (host->ocr)
bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
* time required to reach a stable voltage.
*/
mmc_delay(10);
+
+ mmc_host_clk_release(host);
}
static void mmc_power_off(struct mmc_host *host)
{
+ mmc_host_clk_hold(host);
+
host->ios.clock = 0;
host->ios.vdd = 0;
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
+
+ mmc_host_clk_release(host);
}
/*
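Every hunk in this file follows the same shape; a condensed sketch of the rule the patch establishes, using the helper names introduced in host.c below:

/* Any path that programs host->ios must keep the MCI clock ungated while
 * it does so: */
mmc_host_clk_hold(host);		/* take a clock reference, ungate if first user */
host->ios.bus_width = MMC_BUS_WIDTH_4;	/* ...update whichever ios field...             */
mmc_set_ios(host);			/* controller is guaranteed a running clock     */
mmc_host_clk_release(host);		/* drop the reference; gating is deferred       */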
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a2..793d0a0dad8d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
}
/**
- * mmc_host_clk_ungate - ungate hardware MCI clocks
+ * mmc_host_clk_hold - ungate hardware MCI clocks
* @host: host to ungate.
*
* Makes sure the host ios.clock is restored to a non-zero value
* past this call. Increase clock reference count and ungate clock
* if we're the first user.
*/
-void mmc_host_clk_ungate(struct mmc_host *host)
+void mmc_host_clk_hold(struct mmc_host *host)
{
unsigned long flags;
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
}
/**
- * mmc_host_clk_gate - gate off hardware MCI clocks
+ * mmc_host_clk_release - gate off hardware MCI clocks
* @host: host to gate.
*
* Calls the host driver with ios.clock set to zero as often as possible
* in order to gate off hardware MCI clocks. Decrease clock reference
* count and schedule disabling of clock.
*/
-void mmc_host_clk_gate(struct mmc_host *host)
+void mmc_host_clk_release(struct mmc_host *host)
{
unsigned long flags;
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- schedule_work(&host->clk_gate_work);
+ queue_work(system_nrt_wq, &host->clk_gate_work);
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
if (cancel_work_sync(&host->clk_gate_work))
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
- mmc_host_clk_ungate(host);
+ mmc_host_clk_hold(host);
/* There should be only one user now */
WARN_ON(host->clk_requests > 1);
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f911928..fb8a5cd2e4a1 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_ungate(struct mmc_host *host);
-void mmc_host_clk_gate(struct mmc_host *host);
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
unsigned int mmc_host_clk_rate(struct mmc_host *host);
#else
-static inline void mmc_host_clk_ungate(struct mmc_host *host)
+static inline void mmc_host_clk_hold(struct mmc_host *host)
{
}
-static inline void mmc_host_clk_gate(struct mmc_host *host)
+static inline void mmc_host_clk_release(struct mmc_host *host)
{
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975ff2bb3..0370e03e3142 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
return 0;
}
-static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+static void sd_update_bus_speed_mode(struct mmc_card *card)
{
- unsigned int bus_speed = 0, timing = 0;
- int err;
-
/*
* If the host doesn't support any of the UHS-I modes, fallback on
* default speed.
*/
if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
- return 0;
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+ card->sd_bus_speed = 0;
+ return;
+ }
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
- bus_speed = UHS_SDR104_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR104;
- card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
- bus_speed = UHS_DDR50_BUS_SPEED;
- timing = MMC_TIMING_UHS_DDR50;
- card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR50)) {
- bus_speed = UHS_SDR50_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR50;
- card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
(card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
- bus_speed = UHS_SDR25_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR25;
- card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
- bus_speed = UHS_SDR12_BUS_SPEED;
- timing = MMC_TIMING_UHS_SDR12;
- card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+ int err;
+ unsigned int timing = 0;
+
+ switch (card->sd_bus_speed) {
+ case UHS_SDR104_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ break;
+ case UHS_DDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ break;
+ case UHS_SDR50_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ break;
+ case UHS_SDR25_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ break;
+ case UHS_SDR12_BUS_SPEED:
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ break;
+ default:
+ return 0;
}
- card->sd_bus_speed = bus_speed;
- err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+ err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
if (err)
return err;
- if ((status[16] & 0xF) != bus_speed)
+ if ((status[16] & 0xF) != card->sd_bus_speed)
printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
mmc_hostname(card->host));
else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
}
+ /*
+ * Select the bus speed mode depending on host
+ * and card capability.
+ */
+ sd_update_bus_speed_mode(card);
+
/* Set the driver strength for the card */
err = sd_select_driver_type(card, status);
if (err)
goto out;
- /* Set bus speed mode of the card */
- err = sd_set_bus_speed_mode(card, status);
+ /* Set current limit for the card */
+ err = sd_set_current_limit(card, status);
if (err)
goto out;
- /* Set current limit for the card */
- err = sd_set_current_limit(card, status);
+ /* Set bus speed mode of the card */
+ err = sd_set_bus_speed_mode(card, status);
if (err)
goto out;
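With the reordering above, the UHS-I initialization path ends up executing in this sequence; a sketch of the resulting flow (error handling elided):

sd_update_bus_speed_mode(card);		/* pick SDR12..SDR104/DDR50 from host/card caps */
sd_select_driver_type(card, status);	/* driver strength, can use the chosen mode     */
sd_set_current_limit(card, status);	/* current limit next                           */
sd_set_bus_speed_mode(card, status);	/* CMD6 switch plus timing/clock change last    */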
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 0e9780f5a4a9..4dc0028086a3 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2bd7bf4fece7..fe886d6c474a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
ctrl &= ~SDHCI_CTRL_8BITBUS;
break;
default:
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
break;
}
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 774f6439d7ce..0c4a672f5db6 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->hclk = clk_get_rate(priv->clk);
mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
mmc_data->get_cd = sh_mobile_sdhi_get_cd;
- if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
- mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
+ if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+ mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 65b5b76cc379..64fbb0021825 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
#define ubi_dbg_msg(fmt, ...) do { \
if (0) \
- pr_debug(fmt "\n", ##__VA_ARGS__); \
+ printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
} while (0)
#define dbg_msg(fmt, ...) ubi_dbg_msg(fmt, ##__VA_ARGS__)
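The "if (0)" wrapper is the usual trick for keeping compile-time checking of the format string and arguments while generating no code; switching the dead branch from pr_debug() to a plain printk() presumably avoids the per-callsite machinery that dynamic debug attaches to pr_debug(). A generic illustration of the idiom (macro name is made up):

#define my_dbg(fmt, ...) do {					\
	if (0)							\
		printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);	\
} while (0)

/* my_dbg("bad PEB %d", pnum) still type-checks its arguments, but the
 * compiler drops the call entirely because the branch is dead. */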
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8d0314dbd946..a44874e24f2a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2535,7 +2535,7 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"
config PCH_GBE
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
depends on PCI
select MII
---help---
@@ -2548,10 +2548,11 @@ config PCH_GBE
This driver enables Gigabit Ethernet function.
This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223.
- ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
+ Output Hub), ML7223/ML7831.
+ ML7223 IOH is for MP (Media Phone) use. ML7831 IOH is for general
+ purpose use.
+ ML7223/ML7831 is a companion chip for the Intel Atom E6xx series.
+ ML7223/ML7831 is completely compatible with the Intel EG20T PCH.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 52fe21e1e2cd..3b1416e3d217 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
struct net_device *dev = (struct net_device *)data;
struct dev_priv *priv = netdev_priv(dev);
unsigned int lnkstat, carrier;
+ unsigned long flags;
+ spin_lock_irqsave(&priv->chip_lock, flags);
lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
carrier = netif_carrier_ok(dev);
if (lnkstat && !carrier) {
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c423504a755f..e46df5331c55 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -315,6 +315,14 @@ union db_prod {
u32 raw;
};
+/* dropless fc FW/HW related params */
+#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
+#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
+ ETH_MAX_AGGREGATION_QUEUES_E1 :\
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
+#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
+#define FW_PREFETCH_CNT 16
+#define DROPLESS_FC_HEADROOM 100
/* MC hsi */
#define BCM_PAGE_SHIFT 12
@@ -331,15 +339,35 @@ union db_prod {
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
+#define NEXT_PAGE_SGE_DESC_CNT 2
+#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK (RX_SGE_CNT - 1)
#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE (NUM_RX_SGE - 1)
#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
- (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+ (MAX_RX_SGE_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
+ (x) + 1)
#define RX_SGE(x) ((x) & MAX_RX_SGE)
+/*
+ * Number of required SGEs is the sum of two:
+ * 1. The number of possibly open aggregations (the next packet for
+ * these aggregations will probably consume an SGE immediately)
+ * 2. The rest of the BRB blocks divided by 2 (a block consumes a new SGE
+ * only after placement on a BD for a new TPA aggregation)
+ *
+ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
+ */
+#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
+ (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
+#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
+ MAX_RX_SGE_CNT)
+#define SGE_TH_LO(bp) (NUM_SGE_REQ + \
+ NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
+#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
/* Manipulate a bit vector defined as an array of u64 */
/* Number of bits in one sge_mask array element */
@@ -551,24 +579,43 @@ struct bnx2x_fastpath {
#define NUM_TX_RINGS 16
#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define NEXT_PAGE_TX_DESC_CNT 1
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD (NUM_TX_BD - 1)
#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
- (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ (MAX_TX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
+ (x) + 1)
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
+#define NEXT_PAGE_RX_DESC_CNT 2
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
#define RX_DESC_MASK (RX_DESC_CNT - 1)
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL 128
+
+/* dropless fc calculations for BDs
+ *
+ * The number of BDs should match the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_BD_REQ BRB_SIZE(bp)
+#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
+ MAX_RX_DESC_CNT)
+#define BD_TH_LO(bp) (NUM_BD_REQ + \
+ NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
+#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
ETH_MIN_RX_CQES_WITH_TPA_E1 : \
@@ -579,7 +626,9 @@ struct bnx2x_fastpath {
MIN_RX_AVAIL))
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
- (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+ (MAX_RX_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
+ (x) + 1)
#define RX_BD(x) ((x) & MAX_RX_BD)
/*
@@ -589,14 +638,31 @@ struct bnx2x_fastpath {
#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
+#define NEXT_PAGE_RCQ_DESC_CNT 1
+#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
- (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+ (MAX_RCQ_DESC_CNT - 1)) ? \
+ (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
+ (x) + 1)
#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+/* dropless fc calculations for RCQs
+ *
+ * The number of RCQs should match the number of buffers in the BRB:
+ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
+ * "next" elements on each page
+ */
+#define NUM_RCQ_REQ BRB_SIZE(bp)
+#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
+ MAX_RCQ_DESC_CNT)
+#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
+ NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
+ FW_DROP_LEVEL(bp))
+#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
+
/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
@@ -685,24 +751,17 @@ struct bnx2x_fastpath {
#define FP_CSB_FUNC_OFF \
offsetof(struct cstorm_status_block_c, func)
-#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */
- /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
- /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
-#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
- /* (HC_INDEX_U_ETH_RX_BD_CONS) */
-
-#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
- /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
-#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
- /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+#define HC_INDEX_ETH_RX_CQ_CONS 1
-#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+#define HC_INDEX_OOO_TX_CQ_CONS 4
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6
+
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
#define BNX2X_RX_SB_INDEX \
(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
@@ -1100,11 +1159,12 @@ struct bnx2x {
#define BP_PORT(bp) (bp->pfid & 1)
#define BP_FUNC(bp) (bp->pfid)
#define BP_ABS_FUNC(bp) (bp->pf_num)
-#define BP_E1HVN(bp) (bp->pfid >> 1)
-#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/
-#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
- BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_VN(bp) ((bp)->pfid >> 1)
+#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
+#define BP_L_ID(bp) (BP_VN(bp) << 2)
+#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
+ (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
struct net_device *dev;
struct pci_dev *pdev;
@@ -1767,7 +1827,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
- BP_E1HVN(bp))
+ BP_VN(bp))
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)
@@ -1793,7 +1853,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
- (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+ (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
(x))
#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
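To make the new threshold macros concrete, a worked example using assumed inputs of BRB_SIZE = 512 and MAX_AGG_QS = 64 (the real values come from the chip-specific firmware headers, so treat these numbers as illustrative only):

/*
 *   NUM_SGE_REQ    = 64 + (512 - 64) / 2                       = 288
 *   NUM_SGE_PG_REQ = roundup(288 / MAX_RX_SGE_CNT)             (pages of SGEs)
 *   SGE_TH_LO      = 288 + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT
 *   SGE_TH_HI      = SGE_TH_LO + DROPLESS_FC_HEADROOM          = SGE_TH_LO + 100
 *
 * BD_TH_* and RCQ_TH_* follow the same pattern, with FW_DROP_LEVEL added on
 * top of the page-boundary correction.
 */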
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 37e5790681ad..c4cbf9736414 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -987,8 +987,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
- int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
u16 ring_prod;
int i, j;
@@ -1001,7 +999,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
if (!fp->disable_tpa) {
/* Fill the per-aggregation pool */
- for (i = 0; i < max_agg_queues; i++) {
+ for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
&fp->tpa_info[i];
struct sw_rx_bd *first_buf =
@@ -1041,7 +1039,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
bnx2x_free_rx_sge_range(bp, fp,
ring_prod);
bnx2x_free_tpa_pool(bp, fp,
- max_agg_queues);
+ MAX_AGG_QS(bp));
fp->disable_tpa = 1;
ring_prod = 0;
break;
@@ -1137,9 +1135,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
bnx2x_free_rx_bds(fp);
if (!fp->disable_tpa)
- bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
- ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
}
}
@@ -3095,15 +3091,20 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
struct bnx2x_fastpath *fp = &bp->fp[index];
int ring_size = 0;
u8 cos;
+ int rx_ring_size = 0;
/* if rx_ring_size specified - use it */
- int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+ if (!bp->rx_ring_size) {
- /* allocate at least number of buffers required by FW */
- rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
- MIN_RX_SIZE_TPA,
- rx_ring_size);
+ rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA, rx_ring_size);
+
+ bp->rx_ring_size = rx_ring_size;
+ } else
+ rx_ring_size = bp->rx_ring_size;
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index a1e004a82f7a..0b4acf67e0c6 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
break;
case DCB_CAP_ATTR_DCBX:
*cap = BNX2X_DCBX_CAPS;
+ break;
default:
rval = -EINVAL;
break;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 221863059dae..cf3e47914dd7 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -363,13 +363,50 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
/* advertise the requested speed and duplex if supported */
- cmd->advertising &= bp->port.supported[cfg_idx];
+ if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ DP(NETIF_MSG_LINK, "Advertisement parameters "
+ "are not supported\n");
+ return -EINVAL;
+ }
bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
- bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+ bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
cmd->advertising);
+ if (cmd->advertising) {
+
+ bp->link_params.speed_cap_mask[cfg_idx] = 0;
+ if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_10baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
+ if (cmd->advertising & ADVERTISED_100baseT_Full)
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
+
+ if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
+ }
+ if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+ }
+ if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseKX_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
+
+ if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_10000baseKR_Full))
+ bp->link_params.speed_cap_mask[cfg_idx] |=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+ }
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
switch (speed) {
@@ -1310,10 +1347,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
if (bp->rx_ring_size)
ering->rx_pending = bp->rx_ring_size;
else
- if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
- ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
- else
- ering->rx_pending = MAX_RX_AVAIL;
+ ering->rx_pending = MAX_RX_AVAIL;
ering->rx_mini_pending = 0;
ering->rx_jumbo_pending = 0;
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index d45b1555a602..ba15bdc5a1a9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -778,9 +778,9 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
{
u32 nig_reg_adress_crd_weight = 0;
u32 pbf_reg_adress_crd_weight = 0;
- /* Calculate and set BW for this COS*/
- const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
- const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+ /* Calculate and set BW for this COS - use 1 instead of 0 for BW */
+ const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw;
switch (cos_entry) {
case 0:
@@ -852,18 +852,12 @@ static int bnx2x_ets_e3b0_get_total_bw(
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
-
- if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
- "was set to 0\n");
- return -EINVAL;
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
}
- *total_bw +=
- ets_params->cos[cos_idx].params.bw_params.bw;
- }
}
- /*Check taotl BW is valid */
+ /* Check total BW is valid */
if ((100 != *total_bw) || (0 == *total_bw)) {
if (0 == *total_bw) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config toatl BW"
@@ -1726,7 +1720,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* Check loopback mode */
if (lb)
- val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+ val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
bnx2x_set_xumac_nig(params,
((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
@@ -3630,6 +3624,12 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+ /* Advertise and set FEC (Forward Error Correction) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2,
+ (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY |
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ));
+
/* Enable CL37 BAM */
if (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -5924,7 +5924,7 @@ int bnx2x_set_led(struct link_params *params,
(tmp | EMAC_LED_OVERRIDE));
/*
* return here without enabling traffic
- * LED blink andsetting rate in ON mode.
+ * LED blink and setting rate in ON mode.
* In oper mode, enabling LED blink
* and setting rate is needed.
*/
@@ -5936,7 +5936,11 @@ int bnx2x_set_led(struct link_params *params,
* This is a work-around for HW issue found when link
* is up in CL73
*/
- REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+ if ((!CHIP_IS_E3(bp)) ||
+ (CHIP_IS_E3(bp) &&
+ mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp) ||
(mode == LED_MODE_ON))
@@ -10638,8 +10642,7 @@ static struct bnx2x_phy phy_warpcore = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_HW_LOCK_REQUIRED,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10765,8 +10768,7 @@ static struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_INIT_XGXS_FIRST,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10797,8 +10799,7 @@ static struct bnx2x_phy phy_8726 = {
.addr = 0xff,
.def_md_devad = 0,
.flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_INIT_XGXS_FIRST),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -10829,8 +10830,7 @@ static struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
.def_md_devad = 0,
- .flags = (FLAGS_FAN_FAILURE_DET_REQ |
- FLAGS_TX_ERROR_CHECK),
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f74582a22c68..15f800085bb2 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -407,8 +407,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
- opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
- (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+ opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+ (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
#ifdef __BIG_ENDIAN
@@ -1419,7 +1419,7 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
/* init leading/trailing edge */
if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
val |= 0x1100;
@@ -1471,7 +1471,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
/* init leading/trailing edge */
if (IS_MF(bp)) {
- val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xee0f | (1 << (BP_VN(bp) + 4)));
if (bp->port.pmf)
/* enable nig and gpio3 attention */
val |= 0x1100;
@@ -2287,7 +2287,7 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
int vn;
bp->vn_weight_sum = 0;
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
u32 vn_cfg = bp->mf_config[vn];
u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
@@ -2320,12 +2320,18 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
struct fairness_vars_per_vn m_fair_vn;
u32 vn_cfg = bp->mf_config[vn];
- int func = 2*vn + BP_PORT(bp);
+ int func = func_by_vn(bp, vn);
u16 vn_min_rate, vn_max_rate;
int i;
@@ -2422,7 +2428,7 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
*
* and there are 2 functions per port
*/
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
if (func >= E1H_FUNC_MAX)
@@ -2454,7 +2460,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
/* calculate and set min-max rate for each vn */
if (bp->port.pmf)
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
bnx2x_init_vn_minmax(bp, vn);
/* always enable rate shaping and fairness */
@@ -2473,16 +2479,15 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
- int port = BP_PORT(bp);
int func;
int vn;
/* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
- if (vn == BP_E1HVN(bp))
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
continue;
- func = ((vn << 1) | port);
+ func = func_by_vn(bp, vn);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
(LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
}
@@ -2577,7 +2582,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
bnx2x_dcbx_pmf_update(bp);
/* enable nig attention */
- val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+ val = (0xff0f | (1 << (BP_VN(bp) + 4)));
if (bp->common.int_block == INT_BLOCK_HC) {
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
@@ -2756,8 +2761,14 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
u16 tpa_agg_size = 0;
if (!fp->disable_tpa) {
- pause->sge_th_hi = 250;
- pause->sge_th_lo = 150;
+ pause->sge_th_lo = SGE_TH_LO(bp);
+ pause->sge_th_hi = SGE_TH_HI(bp);
+
+ /* validate the SGE ring has enough entries to cross the high threshold */
+ WARN_ON(bp->dropless_fc &&
+ pause->sge_th_hi + FW_PREFETCH_CNT >
+ MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
+
tpa_agg_size = min_t(u32,
(min_t(u32, 8, MAX_SKB_FRAGS) *
SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
@@ -2771,10 +2782,21 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* pause - not for e1 */
if (!CHIP_IS_E1(bp)) {
- pause->bd_th_hi = 350;
- pause->bd_th_lo = 250;
- pause->rcq_th_hi = 350;
- pause->rcq_th_lo = 250;
+ pause->bd_th_lo = BD_TH_LO(bp);
+ pause->bd_th_hi = BD_TH_HI(bp);
+
+ pause->rcq_th_lo = RCQ_TH_LO(bp);
+ pause->rcq_th_hi = RCQ_TH_HI(bp);
+ /*
+ * validate that rings have enough entries to cross
+ * high thresholds
+ */
+ WARN_ON(bp->dropless_fc &&
+ pause->bd_th_hi + FW_PREFETCH_CNT >
+ bp->rx_ring_size);
+ WARN_ON(bp->dropless_fc &&
+ pause->rcq_th_hi + FW_PREFETCH_CNT >
+ NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
pause->pri_map = 1;
}
@@ -2802,9 +2824,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
* For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
- rxq_init->max_tpa_queues =
- (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+ rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq_init->fw_sb_id = fp->fw_sb_id;
@@ -4808,6 +4828,37 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
+
+/* allocates state machine ids. */
+static inline
+void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
+{
+ /* zero out state machine indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
+
+ /* map indices */
+ /* rx indices */
+ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
+ SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+
+ /* tx indices */
+ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+ index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
+ SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
+}
+
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
@@ -4839,6 +4890,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
hc_sm_p = sb_data_e2.common.state_machine;
sb_data_p = (u32 *)&sb_data_e2;
data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e2.index_data);
} else {
memset(&sb_data_e1x, 0,
sizeof(struct hc_status_block_data_e1x));
@@ -4853,6 +4905,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
hc_sm_p = sb_data_e1x.common.state_machine;
sb_data_p = (u32 *)&sb_data_e1x;
data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+ bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
}
bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
@@ -4890,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
int igu_seg_id;
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
- int reg_offset;
+ int reg_offset, reg_offset_en5;
u64 section;
int index;
struct hc_sp_status_block_data sp_sb_data;
@@ -4913,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+ reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
+ MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
int sindex;
/* take care of sig[0]..sig[4] */
@@ -4927,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
* and not 16 between the different groups
*/
bp->attn_group[index].sig[4] = REG_RD(bp,
- reg_offset + 0x10 + 0x4*index);
+ reg_offset_en5 + 0x4*index);
else
bp->attn_group[index].sig[4] = 0;
}
@@ -5802,7 +5857,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* take the UNDI lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
@@ -5814,7 +5869,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
}
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
@@ -6671,12 +6726,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
if (CHIP_MODE_IS_4_PORT(bp))
dsb_idx = BP_FUNC(bp);
else
- dsb_idx = BP_E1HVN(bp);
+ dsb_idx = BP_VN(bp);
prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
IGU_BC_BASE_DSB_PROD + dsb_idx :
IGU_NORM_BASE_DSB_PROD + dsb_idx);
+ /*
+ * igu prods come in chunks of E1HVN_MAX (4) -
+ * it does not matter what the current chip mode is
+ */
for (i = 0; i < (num_segs * E1HVN_MAX);
i += E1HVN_MAX) {
addr = IGU_REG_PROD_CONS_MEMORY +
@@ -7568,9 +7627,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
u32 val;
+ u16 pmc;
+
/* The mac address is written to entries 1-4 to
- preserve entry 0 which is used by the PMF */
- u8 entry = (BP_E1HVN(bp) + 1)*8;
+ * preserve entry 0 which is used by the PMF
+ */
+ u8 entry = (BP_VN(bp) + 1)*8;
val = (mac_addr[0] << 8) | mac_addr[1];
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
@@ -7579,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
(mac_addr[4] << 8) | mac_addr[5];
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+ /* Enable the PME and clear the status */
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
} else
@@ -8546,10 +8613,12 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* Check if there is any driver already loaded */
val = REG_RD(bp, MISC_REG_UNPREPARED);
if (val == 0x1) {
- /* Check if it is the UNDI driver
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+ /*
+ * Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (val == 0x7) {
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8587,9 +8656,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bnx2x_fw_command(bp, reset_code, 0);
}
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
bnx2x_undi_int_disable(bp);
port = BP_PORT(bp);
@@ -8639,8 +8705,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bp->fw_seq =
(SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- } else
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+ }
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
}
@@ -8777,13 +8845,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
int pfid = BP_FUNC(bp);
- int vn = BP_E1HVN(bp);
int igu_sb_id;
u32 val;
u8 fid, igu_sb_cnt = 0;
bp->igu_base_sb = 0xff;
if (CHIP_INT_MODE_IS_BC(bp)) {
+ int vn = BP_VN(bp);
igu_sb_cnt = bp->igu_sb_cnt;
bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
FP_SB_MAX_E1x;
@@ -9416,6 +9484,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->igu_base_sb = 0;
} else {
bp->common.int_block = INT_BLOCK_IGU;
+
+ /* do not allow device reset during IGU info processing */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
@@ -9447,6 +9519,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_igu_cam_info(bp);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
/*
@@ -9473,7 +9546,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
- vn = BP_E1HVN(bp);
+ vn = BP_VN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -9593,13 +9666,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* port info */
bnx2x_get_port_hwinfo(bp);
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
-
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
@@ -9765,6 +9831,14 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
+ /* init fw_seq after undi_unload! */
+ if (!BP_NOMCP(bp)) {
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+ }
+
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
@@ -10259,17 +10333,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
- /* Clean the following indirect addresses for all functions since it
+ /*
+ * Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+ if (CHIP_IS_E1x(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
/*
* Enable internal target-read (in case we are probed after PF FLR).
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 40266c14e6dc..fc7bd0f23c0b 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1384,6 +1384,18 @@
Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108
#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8
+/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688
+/* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped
+ * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1
+ * parity; [31-10] Reserved; */
+#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0
/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
128 bit vector */
#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000
@@ -5320,7 +5332,7 @@
#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
-#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
+#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2)
#define XMAC_CTRL_REG_RX_EN (0x1<<1)
#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
#define XMAC_CTRL_REG_TX_EN (0x1<<0)
@@ -5766,7 +5778,7 @@
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
#define HW_LOCK_RESOURCE_SPIO 2
-#define HW_LOCK_RESOURCE_UNDI 5
+#define HW_LOCK_RESOURCE_RESET 5
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6853,6 +6865,9 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 771f6803b238..9908f2bbcf73 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -710,7 +710,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
break;
case MAC_TYPE_NONE: /* unreached */
- BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+ DP(BNX2X_MSG_STATS,
+ "stats updated by DMAE but no MAC active\n");
return -1;
default: /* unreached */
@@ -1391,7 +1392,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
- int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+ int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
u32 func_stx;
/* sanity */
@@ -1404,7 +1405,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
func_stx = bp->func_stx;
for (vn = VN_0; vn < vn_max; vn++) {
- int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+ int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
bnx2x_func_stats_init(bp);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index a047eb973e3b..47b928ed08f8 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
+ if (!bond->kill_timers)
+ queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7f8b20a34ee3..d4fbd2e62616 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work)
}
re_arm:
- queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
+ if (!bond->kill_timers)
+ queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
out:
read_unlock(&bond->lock);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 43f2ea541088..6d79b78cfc75 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
read_lock(&bond->lock);
+ if (bond->kill_timers)
+ goto out;
+
/* rejoin all groups on bond device */
__bond_resend_igmp_join_requests(bond->dev);
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
__bond_resend_igmp_join_requests(vlan_dev);
}
- if (--bond->igmp_retrans > 0)
+ if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+out:
read_unlock(&bond->lock);
}
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work)
}
re_arm:
- if (bond->params.miimon)
+ if (bond->params.miimon && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->mii_work,
msecs_to_jiffies(bond->params.miimon));
out:
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
}
re_arm:
- if (bond->params.arp_interval)
+ if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
bond_ab_arp_probe(bond);
re_arm:
- if (bond->params.arp_interval)
+ if (bond->params.arp_interval && !bond->kill_timers)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
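The bonding hunks above all add the same guard: each monitor work item checks bond->kill_timers before re-queueing itself, so teardown cannot race with a fresh re-arm. A minimal sketch of that pattern follows; the struct, field names and interval are hypothetical placeholders, not the bonding driver's actual definitions.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

/* Hypothetical example object; stands in for struct bonding. */
struct demo {
	rwlock_t lock;
	int kill_timers;		/* set by the teardown path */
	struct workqueue_struct *wq;
	struct delayed_work monitor;
};

static void demo_monitor(struct work_struct *work)
{
	struct demo *d = container_of(work, struct demo, monitor.work);

	read_lock(&d->lock);
	if (d->kill_timers)
		goto out;

	/* ... periodic monitoring work would go here ... */

	/* re-arm only while teardown has not started */
	if (!d->kill_timers)
		queue_delayed_work(d->wq, &d->monitor, msecs_to_jiffies(100));
out:
	read_unlock(&d->lock);
}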
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index a81249246ece..2adc294f512a 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -46,6 +46,7 @@
#include <linux/skbuff.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 805076c54f1b..da5a5d9b8aff 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
if (te && te->ctx && te->client && te->client->redirect) {
update_tcb = te->client->redirect(te->ctx, old, new, e);
if (update_tcb) {
+ rcu_read_lock();
l2t_hold(L2DATA(tdev), e);
+ rcu_read_unlock();
set_l2t_ix(tdev, tid, e);
}
}
}
- l2t_release(L2DATA(tdev), e);
+ l2t_release(tdev, e);
}
/*
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
goto out_free;
err = -ENOMEM;
- L2DATA(dev) = t3_init_l2t(l2t_capacity);
+ RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity));
if (!L2DATA(dev))
goto out_free;
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
- L2DATA(dev) = NULL;
+ rcu_assign_pointer(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
}
+static void clean_l2_data(struct rcu_head *head)
+{
+ struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
+ t3_free_l2t(d);
+}
+
+
void cxgb3_offload_deactivate(struct adapter *adapter)
{
struct t3cdev *tdev = &adapter->tdev;
struct t3c_data *t = T3C_DATA(tdev);
+ struct l2t_data *d;
remove_adapter(adapter);
if (list_empty(&adapter_list))
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
free_tid_maps(&t->tid_maps);
T3C_DATA(tdev) = NULL;
- t3_free_l2t(L2DATA(tdev));
- L2DATA(tdev) = NULL;
+ rcu_read_lock();
+ d = L2DATA(tdev);
+ rcu_read_unlock();
+ rcu_assign_pointer(tdev->l2opt, NULL);
+ call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
kfree(t);
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index f452c4003253..41540978a173 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev)
{
- struct l2t_entry *e;
- struct l2t_data *d = L2DATA(cdev);
+ struct l2t_entry *e = NULL;
+ struct l2t_data *d;
+ int hash;
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
- int hash = arp_hash(addr, ifidx, d);
struct port_info *p = netdev_priv(dev);
int smt_idx = p->port_id;
+ rcu_read_lock();
+ d = L2DATA(cdev);
+ if (!d)
+ goto done_rcu;
+
+ hash = arp_hash(addr, ifidx, d);
+
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
}
done:
write_unlock_bh(&d->lock);
+done_rcu:
+ rcu_read_unlock();
return e;
}
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index 7a12d52ed4fc..c5f54796e2cb 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -76,6 +76,7 @@ struct l2t_data {
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
+ struct rcu_head rcu_head; /* to handle rcu cleanup */
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
/*
* Getting to the L2 data from an offload device.
*/
-#define L2DATA(dev) ((dev)->l2opt)
+#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt))
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
return t3_l2t_send_slow(dev, skb, e);
}
-static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
+static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ struct l2t_data *d;
+
+ rcu_read_lock();
+ d = L2DATA(t);
+
+ if (atomic_dec_and_test(&e->refcnt) && d)
t3_l2e_free(d, e);
+
+ rcu_read_unlock();
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
- if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
+ if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
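The l2t changes above turn L2DATA() into an rcu_dereference() and defer freeing the table through call_rcu(), so lookups can race safely with cxgb3_offload_deactivate(). A minimal sketch of that read/teardown pattern, using hypothetical names rather than the cxgb3 structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/kernel.h>

/* Hypothetical RCU-protected table; stands in for struct l2t_data. */
struct demo_table {
	int nentries;
	struct rcu_head rcu_head;
};

struct demo_dev {
	struct demo_table __rcu *tbl;	/* stands in for t3cdev->l2opt */
};

static int demo_read(struct demo_dev *dev)
{
	struct demo_table *t;
	int n = 0;

	rcu_read_lock();
	t = rcu_dereference(dev->tbl);	/* may be NULL after teardown */
	if (t)
		n = t->nentries;
	rcu_read_unlock();
	return n;
}

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_table, rcu_head));
}

static void demo_teardown(struct demo_dev *dev)
{
	struct demo_table *t = rcu_dereference_protected(dev->tbl, 1);

	RCU_INIT_POINTER(dev->tbl, NULL);		/* readers now see NULL */
	if (t)
		call_rcu(&t->rcu_head, demo_free_rcu);	/* free after a grace period */
}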
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c9957b7f17b5..b4efa292fd6f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev,
setup_debugfs(adapter);
}
+ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
+ pdev->needs_freset = 1;
+
if (is_offload(adapter))
attach_ulds(adapter);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8545c7aa93eb..a5a89ecb6f36 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4026,6 +4026,12 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
checksum += eeprom_data;
}
+#ifdef CONFIG_PARISC
+ /* This is a signature and not a checksum on HP c8000 */
+ if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6))
+ return E1000_SUCCESS;
+
+#endif
if (checksum == (u16) EEPROM_SUM)
return E1000_SUCCESS;
else {
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 25a8c2adb001..0caf3c323ec0 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -1669,10 +1669,10 @@ static int gfar_get_cls_all(struct gfar_private *priv,
u32 i = 0;
list_for_each_entry(comp, &priv->rx_list.list, list) {
- if (i <= cmd->rule_cnt) {
- rule_locs[i] = comp->fs.location;
- i++;
- }
+ if (i == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[i] = comp->fs.location;
+ i++;
}
cmd->data = MAX_FILER_IDX;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 16ce45c11934..52a39000c42c 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -428,6 +428,7 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
+ greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -490,7 +491,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (nr_frags != 0)
status = GRETH_TXBD_MORE;
- status |= GRETH_TXBD_CSALL;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
status |= skb_headlen(skb) & GRETH_BD_LEN;
if (greth->tx_next == GRETH_TXBD_NUM_MASK)
status |= GRETH_BD_WR;
@@ -513,7 +515,9 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
- status = GRETH_TXBD_CSALL | GRETH_BD_EN;
+ status = GRETH_BD_EN;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= GRETH_TXBD_CSALL;
status |= frag->size & GRETH_BD_LEN;
/* Wrap around descriptor ring */
@@ -641,6 +645,7 @@ static void greth_clean_tx(struct net_device *dev)
dev->stats.tx_fifo_errors++;
}
dev->stats.tx_packets++;
+ dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
greth->tx_last = NEXT_TX(greth->tx_last);
greth->tx_free++;
}
@@ -695,6 +700,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
greth->tx_skbuff[greth->tx_last] = NULL;
greth_update_tx_stats(dev, stat);
+ dev->stats.tx_bytes += skb->len;
bdp = greth->tx_bd_base + greth->tx_last;
@@ -796,6 +802,7 @@ static int greth_rx(struct net_device *dev, int limit)
memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
netif_receive_skb(skb);
}
@@ -910,6 +917,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
netif_receive_skb(skb);
greth->rx_skbuff[greth->rx_cur] = newskb;
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 9a0040dee4da..232a622a85b7 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -103,6 +103,7 @@ struct greth_private {
unsigned char *tx_bufs[GRETH_TXBD_NUM];
unsigned char *rx_bufs[GRETH_RXBD_NUM];
+ u16 tx_bufs_length[GRETH_TXBD_NUM];
u16 tx_next;
u16 tx_last;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 3e6679269400..d393f1e764ed 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev)
netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
netdev->irq, rc);
do {
- rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+ lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
+ } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
goto err_out;
}
@@ -757,7 +757,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
struct ibmveth_adapter *adapter = netdev_priv(dev);
unsigned long set_attr, clr_attr, ret_attr;
unsigned long set_attr6, clr_attr6;
- long ret, ret6;
+ long ret, ret4, ret6;
int rc1 = 0, rc2 = 0;
int restart = 0;
@@ -770,6 +770,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
set_attr = 0;
clr_attr = 0;
+ set_attr6 = 0;
+ clr_attr6 = 0;
if (data) {
set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
@@ -784,16 +786,20 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
!(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
- ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+ ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
set_attr, &ret_attr);
- if (ret != H_SUCCESS) {
+ if (ret4 != H_SUCCESS) {
netdev_err(dev, "unable to change IPv4 checksum "
"offload settings. %d rc=%ld\n",
- data, ret);
+ data, ret4);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr, clr_attr, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IP_CSUM;
- ret = h_illan_attributes(adapter->vdev->unit_address,
- set_attr, clr_attr, &ret_attr);
} else {
adapter->fw_ipv4_csum_support = data;
}
@@ -804,15 +810,18 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (ret6 != H_SUCCESS) {
netdev_err(dev, "unable to change IPv6 checksum "
"offload settings. %d rc=%ld\n",
- data, ret);
+ data, ret6);
+
+ h_illan_attributes(adapter->vdev->unit_address,
+ set_attr6, clr_attr6, &ret_attr);
+
+ if (data == 1)
+ dev->features &= ~NETIF_F_IPV6_CSUM;
- ret = h_illan_attributes(adapter->vdev->unit_address,
- set_attr6, clr_attr6,
- &ret_attr);
} else
adapter->fw_ipv6_csum_support = data;
- if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+ if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
adapter->rx_csum = data;
else
rc1 = -EIO;
@@ -930,6 +939,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
union ibmveth_buf_desc descs[6];
int last, i;
int force_bounce = 0;
+ dma_addr_t dma_addr;
/*
* veth handles a maximum of 6 segments including the header, so
@@ -994,17 +1004,16 @@ retry_bounce:
}
/* Map the header */
- descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto map_failed;
descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
+ descs[0].fields.address = dma_addr;
/* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long dma_addr;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
@@ -1026,7 +1035,12 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
+
+ for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 22790394318a..e1fcc9589278 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1321,8 +1321,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (ring_is_rsc_enabled(rx_ring))
pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
- /* if this is a skb from previous receive DMA will be 0 */
- if (rx_buffer_info->dma) {
+ /* linear means we are building an skb from multiple pages */
+ if (!skb_is_nonlinear(skb)) {
u16 hlen;
if (pkt_is_rsc &&
!(staterr & IXGBE_RXD_STAT_EOP) &&
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 05172c39a0ce..376e3e94bae0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
/* send to lowerdev first for its network taps */
- vlan->forward(vlan->lowerdev, skb);
+ dev_forward_skb(vlan->lowerdev, skb);
return NET_XMIT_SUCCESS;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dfc82720065a..ed2a3977c6e7 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -799,5 +799,11 @@ static void __exit cleanup_netconsole(void)
}
}
-module_init(init_netconsole);
+/*
+ * Use late_initcall to ensure netconsole is
+ * initialized after network device drivers when built-in.
+ *
+ * late_initcall() and module_init() are identical when built as a module.
+ */
+late_initcall(init_netconsole);
module_exit(cleanup_netconsole);
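As a side note on the ordering argument in the comment above: late_initcall() only matters in the built-in case, where it runs after the device_initcall()/module_init() level used by most network drivers; built as a module, both macros map to the same init hook. A minimal, hypothetical example module (not netconsole itself):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_late_init(void)
{
	/* Built-in: runs after module_init()-level netdev drivers have probed. */
	pr_info("demo: late init\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

late_initcall(demo_late_init);	/* same as module_init() when built as a module */
module_exit(demo_exit);
MODULE_LICENSE("GPL");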
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index 59fac77d0dbb..a09a07197eb5 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
/* Reset */
#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
-#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
-#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
+#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
+#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
/* TCP/IP Accelerator Control */
#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
+/* RX DMA STATUS */
+#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
+
/* Wake On LAN Status */
#define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */
#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
struct pch_gbe_buffer {
struct sk_buff *skb;
dma_addr_t dma;
+ unsigned char *rx_buffer;
unsigned long time_stamp;
u16 length;
bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
struct pch_gbe_rx_ring {
struct pch_gbe_rx_desc *desc;
dma_addr_t dma;
+ unsigned char *rx_buff_pool;
+ dma_addr_t rx_buff_pool_logic;
+ unsigned int rx_buff_pool_size;
unsigned int size;
unsigned int count;
unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
unsigned long rx_buffer_len;
unsigned long tx_queue_len;
bool have_msi;
+ bool rx_stop_flag;
};
extern const char pch_driver_version[];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index eac3c5ca9731..b8b4ba27b0e7 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
#include "pch_gbe.h"
#include "pch_gbe_api.h"
-#include <linux/prefetch.h>
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
@@ -34,11 +33,15 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
+#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+/* Macros for ML7831 */
+#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802
+
#define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -52,6 +55,7 @@ const char pch_driver_version[] = DRV_VERSION;
)
/* Ethertype field values */
+#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
#define PCH_GBE_FRAME_SIZE_2048 2048
#define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +87,12 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_INT_ENABLE_MASK ( \
PCH_GBE_INT_RX_DMA_CMPLT | \
PCH_GBE_INT_RX_DSC_EMP | \
+ PCH_GBE_INT_RX_FIFO_ERR | \
PCH_GBE_INT_WOL_DET | \
PCH_GBE_INT_TX_CMPLT \
)
+#define PCH_GBE_INT_DISABLE_ALL 0
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -138,6 +144,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
if (!tmp)
pr_err("Error: busy bit is not cleared\n");
}
+
+/**
+ * pch_gbe_wait_clr_bit_irq - Wait until a bit is cleared (interrupt context)
+ * @reg: Pointer to register
+ * @bit: Busy bit
+ */
+static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
+{
+ u32 tmp;
+ int ret = -1;
+ /* wait busy */
+ tmp = 20;
+ while ((ioread32(reg) & bit) && --tmp)
+ udelay(5);
+ if (!tmp)
+ pr_err("Error: busy bit is not cleared\n");
+ else
+ ret = 0;
+ return ret;
+}
+
/**
* pch_gbe_mac_mar_set - Set MAC address register
* @hw: Pointer to the HW structure
@@ -189,6 +216,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
return;
}
+static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+{
+ /* Read the MAC address and store it in the private data */
+ pch_gbe_mac_read_mac_addr(hw);
+ iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
+ pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
+ /* Setup the MAC address */
+ pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
+ return;
+}
+
/**
* pch_gbe_mac_init_rx_addrs - Initialize receive address's
* @hw: Pointer to the HW structure
@@ -671,13 +709,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
tcpip = ioread32(&hw->reg->TCPIP_ACC);
- if (netdev->features & NETIF_F_RXCSUM) {
- tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
- tcpip |= PCH_GBE_RX_TCPIPACC_EN;
- } else {
- tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
- tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
- }
+ tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
+ tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
iowrite32(tcpip, &hw->reg->TCPIP_ACC);
return;
}
@@ -717,13 +750,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
iowrite32(rdba, &hw->reg->RX_DSC_BASE);
iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
-
- /* Enables Receive DMA */
- rxdma = ioread32(&hw->reg->DMA_CTRL);
- rxdma |= PCH_GBE_RX_DMA_EN;
- iowrite32(rxdma, &hw->reg->DMA_CTRL);
- /* Enables Receive */
- iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
}
/**
@@ -1097,6 +1123,48 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+{
+ struct pch_gbe_hw *hw = &adapter->hw;
+ u32 rxdma;
+ u16 value;
+ int ret;
+
+ /* Disable Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma &= ~PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Wait until the Rx DMA bus is idle */
+ ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
+ if (ret) {
+ /* Disable Bus master */
+ pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
+ value &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ /* Enable Bus master */
+ value |= PCI_COMMAND_MASTER;
+ pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
+ } else {
+ /* Stop Receive */
+ pch_gbe_mac_reset_rx(hw);
+ }
+}
+
+static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+{
+ u32 rxdma;
+
+ /* Enables Receive DMA */
+ rxdma = ioread32(&hw->reg->DMA_CTRL);
+ rxdma |= PCH_GBE_RX_DMA_EN;
+ iowrite32(rxdma, &hw->reg->DMA_CTRL);
+ /* Enables Receive */
+ iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
+ return;
+}
+
/**
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
@@ -1123,7 +1191,17 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
adapter->stats.intr_rx_frame_err_count++;
if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
- adapter->stats.intr_rx_fifo_err_count++;
+ if (!adapter->rx_stop_flag) {
+ adapter->stats.intr_rx_fifo_err_count++;
+ pr_debug("Rx fifo over run\n");
+ adapter->rx_stop_flag = true;
+ int_en = ioread32(&hw->reg->INT_EN);
+ iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
+ &hw->reg->INT_EN);
+ pch_gbe_stop_receive(adapter);
+ int_st |= ioread32(&hw->reg->INT_ST);
+ int_st = int_st & ioread32(&hw->reg->INT_EN);
+ }
if (int_st & PCH_GBE_INT_RX_DMA_ERR)
adapter->stats.intr_rx_dma_err_count++;
if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1135,21 +1213,18 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
/* When Rx descriptor is empty */
if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
adapter->stats.intr_rx_dsc_empty_count++;
- pr_err("Rx descriptor is empty\n");
+ pr_debug("Rx descriptor is empty\n");
int_en = ioread32(&hw->reg->INT_EN);
iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
if (hw->mac.tx_fc_enable) {
/* Set Pause packet */
pch_gbe_mac_set_pause_packet(hw);
}
- if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
- == 0) {
- return IRQ_HANDLED;
- }
}
/* When request status is Receive interruption */
- if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
+ if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
+ (adapter->rx_stop_flag == true)) {
if (likely(napi_schedule_prep(&adapter->napi))) {
/* Enable only Rx Descriptor empty */
atomic_inc(&adapter->irq_sem);
@@ -1185,29 +1260,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
unsigned int i;
unsigned int bufsz;
- bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
+ bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
i = rx_ring->next_to_use;
while ((cleaned_count--)) {
buffer_info = &rx_ring->buffer_info[i];
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- } else {
- skb = netdev_alloc_skb(netdev, bufsz);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->stats.rx_alloc_buff_failed++;
- break;
- }
- /* 64byte align */
- skb_reserve(skb, PCH_GBE_DMA_ALIGN);
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
+ skb = netdev_alloc_skb(netdev, bufsz);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ adapter->stats.rx_alloc_buff_failed++;
+ break;
}
+ /* align */
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = skb;
+
buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
+ buffer_info->rx_buffer,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1240,6 +1309,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
return;
}
+static int
+pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
+ struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pch_gbe_buffer *buffer_info;
+ unsigned int i;
+ unsigned int bufsz;
+ unsigned int size;
+
+ bufsz = adapter->rx_buffer_len;
+
+ size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
+ rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL);
+ if (!rx_ring->rx_buff_pool) {
+ pr_err("Unable to allocate memory for the receive poll buffer\n");
+ return -ENOMEM;
+ }
+ memset(rx_ring->rx_buff_pool, 0, size);
+ rx_ring->rx_buff_pool_size = size;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
+ buffer_info->length = bufsz;
+ }
+ return 0;
+}
+
/**
* pch_gbe_alloc_tx_buffers - Allocate transmit buffers
* @adapter: Board private structure
@@ -1285,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
struct sk_buff *skb;
unsigned int i;
unsigned int cleaned_count = 0;
- bool cleaned = false;
+ bool cleaned = true;
pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
@@ -1296,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
- cleaned = true;
buffer_info = &tx_ring->buffer_info[i];
skb = buffer_info->skb;
@@ -1339,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
/* weight of a sort for tx, to avoid endless transmit cleanup */
- if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
+ if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
+ cleaned = false;
break;
+ }
}
pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
cleaned_count);
@@ -1380,7 +1480,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb, *new_skb;
+ struct sk_buff *skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
@@ -1401,13 +1501,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
rx_desc->gbec_status = DSC_INIT16;
buffer_info = &rx_ring->buffer_info[i];
skb = buffer_info->skb;
+ buffer_info->skb = NULL;
/* unmap dma */
dma_unmap_single(&pdev->dev, buffer_info->dma,
buffer_info->length, DMA_FROM_DEVICE);
buffer_info->mapped = false;
- /* Prefetch the packet */
- prefetch(skb->data);
pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
"TCP:0x%08x] BufInf = 0x%p\n",
@@ -1427,70 +1526,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3] */
- length = (rx_desc->rx_words_eob) - 3;
-
- /* Decide the data conversion method */
- if (!(netdev->features & NETIF_F_RXCSUM)) {
- /* [Header:14][payload] */
- if (NET_IP_ALIGN) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- length);
- skb = new_skb;
- } else {
- /* DMA buffer is used as SKB as it is.*/
- buffer_info->skb = NULL;
- }
- } else {
- /* [Header:14][padding:2][payload] */
- /* The length includes padding length */
- length = length - PCH_GBE_DMA_PADDING;
- if ((length < copybreak) ||
- (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
- /* Because alignment differs,
- * the new_skb is newly allocated,
- * and data is copied to new_skb.
- * Padding data is deleted
- * at the time of a copy.*/
- new_skb = netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (!new_skb) {
- /* dorrop error */
- pr_err("New skb allocation "
- "Error\n");
- goto dorrop;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
- memcpy(new_skb->data, skb->data,
- ETH_HLEN);
- memcpy(&new_skb->data[ETH_HLEN],
- &skb->data[ETH_HLEN +
- PCH_GBE_DMA_PADDING],
- length - ETH_HLEN);
- skb = new_skb;
- } else {
- /* Padding data is deleted
- * by moving header data.*/
- memmove(&skb->data[PCH_GBE_DMA_PADDING],
- &skb->data[0], ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
- buffer_info->skb = NULL;
- }
- }
- /* The length includes FCS length */
- length = length - ETH_FCS_LEN;
+ /* length convert[-3], length includes FCS length */
+ length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
+ if (rx_desc->rx_words_eob & 0x02)
+ length = length - 4;
+ /*
+ * buffer_info->rx_buffer: [Header:14][payload]
+ * skb->data: [Reserve:2][Header:14][payload]
+ */
+ memcpy(skb->data, buffer_info->rx_buffer, length);
+
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -1509,7 +1554,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_debug("Receive skb->ip_summed: %d length: %d\n",
skb->ip_summed, length);
}
-dorrop:
/* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1714,9 +1758,15 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
pr_err("Error: can't bring device up\n");
return err;
}
+ err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
+ if (err) {
+ pr_err("Error: can't bring device up\n");
+ return err;
+ }
pch_gbe_alloc_tx_buffers(adapter, tx_ring);
pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
adapter->tx_queue_len = netdev->tx_queue_len;
+ pch_gbe_start_receive(&adapter->hw);
mod_timer(&adapter->watchdog_timer, jiffies);
@@ -1734,6 +1784,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
@@ -1752,6 +1803,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_reset(adapter);
pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
+
+ pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
+ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ rx_ring->rx_buff_pool_logic = 0;
+ rx_ring->rx_buff_pool_size = 0;
+ rx_ring->rx_buff_pool = NULL;
}
/**
@@ -2004,6 +2061,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
int max_frame;
+ unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
+ int err;
max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2018,14 +2077,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
else
- adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
- netdev->mtu = new_mtu;
- adapter->hw.mac.max_frame_size = max_frame;
+ adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
- if (netif_running(netdev))
- pch_gbe_reinit_locked(adapter);
- else
+ if (netif_running(netdev)) {
+ pch_gbe_down(adapter);
+ err = pch_gbe_up(adapter);
+ if (err) {
+ adapter->rx_buffer_len = old_rx_buffer_len;
+ pch_gbe_up(adapter);
+ return -ENOMEM;
+ } else {
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
+ } else {
pch_gbe_reset(adapter);
+ netdev->mtu = new_mtu;
+ adapter->hw.mac.max_frame_size = max_frame;
+ }
pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2099,33 +2168,39 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
struct pch_gbe_adapter *adapter =
container_of(napi, struct pch_gbe_adapter, napi);
- struct net_device *netdev = adapter->netdev;
int work_done = 0;
bool poll_end_flag = false;
bool cleaned = false;
+ u32 int_en;
pr_debug("budget : %d\n", budget);
- /* Keep link state information with original netdev */
- if (!netif_carrier_ok(netdev)) {
- poll_end_flag = true;
- } else {
- cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
- pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
+ cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
- if (cleaned)
- work_done = budget;
- /* If no Tx and not enough Rx work done,
- * exit the polling mode
- */
- if ((work_done < budget) || !netif_running(netdev))
- poll_end_flag = true;
- }
+ if (!cleaned)
+ work_done = budget;
+ /* If no Tx and not enough Rx work done,
+ * exit the polling mode
+ */
+ if (work_done < budget)
+ poll_end_flag = true;
if (poll_end_flag) {
napi_complete(napi);
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ }
pch_gbe_irq_enable(adapter);
- }
+ } else
+ if (adapter->rx_stop_flag) {
+ adapter->rx_stop_flag = false;
+ pch_gbe_start_receive(&adapter->hw);
+ int_en = ioread32(&adapter->hw.reg->INT_EN);
+ iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
+ &adapter->hw.reg->INT_EN);
+ }
pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
poll_end_flag, work_done, budget);
@@ -2452,6 +2527,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
/* required last entry */
{0}
};
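One of the pch_gbe changes above replaces per-skb DMA mapping of skb->data with a single coherent pool (rx_buff_pool) that is carved into fixed-size per-descriptor receive buffers and copied into freshly allocated skbs on completion. A minimal sketch of the pool carving, with hypothetical names and sizes rather than the driver's structures:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define DEMO_RING_SIZE	64
#define DEMO_BUF_SIZE	2048

/* Hypothetical ring; stands in for struct pch_gbe_rx_ring. */
struct demo_ring {
	void *pool;
	dma_addr_t pool_dma;
	size_t pool_size;
	unsigned char *buf[DEMO_RING_SIZE];
};

static int demo_alloc_rx_pool(struct device *dev, struct demo_ring *ring)
{
	unsigned int i;

	ring->pool_size = DEMO_RING_SIZE * DEMO_BUF_SIZE;
	ring->pool = dma_alloc_coherent(dev, ring->pool_size,
					&ring->pool_dma, GFP_KERNEL);
	if (!ring->pool)
		return -ENOMEM;

	/* hand each descriptor a fixed slice of the one coherent allocation */
	for (i = 0; i < DEMO_RING_SIZE; i++)
		ring->buf[i] = (unsigned char *)ring->pool + i * DEMO_BUF_SIZE;

	return 0;
}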
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cb6e0b486b1e..edd7304773eb 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
- pr_warning("dp83640: rx timestamp pool is empty\n");
+ pr_debug("dp83640: rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640,
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
- pr_warning("dp83640: have timestamp but tx_queue empty\n");
+ pr_debug("dp83640: have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 10e5d985afa3..edfa15d2e795 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1465,7 +1465,12 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
continue;
}
- mtu = pch->chan->mtu - hdrlen;
+ /*
+ * hdrlen includes the 2-byte PPP protocol field, but the
+ * MTU counts only the payload excluding the protocol field.
+ * (RFC1661 Section 2)
+ */
+ mtu = pch->chan->mtu - (hdrlen - 2);
if (mtu < 4)
mtu = 4;
if (flen > mtu)
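A quick worked example of the comment above, with assumed numbers rather than anything taken from the patch: for a channel MTU of 1500 and hdrlen = 6 (4-byte multilink header plus the 2-byte protocol field), the old expression capped fragment payloads at 1500 - 6 = 1494 bytes, while the corrected mtu = pch->chan->mtu - (hdrlen - 2) allows 1500 - 4 = 1496 bytes, since the protocol field is not counted against the channel MTU.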
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 1a3033d8e7ed..d17d0624c5e6 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -40,6 +40,7 @@
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/pgtable.h>
#include <asm/system.h>
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 02339b3352e7..c23667017922 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -407,6 +407,7 @@ enum rtl_register_content {
RxOK = 0x0001,
/* RxStatusDesc */
+ RxBOVF = (1 << 24),
RxFOVF = (1 << 23),
RxRWT = (1 << 22),
RxRES = (1 << 21),
@@ -682,6 +683,7 @@ struct rtl8169_private {
struct mii_if_info mii;
struct rtl8169_counters counters;
u32 saved_wolopts;
+ u32 opts1_mask;
struct rtl_fw {
const struct firmware *fw;
@@ -710,6 +712,7 @@ MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
static int rtl8169_open(struct net_device *dev);
@@ -3077,6 +3080,14 @@ static void rtl8169_phy_reset(struct net_device *dev,
netif_err(tp, link, dev, "PHY reset failed\n");
}
+static bool rtl_tbi_enabled(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
+ (RTL_R8(PHYstatus) & TBI_Enable);
+}
+
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3109,7 +3120,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full : 0));
- if (RTL_R8(PHYstatus) & TBI_Enable)
+ if (rtl_tbi_enabled(tp))
netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
@@ -3319,9 +3330,16 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
if (__rtl8169_get_wol(tp) & WAKE_ANY) {
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_30)
+ RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+ AcceptMulticast | AcceptMyPhys);
return;
}
@@ -3417,7 +3435,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
rtl_writephy(tp, MII_BMCR, 0x0000);
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
- tp->mac_version == RTL_GIGA_MAC_VER_33)
+ tp->mac_version == RTL_GIGA_MAC_VER_33 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_34)
RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
AcceptMulticast | AcceptMyPhys);
return;
@@ -3727,8 +3746,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
RTL_W8(Cfg9346, Cfg9346_Lock);
- if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
- (RTL_R8(PHYstatus) & TBI_Enable)) {
+ if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
tp->get_settings = rtl8169_gset_tbi;
tp->phy_reset_enable = rtl8169_tbi_reset_enable;
@@ -3777,6 +3795,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->intr_event = cfg->intr_event;
tp->napi_event = cfg->napi_event;
+ tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+ ~(RxBOVF | RxFOVF) : ~0;
+
init_timer(&tp->timer);
tp->timer.data = (unsigned long) dev;
tp->timer.function = rtl8169_phy_timer;
@@ -3988,6 +4009,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
udelay(100);
} else {
@@ -5314,7 +5336,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
u32 status;
rmb();
- status = le32_to_cpu(desc->opts1);
+ status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
if (status & DescOwn)
break;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index faca764aa21b..b59abc706d93 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1050,7 +1050,6 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
- bool use_wc;
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1101,21 +1100,8 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
-
- /* bug22643: If SR-IOV is enabled then tx push over a write combined
- * mapping is unsafe. We need to disable write combining in this case.
- * MSI is unsupported when SR-IOV is enabled, and the firmware will
- * have removed the MSI capability. So write combining is safe if
- * there is an MSI capability.
- */
- use_wc = (!EFX_WORKAROUND_22643(efx) ||
- pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
- if (use_wc)
- efx->membase = ioremap_wc(efx->membase_phys,
- efx->type->mem_map_size);
- else
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys,
+ efx->type->mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index cc978803d484..751d1ec112cc 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -103,7 +103,6 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -126,7 +125,6 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
- wmb();
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
@@ -141,7 +139,6 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
- wmb();
}
/* Read a 128-bit CSR, locking as appropriate. */
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
- rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -175,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
- rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -249,7 +244,6 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
_efx_writed(efx, value->u32[2], reg + 8);
_efx_writed(efx, value->u32[3], reg + 12);
#endif
- wmb();
}
#define efx_writeo_page(efx, value, reg, page) \
_efx_writeo_page(efx, value, \
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3dd45ed61f0a..81a425397468 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -50,20 +50,6 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
return &nic_data->mcdi;
}
-static inline void
-efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
-}
-
-static inline void
-efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
-{
- struct siena_nic_data *nic_data = efx->nic_data;
- __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
-}
-
void efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
@@ -84,8 +70,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
const u8 *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = MCDI_PDU(efx);
- unsigned doorbell = MCDI_DOORBELL(efx);
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
unsigned int i;
efx_dword_t hdr;
u32 xflags, seqno;
@@ -106,28 +92,29 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
- efx_mcdi_writed(efx, &hdr, pdu);
+ efx_writed(efx, &hdr, pdu);
for (i = 0; i < inlen; i += 4)
- efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
- pdu + 4 + i);
+ _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+
+ /* Ensure the payload is written out before the header */
+ wmb();
/* ring the doorbell with a distinctive value */
- EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
- efx_mcdi_writed(efx, &hdr, doorbell);
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = MCDI_PDU(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(outlen & 3 || outlen >= 0x100);
for (i = 0; i < outlen; i += 4)
- efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
+ *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
}
static int efx_mcdi_poll(struct efx_nic *efx)
@@ -135,7 +122,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int time, finish;
unsigned int respseq, respcmd, error;
- unsigned int pdu = MCDI_PDU(efx);
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int rc, spins;
efx_dword_t reg;
@@ -161,7 +148,8 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = get_seconds();
- efx_mcdi_readd(efx, &reg, pdu);
+ rmb();
+ efx_readd(efx, &reg, pdu);
/* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before
@@ -188,7 +176,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
- efx_mcdi_readd(efx, &reg, pdu + 4);
+ efx_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
@@ -222,21 +210,21 @@ out:
/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = MCDI_REBOOT_FLAG(efx);
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
efx_dword_t reg;
uint32_t value;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false;
- efx_mcdi_readd(efx, &reg, addr);
+ efx_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
if (value == 0)
return 0;
EFX_ZERO_DWORD(reg);
- efx_mcdi_writed(efx, &reg, addr);
+ efx_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index bafa23a6874c..3edfbaf5f022 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1936,13 +1936,6 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
size = min_t(size_t, table->step, 16);
- if (table->offset >= efx->type->mem_map_size) {
- /* No longer mapped; return dummy data */
- memcpy(buf, "\xde\xc0\xad\xde", 4);
- buf += table->rows * size;
- continue;
- }
-
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit register or SRAM */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 4bd1f2839dfe..7443f99c977f 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -143,12 +143,10 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/**
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
- * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
- void __iomem *mcdi_smem;
int wol_filter_id;
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 5735e84c69de..2c3bd93fab54 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -250,26 +250,12 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- /* Initialise MCDI */
- nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP *
- FR_CZ_MC_TREG_SMEM_ROWS);
- if (!nic_data->mcdi_smem) {
- netif_err(efx, probe, efx->net_dev,
- "could not map MCDI at %llx+%x\n",
- (unsigned long long)efx->membase_phys +
- FR_CZ_MC_TREG_SMEM,
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
- rc = -ENOMEM;
- goto fail1;
- }
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- goto fail2;
+ goto fail1;
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
@@ -324,7 +310,6 @@ fail4:
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
- iounmap(nic_data->mcdi_smem);
fail1:
kfree(efx->nic_data);
return rc;
@@ -404,8 +389,6 @@ static int siena_init_nic(struct efx_nic *efx)
static void siena_remove_nic(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
-
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -415,8 +398,7 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
- iounmap(nic_data->mcdi_smem);
- kfree(nic_data);
+ kfree(efx->nic_data);
efx->nic_data = NULL;
}
@@ -656,7 +638,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
+ .mem_map_size = (FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 99ff11400cef..e4dd3a7f304b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -38,8 +38,6 @@
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
-/* Write combining and sriov=enabled are incompatible */
-#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index dc3fbf61910b..4a1374df6084 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -6234,12 +6234,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
-#ifdef BCM_KERNEL_SUPPORTS_8021Q
if (vlan_tx_tag_present(skb)) {
base_flags |= TXD_FLAG_VLAN;
vlan = vlan_tx_tag_get(skb);
}
-#endif
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 15772b1b6a91..13c1f044b40d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
+#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -98,6 +99,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 2d4c0910295b..2d394af82171 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -41,7 +41,8 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
case ADC_DC_CAL:
/* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
if (!IS_CHAN_B(chan) &&
- !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
+ !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+ IS_CHAN_HT20(chan)))
supported = true;
break;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 2339728a7306..3e69c631ebb4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x00008258, 0x00000000},
{0x0000825c, 0x40000000},
{0x00008260, 0x00080922},
- {0x00008264, 0x9bc00010},
+ {0x00008264, 0x9d400010},
{0x00008268, 0xffffffff},
{0x0000826c, 0x0000ffff},
{0x00008270, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1baca8e4715d..fcafec0605f4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -671,7 +671,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesAdditional,
modesIndex, regWrites);
- if (AR_SREV_9300(ah))
+ if (AR_SREV_9330(ah))
REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6530694a59ae..722967b86cf1 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2303,6 +2303,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
mutex_lock(&sc->mutex);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ if (ah->ah_flags & AH_UNPLUGGED) {
+ ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+ mutex_unlock(&sc->mutex);
+ return;
+ }
+
if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 9a4850154fb2..4c21f8cbdeb5 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc,
static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf;
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
list_for_each_entry(bf, &sc->rx.rxbuf, list) {
- if (bf->bf_mpdu)
+ if (bf->bf_mpdu) {
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ common->rx_bufsize,
+ DMA_BIDIRECTIONAL);
dev_kfree_skb_any(bf->bf_mpdu);
+ bf->bf_buf_addr = 0;
+ bf->bf_mpdu = NULL;
+ }
}
INIT_LIST_HEAD(&sc->rx.rxbuf);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 26f1ab840cc7..e293a7921bf0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1632,7 +1632,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
u32 cmd, beacon0_valid, beacon1_valid;
if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
- !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
+ !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) &&
+ !b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
return;
/* This is the bottom half of the asynchronous beacon update. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 3774dd034746..ef9ad79d1bfd 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,15 +1903,17 @@ static void ipw2100_down(struct ipw2100_priv *priv)
static int ipw2100_net_init(struct net_device *dev)
{
struct ipw2100_priv *priv = libipw_priv(dev);
+
+ return ipw2100_up(priv, 1);
+}
+
+static int ipw2100_wdev_init(struct net_device *dev)
+{
+ struct ipw2100_priv *priv = libipw_priv(dev);
const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
struct wireless_dev *wdev = &priv->ieee->wdev;
- int ret;
int i;
- ret = ipw2100_up(priv, 1);
- if (ret)
- return ret;
-
memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
/* fill-out priv->ieee->bg_band */
@@ -6350,9 +6352,13 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
"Error calling register_netdev.\n");
goto fail;
}
+ registered = 1;
+
+ err = ipw2100_wdev_init(dev);
+ if (err)
+ goto fail;
mutex_lock(&priv->action_mutex);
- registered = 1;
IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
@@ -6389,7 +6395,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
fail_unlock:
mutex_unlock(&priv->action_mutex);
-
+ wiphy_unregister(priv->ieee->wdev.wiphy);
+ kfree(priv->ieee->bg_band.channels);
fail:
if (dev) {
if (registered)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 87813c33bdc2..4ffebede5e03 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11425,16 +11425,23 @@ static void ipw_bg_down(struct work_struct *work)
/* Called by register_netdev() */
static int ipw_net_init(struct net_device *dev)
{
+ int rc = 0;
+ struct ipw_priv *priv = libipw_priv(dev);
+
+ mutex_lock(&priv->mutex);
+ if (ipw_up(priv))
+ rc = -EIO;
+ mutex_unlock(&priv->mutex);
+
+ return rc;
+}
+
+static int ipw_wdev_init(struct net_device *dev)
+{
int i, rc = 0;
struct ipw_priv *priv = libipw_priv(dev);
const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
struct wireless_dev *wdev = &priv->ieee->wdev;
- mutex_lock(&priv->mutex);
-
- if (ipw_up(priv)) {
- rc = -EIO;
- goto out;
- }
memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
@@ -11519,13 +11526,9 @@ static int ipw_net_init(struct net_device *dev)
set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
/* With that information in place, we can now register the wiphy... */
- if (wiphy_register(wdev->wiphy)) {
+ if (wiphy_register(wdev->wiphy))
rc = -EIO;
- goto out;
- }
-
out:
- mutex_unlock(&priv->mutex);
return rc;
}
@@ -11832,14 +11835,22 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
goto out_remove_sysfs;
}
+ err = ipw_wdev_init(net_dev);
+ if (err) {
+ IPW_ERROR("failed to register wireless device\n");
+ goto out_unregister_netdev;
+ }
+
#ifdef CONFIG_IPW2200_PROMISCUOUS
if (rtap_iface) {
err = ipw_prom_alloc(priv);
if (err) {
IPW_ERROR("Failed to register promiscuous network "
"device (error %d).\n", err);
- unregister_netdev(priv->net_dev);
- goto out_remove_sysfs;
+ wiphy_unregister(priv->ieee->wdev.wiphy);
+ kfree(priv->ieee->a_band.channels);
+ kfree(priv->ieee->bg_band.channels);
+ goto out_unregister_netdev;
}
}
#endif
@@ -11851,6 +11862,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
return 0;
+ out_unregister_netdev:
+ unregister_netdev(priv->net_dev);
out_remove_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
index 977bd2477c6a..164bcae821f8 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
@@ -822,12 +822,15 @@ static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
out:
- rs_sta->last_txrate_idx = index;
- if (sband->band == IEEE80211_BAND_5GHZ)
- info->control.rates[0].idx = rs_sta->last_txrate_idx -
- IWL_FIRST_OFDM_RATE;
- else
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
+ index = IWL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = index;
+ info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = index;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 35cd2537e7fd..e5971fe9d169 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
&priv->contexts[IWL_RXON_CTX_BSS]);
#endif
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
/* Keep the restart process from trying to send host
* commands by clearing the INIT status bit */
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
/* Set the FW error flag -- cleared on iwl_down */
set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
/*
* Keep the restart process from trying to send host
* commands by clearing the INIT status bit
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
index 62b4b09122cb..ce1fc9feb61f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
goto out;
}
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ ret = wait_event_timeout(priv->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
index 4fff995c6f3e..ef9e268bf8a0 100644
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
+ txq->time_stamp = jiffies;
+
pci_unmap_single(priv->pci_dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
iwl_legacy_get_cmd_string(cmd->hdr.cmd));
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
/* Mark as unmapped */
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 795826a014ed..66ee15629a76 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
/**
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
iwl3945_reg_txpower_periodic(priv);
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
return;
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
iwl_legacy_clear_driver_stations(priv);
/* Unblock any waiting calls */
- wake_up_interruptible_all(&priv->wait_command_queue);
+ wake_up_all(&priv->wait_command_queue);
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
/* Wait for START_ALIVE from ucode. Otherwise callbacks from
* mac80211 will not be run successfully. */
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ ret = wait_event_timeout(priv->wait_command_queue,
test_bit(STATUS_READY, &priv->status),
UCODE_READY_TIMEOUT);
if (!ret) {
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 14334668034e..aa0c2539761e 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
/**
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
priv->ucode_write_complete = 1;
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
}
if (inta & ~handled) {
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
iwl4965_rf_kill_ct_config(priv);
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up_interruptible(&priv->wait_command_queue);
+ wake_up(&priv->wait_command_queue);
iwl_legacy_power_update_mode(priv, true);
IWL_DEBUG_INFO(priv, "Updated power mode\n");
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv)
iwl_legacy_clear_driver_stations(priv);
/* Unblock any waiting calls */
- wake_up_interruptible_all(&priv->wait_command_queue);
+ wake_up_all(&priv->wait_command_queue);
/* Wipe out the EXIT_PENDING status bit if we are not actually
* exiting the module */
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw)
/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
* mac80211 will not be run successfully. */
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ ret = wait_event_timeout(priv->wait_command_queue,
test_bit(STATUS_READY, &priv->status),
UCODE_READY_TIMEOUT);
if (!ret) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a895a099d086..56211006a182 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -167,7 +167,7 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
+ memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index b0ae4de7f083..f9c3cd95d614 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2140,7 +2140,12 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ /*
+ * Including the following line will crash some AP's. This
+ * workaround removes the stimulus which causes the crash until
+ * the AP software can be fixed.
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ */
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index dd6937e97055..77e528f5db88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (test_bit(STATUS_SCANNING, &priv->status) &&
- priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- /* mac80211 will only ask for one band at a time */
- priv->scan_request = req;
- priv->scan_vif = vif;
-
/*
* If an internal scan is in progress, just set
* up the scan_request as per above.
*/
if (priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
+ IWL_DEBUG_SCAN(priv,
+ "SCAN request during internal scan - defer\n");
+ priv->scan_request = req;
+ priv->scan_vif = vif;
ret = 0;
- } else
+ } else {
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ /*
+ * mac80211 will only ask for one band at a time
+ * so using channels[0] here is ok
+ */
ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
req->channels[0]->band);
+ if (ret) {
+ priv->scan_request = NULL;
+ priv->scan_vif = NULL;
+ }
+ }
IWL_DEBUG_MAC80211(priv, "leave\n");
-out_unlock:
mutex_unlock(&priv->mutex);
return ret;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index a6b2b1db0b1d..222d410c586e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -771,6 +771,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
+ txq->time_stamp = jiffies;
+
iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ef67f6786a84..0019dfd8fb01 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3697,14 +3697,15 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
/* Apparently the data is read from end to start */
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
- (u32 *)&rt2x00dev->eeprom[i]);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
- (u32 *)&rt2x00dev->eeprom[i + 2]);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
- (u32 *)&rt2x00dev->eeprom[i + 4]);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
- (u32 *)&rt2x00dev->eeprom[i + 6]);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+ /* The returned value is in CPU order, but eeprom is le */
+ rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+ *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
}
@@ -3870,19 +3871,23 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
return -ENODEV;
}
- if (!rt2x00_rf(rt2x00dev, RF2820) &&
- !rt2x00_rf(rt2x00dev, RF2850) &&
- !rt2x00_rf(rt2x00dev, RF2720) &&
- !rt2x00_rf(rt2x00dev, RF2750) &&
- !rt2x00_rf(rt2x00dev, RF3020) &&
- !rt2x00_rf(rt2x00dev, RF2020) &&
- !rt2x00_rf(rt2x00dev, RF3021) &&
- !rt2x00_rf(rt2x00dev, RF3022) &&
- !rt2x00_rf(rt2x00dev, RF3052) &&
- !rt2x00_rf(rt2x00dev, RF3320) &&
- !rt2x00_rf(rt2x00dev, RF5370) &&
- !rt2x00_rf(rt2x00dev, RF5390)) {
- ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
+ switch (rt2x00dev->chip.rf) {
+ case RF2820:
+ case RF2850:
+ case RF2720:
+ case RF2750:
+ case RF3020:
+ case RF2020:
+ case RF3021:
+ case RF3022:
+ case RF3052:
+ case RF3320:
+ case RF5370:
+ case RF5390:
+ break;
+ default:
+ ERROR(rt2x00dev, "Invalid RF chipset 0x%x detected.\n",
+ rt2x00dev->chip.rf);
return -ENODEV;
}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 1bdc1aa305c0..04c4e9eb6ee6 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -610,6 +610,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->link_state = MAC80211_NOLINK;
memset(mac->bssid, 0, 6);
+
+ /* reset sec info */
+ rtl_cam_reset_sec_info(hw);
+
+ rtl_cam_reset_all_entry(hw);
mac->vendor = PEER_UNKNOWN;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
@@ -1063,6 +1068,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*or clear all entry here.
*/
rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
+
+ rtl_cam_reset_sec_info(hw);
+
break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 906e7aa55bc3..3e52a5496224 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -549,15 +549,16 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (mac->bw_40) {
- if (tcb_desc->packet_bw) {
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
SET_TX_DESC_DATA_BW(txdesc, 1);
SET_TX_DESC_DATA_SC(txdesc, 3);
+ } else if(rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH){
+ SET_TX_DESC_DATA_BW(txdesc, 1);
+ SET_TX_DESC_DATA_SC(txdesc, mac->cur_40_prime_sc);
} else {
SET_TX_DESC_DATA_BW(txdesc, 0);
- if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
- SET_TX_DESC_DATA_SC(txdesc,
- mac->cur_40_prime_sc);
- }
+ SET_TX_DESC_DATA_SC(txdesc, 0);
+ }
} else {
SET_TX_DESC_DATA_BW(txdesc, 0);
SET_TX_DESC_DATA_SC(txdesc, 0);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 8b1cef0ffde6..4bf3cf457ef0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
u8 tid = 0;
u16 seq_number = 0;
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
rtl_ips_nic_on(hw);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 0ca86f9ec4ed..182562952c79 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_get(vif);
rtnl_lock();
- if (netif_running(vif->dev))
- xenvif_up(vif);
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
netif_carrier_on(vif->dev);
+ if (netif_running(vif->dev))
+ xenvif_up(vif);
rtnl_unlock();
return 0;
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 753b21aaea61..3ffd9c1acc0a 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
+ if (dev->bus && dev->bus->self)
+ pcie_bus_configure_settings(dev->bus,
+ dev->bus->self->pcie_mpss);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0ce67423a0a3..e9651f0a8817 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
/*
* The default CLS is used if arch didn't set CLS explicitly and not
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+ pcie_bus_config = PCIE_BUS_TUNE_OFF;
} else if (!strncmp(str, "pcie_bus_safe", 13)) {
pcie_bus_config = PCIE_BUS_SAFE;
} else if (!strncmp(str, "pcie_bus_perf", 13)) {
pcie_bus_config = PCIE_BUS_PERFORMANCE;
+ } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+ pcie_bus_config = PCIE_BUS_PEER2PEER;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8473727b29fa..6ab6bd3df4b2 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1351,7 +1351,8 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
* will occur as normal.
*/
if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
- dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+ (dev->bus->self &&
+ dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
*smpss = 0;
if (*smpss > dev->pcie_mpss)
@@ -1396,34 +1397,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
static void pcie_write_mrrs(struct pci_dev *dev, int mps)
{
- int rc, mrrs;
+ int rc, mrrs, dev_mpss;
- if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
- int dev_mpss = 128 << dev->pcie_mpss;
+ /* In the "safe" case, do not configure the MRRS. There appear to be
+ * issues with setting MRRS to 0 on a number of devices.
+ */
- /* For Max performance, the MRRS must be set to the largest
- * supported value. However, it cannot be configured larger
- * than the MPS the device or the bus can support. This assumes
- * that the largest MRRS available on the device cannot be
- * smaller than the device MPSS.
- */
- mrrs = mps < dev_mpss ? mps : dev_mpss;
- } else
- /* In the "safe" case, configure the MRRS for fairness on the
- * bus by making all devices have the same size
- */
- mrrs = mps;
+ if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+ return;
+ dev_mpss = 128 << dev->pcie_mpss;
+
+ /* For Max performance, the MRRS must be set to the largest supported
+ * value. However, it cannot be configured larger than the MPS the
+ * device or the bus can support. This assumes that the largest MRRS
+ * available on the device cannot be smaller than the device MPSS.
+ */
+ mrrs = min(mps, dev_mpss);
/* MRRS is a R/W register. Invalid values can be written, but a
- * subsiquent read will verify if the value is acceptable or not.
+ * subsequent read will verify if the value is acceptable or not.
* If the MRRS value provided is not acceptable (e.g., too large),
* shrink the value until it is acceptable to the HW.
*/
while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+ dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
+ " to %d. If any issues are encountered, please try "
+ "running with pci=pcie_bus_safe\n", mrrs);
rc = pcie_set_readrq(dev, mrrs);
if (rc)
- dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
+ dev_err(&dev->dev,
+ "Failed attempting to set the MRRS\n");
mrrs /= 2;
}
@@ -1436,13 +1440,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
- dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+ dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
pcie_write_mps(dev, mps);
pcie_write_mrrs(dev, mps);
- dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+ dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
return 0;
@@ -1454,15 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
*/
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
- u8 smpss = mpss;
+ u8 smpss;
- if (!bus->self)
+ if (!pci_is_pcie(bus->self))
return;
- if (!pci_is_pcie(bus->self))
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
return;
+ /* FIXME - Peer to peer DMA is possible, though the endpoint would need
+ * to be aware to the MPS of the destination. To work around this,
+ * simply force the MPS of the entire system to the smallest possible.
+ */
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ smpss = 0;
+
if (pcie_bus_config == PCIE_BUS_SAFE) {
+ smpss = mpss;
+
pcie_find_smpss(bus->self, &smpss);
pci_walk_bus(bus, pcie_find_smpss, &smpss);
}
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 335551d333b2..14a42a1edc66 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -36,6 +36,7 @@
*/
struct ep93xx_rtc {
void __iomem *mmio_base;
+ struct rtc_device *rtc;
};
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
- struct rtc_device *rtc;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
return -ENXIO;
pdev->dev.platform_data = ep93xx_rtc;
- platform_set_drvdata(pdev, rtc);
+ platform_set_drvdata(pdev, ep93xx_rtc);
- rtc = rtc_device_register(pdev->name,
+ ep93xx_rtc->rtc = rtc_device_register(pdev->name,
&pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
+ if (IS_ERR(ep93xx_rtc->rtc)) {
+ err = PTR_ERR(ep93xx_rtc->rtc);
goto exit;
}
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
return 0;
fail:
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ep93xx_rtc->rtc);
exit:
platform_set_drvdata(pdev, NULL);
pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
{
- struct rtc_device *rtc = platform_get_drvdata(pdev);
+ struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
platform_set_drvdata(pdev, NULL);
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ep93xx_rtc->rtc);
pdev->dev.platform_data = NULL;
return 0;
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 2dd3c0163272..d93a9608b1f0 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#include <linux/sched.h>
#include <linux/workqueue.h>
/* DryIce Register Definitions */
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 075f1708deae..c4cf05731118 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
time -= tm->tm_hour * 3600;
tm->tm_min = time / 60;
tm->tm_sec = time - tm->tm_min * 60;
+
+ tm->tm_isdst = 0;
}
EXPORT_SYMBOL(rtc_time_to_tm);
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4e7c04e773e0..7639ab906f02 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -51,6 +51,27 @@ static enum s3c_cpu_type s3c_rtc_cpu_type;
static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
+static void s3c_rtc_alarm_clk_enable(bool enable)
+{
+ static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
+ static bool alarm_clk_enabled;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
+ if (enable) {
+ if (!alarm_clk_enabled) {
+ clk_enable(rtc_clk);
+ alarm_clk_enabled = true;
+ }
+ } else {
+ if (alarm_clk_enabled) {
+ clk_disable(rtc_clk);
+ alarm_clk_enabled = false;
+ }
+ }
+ spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
+}
+
/* IRQ Handlers */
static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
@@ -64,6 +85,9 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
clk_disable(rtc_clk);
+
+ s3c_rtc_alarm_clk_enable(false);
+
return IRQ_HANDLED;
}
@@ -97,6 +121,8 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
clk_disable(rtc_clk);
+ s3c_rtc_alarm_clk_enable(enabled);
+
return 0;
}
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 9a81f778d6b2..20687d55e7a7 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
int res;
u8 rd_reg;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
static int __devinit twl_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- int ret = 0;
+ int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
u8 rd_reg;
if (irq <= 0)
- return -EINVAL;
-
- rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(rtc));
- goto out0;
-
- }
-
- platform_set_drvdata(pdev, rtc);
+ goto out1;
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
if (ret < 0)
goto out1;
- ret = request_irq(irq, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING,
- dev_name(&rtc->dev), rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "IRQ is not free.\n");
- goto out1;
- }
-
if (twl_class_is_6030()) {
twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
/* Check RTC module status, Enable if it is off */
ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out2;
+ goto out1;
if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
- goto out2;
+ goto out1;
}
/* init cached IRQ enable bits */
ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
+ goto out1;
+
+ rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+ PTR_ERR(rtc));
+ goto out1;
+ }
+
+ ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+ IRQF_TRIGGER_RISING,
+ dev_name(&rtc->dev), rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "IRQ is not free.\n");
goto out2;
+ }
- return ret;
+ platform_set_drvdata(pdev, rtc);
+ return 0;
out2:
- free_irq(irq, rtc);
-out1:
rtc_device_unregister(rtc);
-out0:
+out1:
return ret;
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index cbde448f9947..eb3140ee821e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;
/*
- * Use tpi to get a pending interrupt, call the interrupt handler and
- * return a pointer to the subchannel structure.
+ * Use cio_tpi to get a pending interrupt and call the interrupt handler.
+ * Return non-zero if an interrupt was processed, zero otherwise.
*/
static int cio_tpi(void)
{
@@ -667,6 +667,10 @@ static int cio_tpi(void)
tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
if (tpi(NULL) != 1)
return 0;
+ if (tpi_info->adapter_IO) {
+ do_adapter_IO(tpi_info->isc);
+ return 1;
+ }
irb = (struct irb *)&S390_lowcore.irb;
/* Store interrupt response block to lowcore. */
if (tsch(tpi_info->schid, irb) != 0)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b7bd5b0cc7aa..3868ab2397c6 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
twa_free_request_id(tw_dev, request_id);
+ twa_unmap_scsi_data(tw_dev, request_id);
break;
case 1:
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
+ twa_unmap_scsi_data(tw_dev, request_id);
SCpnt->result = (DID_ERROR << 16);
done(SCpnt);
retval = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8d9dae89f065..3878b7395081 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -837,6 +837,7 @@ config SCSI_ISCI
# (temporary): known alpha quality driver
depends on EXPERIMENTAL
select SCSI_SAS_LIBSAS
+ select SCSI_SAS_HOST_SMP
---help---
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 3c08f5352b2d..6153a66a8a31 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
-obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
+obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e7d0d47b9185..e5f2d7d9002e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
kfree(aac->queues);
aac->queues = NULL;
free_irq(aac->pdev->irq, aac);
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
quirks = aac_get_driver_ident(index)->quirks;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 9ae80cd5953b..dba72a4e6a1c 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -563,7 +563,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
ISCSI_TMF_REQUEST_TYPE_SHIFT));
- nopout_wqe->ttt = nopout_hdr->ttt;
+ nopout_wqe->ttt = be32_to_cpu(nopout_hdr->ttt);
nopout_wqe->flags = 0;
if (!unsol)
nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bd22041e2789..f58644850333 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk)
struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;
if (csk->l2t) {
- l2t_release(L2DATA(t3dev), csk->l2t);
+ l2t_release(t3dev, csk->l2t);
csk->l2t = NULL;
cxgbi_sock_put(csk);
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ba710e350ac5..5d0e9a24ae94 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -432,6 +432,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
+ rtnl_lock();
+
/*
* Don't listen for Ethernet packets anymore.
* synchronize_net() ensures that the packet handlers are not running
@@ -461,6 +463,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
" specific feature for LLD.\n");
}
+ rtnl_unlock();
+
/* Release the self-reference taken during fcoe_interface_create() */
fcoe_interface_put(fcoe);
}
@@ -1951,11 +1955,8 @@ static void fcoe_destroy_work(struct work_struct *work)
fcoe_if_destroy(port->lport);
/* Do not tear down the fcoe interface for NPIV port */
- if (!npiv) {
- rtnl_lock();
+ if (!npiv)
fcoe_interface_cleanup(fcoe);
- rtnl_unlock();
- }
mutex_unlock(&fcoe_config_mutex);
}
@@ -2009,8 +2010,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
rc = -EIO;
+ rtnl_unlock();
fcoe_interface_cleanup(fcoe);
- goto out_nodev;
+ goto out_nortnl;
}
/* Make this the "master" N_Port */
@@ -2027,6 +2029,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
out_nodev:
rtnl_unlock();
+out_nortnl:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ec61bdb833ac..b200b736b000 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -676,6 +676,16 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
removed[*nremoved] = h->dev[entry];
(*nremoved)++;
+
+ /*
+ * New physical devices won't have target/lun assigned yet
+ * so we need to preserve the values in the slot we are replacing.
+ */
+ if (new_entry->target == -1) {
+ new_entry->target = h->dev[entry]->target;
+ new_entry->lun = h->dev[entry]->lun;
+ }
+
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
@@ -1548,10 +1558,17 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
}
static int hpsa_update_device_info(struct ctlr_info *h,
- unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+ unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
+ unsigned char *is_OBDR_device)
{
-#define OBDR_TAPE_INQ_SIZE 49
+
+#define OBDR_SIG_OFFSET 43
+#define OBDR_TAPE_SIG "$DR-10"
+#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
+#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
+
unsigned char *inq_buff;
+ unsigned char *obdr_sig;
inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (!inq_buff)
@@ -1583,6 +1600,16 @@ static int hpsa_update_device_info(struct ctlr_info *h,
else
this_device->raid_level = RAID_UNKNOWN;
+ if (is_OBDR_device) {
+ /* See if this is a One-Button-Disaster-Recovery device
+ * by looking for "$DR-10" at offset 43 in inquiry data.
+ */
+ obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
+ *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
+ strncmp(obdr_sig, OBDR_TAPE_SIG,
+ OBDR_SIG_LEN) == 0);
+ }
+
kfree(inq_buff);
return 0;
@@ -1716,7 +1743,7 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
return 0;
}
- if (hpsa_update_device_info(h, scsi3addr, this_device))
+ if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
return 0;
(*nmsa2xxx_enclosures)++;
hpsa_set_bus_target_lun(this_device, bus, target, 0);
@@ -1808,7 +1835,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
*/
struct ReportLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
- unsigned char *inq_buff = NULL;
u32 nphysicals = 0;
u32 nlogicals = 0;
u32 ndev_allocated = 0;
@@ -1824,11 +1850,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
- inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
- if (!currentsd || !physdev_list || !logdev_list ||
- !inq_buff || !tmpdevice) {
+ if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
@@ -1863,7 +1887,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
/* adjust our table of devices */
nmsa2xxx_enclosures = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
- u8 *lunaddrbytes;
+ u8 *lunaddrbytes, is_OBDR = 0;
/* Figure out where the LUN ID info is coming from */
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
@@ -1874,7 +1898,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
continue;
/* Get device type, vendor, model, device id */
- if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
+ &is_OBDR))
continue; /* skip it if we can't talk to it. */
figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
tmpdevice);
@@ -1898,7 +1923,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
hpsa_set_bus_target_lun(this_device, bus, target, lun);
switch (this_device->devtype) {
- case TYPE_ROM: {
+ case TYPE_ROM:
/* We don't *really* support actual CD-ROM devices,
* just "One Button Disaster Recovery" tape drive
* which temporarily pretends to be a CD-ROM drive.
@@ -1906,15 +1931,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
* device by checking for "$DR-10" in bytes 43-48 of
* the inquiry data.
*/
- char obdr_sig[7];
-#define OBDR_TAPE_SIG "$DR-10"
- strncpy(obdr_sig, &inq_buff[43], 6);
- obdr_sig[6] = '\0';
- if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
- /* Not OBDR device, ignore it. */
- break;
- }
- ncurrent++;
+ if (is_OBDR)
+ ncurrent++;
break;
case TYPE_DISK:
if (i < nphysicals)
@@ -1947,7 +1965,6 @@ out:
for (i = 0; i < ndev_allocated; i++)
kfree(currentsd[i]);
kfree(currentsd);
- kfree(inq_buff);
kfree(physdev_list);
kfree(logdev_list);
}
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 26072f1e9852..6981b773a88d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -531,6 +531,9 @@ static void sci_controller_process_completions(struct isci_host *ihost)
break;
case SCU_COMPLETION_TYPE_EVENT:
+ sci_controller_event_completion(ihost, ent);
+ break;
+
case SCU_COMPLETION_TYPE_NOTIFY: {
event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
(SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
@@ -1091,6 +1094,7 @@ static void isci_host_completion_routine(unsigned long data)
struct isci_request *request;
struct isci_request *next_request;
struct sas_task *task;
+ u16 active;
INIT_LIST_HEAD(&completed_request_list);
INIT_LIST_HEAD(&errored_request_list);
@@ -1181,6 +1185,13 @@ static void isci_host_completion_routine(unsigned long data)
}
}
+ /* the coalesence timeout doubles at each encoding step, so
+ * update it based on the ilog2 value of the outstanding requests
+ */
+ active = isci_tci_active(ihost);
+ writel(SMU_ICC_GEN_VAL(NUMBER, active) |
+ SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
+ &ihost->smu_registers->interrupt_coalesce_control);
}
/**
@@ -1471,7 +1482,7 @@ static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
/* set the default interrupt coalescence number and timeout value. */
- sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 062101a39f79..9f33831a2f04 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -369,6 +369,9 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
+/* interrupt coalescing baseline: 9 == 3 to 5us interrupt delay per command */
+#define ISCI_COALESCE_BASE 9
+
/* expander attached sata devices require 3 rnc slots */
static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 61e0d09e2b57..29aa34efb0f5 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -59,10 +59,19 @@
#include <linux/firmware.h>
#include <linux/efi.h>
#include <asm/string.h>
+#include <scsi/scsi_host.h>
#include "isci.h"
#include "task.h"
#include "probe_roms.h"
+#define MAJ 1
+#define MIN 0
+#define BUILD 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD)
+
+MODULE_VERSION(DRV_VERSION);
+
static struct scsi_transport_template *isci_transport_template;
static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
@@ -113,6 +122,22 @@ unsigned char max_concurr_spinup = 1;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+struct device_attribute *isci_host_attrs[] = {
+ &dev_attr_isci_id,
+ NULL
+};
+
static struct scsi_host_template isci_sht = {
.module = THIS_MODULE,
@@ -138,6 +163,7 @@ static struct scsi_host_template isci_sht = {
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = isci_host_attrs,
};
static struct sas_domain_function_template isci_transport_ops = {
@@ -232,17 +258,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
return 0;
}
-static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
- struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
- struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
-}
-
-static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-
static void isci_unregister(struct isci_host *isci_host)
{
struct Scsi_Host *shost;
@@ -251,7 +266,6 @@ static void isci_unregister(struct isci_host *isci_host)
return;
shost = isci_host->shost;
- device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
sas_unregister_ha(&isci_host->sas_ha);
@@ -415,14 +429,8 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
if (err)
goto err_shost_remove;
- err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
- if (err)
- goto err_unregister_ha;
-
return isci_host;
- err_unregister_ha:
- sas_unregister_ha(&(isci_host->sas_ha));
err_shost_remove:
scsi_remove_host(shost);
err_shost:
@@ -540,7 +548,8 @@ static __init int isci_init(void)
{
int err;
- pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+ pr_info("%s: Intel(R) C600 SAS Controller Driver - version %s\n",
+ DRV_NAME, DRV_VERSION);
isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
if (!isci_transport_template)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 79313a7a2356..430fc8ff014a 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -104,6 +104,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
+ u32 sp_timeouts = 0;
iphy->link_layer_registers = reg;
@@ -211,6 +212,18 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
writel(llctl, &iphy->link_layer_registers->link_layer_control);
+ sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+
+ /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
+ sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
+
+ /* Set RATE_CHANGE timeout value to 0x3B (59us). This ensures SCU can
+ * lock with 3Gb drive when SCU max rate is set to 1.5Gb.
+ */
+ sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
+
+ writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+
if (is_a2(ihost->pdev)) {
/* Program the max ARB time for the PHY to 700us so we inter-operate with
* the PMC expander which shuts down PHYs if the expander PHY generates too
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 9b266c7428e8..00afc738bbed 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1299,6 +1299,18 @@ struct scu_transport_layer_registers {
#define SCU_AFE_XCVRCR_OFFSET 0x00DC
#define SCU_AFE_LUTCR_OFFSET 0x00E0
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_SHIFT (0UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_ALIGN_DETECTION_MASK (0x000000FFUL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_SHIFT (8UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_HOT_PLUG_MASK (0x0000FF00UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_SHIFT (16UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_COMSAS_DETECTION_MASK (0x00FF0000UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_SHIFT (24UL)
+#define SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_RATE_CHANGE_MASK (0xFF000000UL)
+
+#define SCU_SAS_PHYTOV_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_PHY_TIMER_TIMEOUT_VALUES_##name, value)
+
#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index a46e07ac789f..b5d3a8c4d329 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -732,12 +732,20 @@ sci_io_request_terminate(struct isci_request *ireq)
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
return SCI_SUCCESS;
case SCI_REQ_TASK_WAIT_TC_RESP:
+ /* The task frame was already confirmed to have been
+ * sent by the SCU HW. Since the state machine is
+ * now only waiting for the task response itself,
+ * abort the request and complete it immediately
+ * and don't wait for the task response.
+ */
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
return SCI_SUCCESS;
case SCI_REQ_ABORTING:
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- return SCI_SUCCESS;
+ /* If a request has a termination requested twice, return
+ * a failure indication, since HW confirmation of the first
+ * abort is still outstanding.
+ */
case SCI_REQ_COMPLETED:
default:
dev_warn(&ireq->owning_controller->pdev->dev,
@@ -2399,22 +2407,19 @@ static void isci_task_save_for_upper_layer_completion(
}
}
-static void isci_request_process_stp_response(struct sas_task *task,
- void *response_buffer)
+static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
{
- struct dev_to_host_fis *d2h_reg_fis = response_buffer;
struct task_status_struct *ts = &task->task_status;
struct ata_task_resp *resp = (void *)&ts->buf[0];
- resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
- memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+ resp->frame_len = sizeof(*fis);
+ memcpy(resp->ending_fis, fis, sizeof(*fis));
ts->buf_valid_size = sizeof(*resp);
- /**
- * If the device fault bit is set in the status register, then
+ /* If the device fault bit is set in the status register, then
* set the sense data and return.
*/
- if (d2h_reg_fis->status & ATA_DF)
+ if (fis->status & ATA_DF)
ts->stat = SAS_PROTO_RESPONSE;
else
ts->stat = SAM_STAT_GOOD;
@@ -2428,7 +2433,6 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
{
struct sas_task *task = isci_request_access_task(request);
struct ssp_response_iu *resp_iu;
- void *resp_buf;
unsigned long task_flags;
struct isci_remote_device *idev = isci_lookup_device(task->dev);
enum service_response response = SAS_TASK_UNDELIVERED;
@@ -2565,9 +2569,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
task);
if (sas_protocol_ata(task->task_proto)) {
- resp_buf = &request->stp.rsp;
- isci_request_process_stp_response(task,
- resp_buf);
+ isci_process_stp_response(task, &request->stp.rsp);
} else if (SAS_PROTOCOL_SSP == task->task_proto) {
/* crack the iu response buffer. */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index e9e1e2abacb9..16f88ab939c8 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -72,7 +72,7 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
*/
buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
- size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+ size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
/*
* The Unsolicited Frame buffers are set at the start of the UF
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 31cb9506f52d..75d896686f5a 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -214,7 +214,7 @@ struct sci_uf_address_table_array {
* starting address of the UF address table.
* 64-bit pointers are required by the hardware.
*/
- dma_addr_t *array;
+ u64 *array;
/**
* This field specifies the physical address location for the UF
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 01ff082dc34c..d261e982a2fa 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -494,6 +494,9 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
*/
error = lport->tt.frame_send(lport, fp);
+ if (fh->fh_type == FC_TYPE_BLS)
+ return error;
+
/*
* Update the exchange and sequence flags,
* assuming all frames for the sequence have been sent.
@@ -575,42 +578,35 @@ static void fc_seq_set_resp(struct fc_seq *sp,
}
/**
- * fc_seq_exch_abort() - Abort an exchange and sequence
- * @req_sp: The sequence to be aborted
+ * fc_exch_abort_locked() - Abort an exchange
+ * @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
- * Generally called because of a timeout or an abort from the upper layer.
+ * Locking notes: Called with exch lock held
+ *
+ * Return value: 0 on success else error code
*/
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
- unsigned int timer_msec)
+static int fc_exch_abort_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
{
struct fc_seq *sp;
- struct fc_exch *ep;
struct fc_frame *fp;
int error;
- ep = fc_seq_exch(req_sp);
-
- spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
- ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
- spin_unlock_bh(&ep->ex_lock);
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
return -ENXIO;
- }
/*
* Send the abort on a new sequence if possible.
*/
sp = fc_seq_start_next_locked(&ep->seq);
- if (!sp) {
- spin_unlock_bh(&ep->ex_lock);
+ if (!sp)
return -ENOMEM;
- }
ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
if (timer_msec)
fc_exch_timer_set_locked(ep, timer_msec);
- spin_unlock_bh(&ep->ex_lock);
/*
* If not logged into the fabric, don't send ABTS but leave
@@ -633,6 +629,28 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
}
/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp: The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_seq_exch_abort(const struct fc_seq *req_sp,
+ unsigned int timer_msec)
+{
+ struct fc_exch *ep;
+ int error;
+
+ ep = fc_seq_exch(req_sp);
+ spin_lock_bh(&ep->ex_lock);
+ error = fc_exch_abort_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+
+/**
* fc_exch_timeout() - Handle exchange timer expiration
* @work: The work_struct identifying the exchange that timed out
*/
@@ -1715,6 +1733,7 @@ static void fc_exch_reset(struct fc_exch *ep)
int rc = 1;
spin_lock_bh(&ep->ex_lock);
+ fc_exch_abort_locked(ep, 0);
ep->state |= FC_EX_RST_CLEANUP;
if (cancel_delayed_work(&ep->timeout_work))
atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
@@ -1962,6 +1981,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
struct fc_exch *ep;
struct fc_seq *sp = NULL;
struct fc_frame_header *fh;
+ struct fc_fcp_pkt *fsp = NULL;
int rc = 1;
ep = fc_exch_alloc(lport, fp);
@@ -1984,8 +2004,10 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
fc_exch_setup_hdr(ep, fp, ep->f_ctl);
sp->cnt++;
- if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
+ if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+ fsp = fr_fsp(fp);
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
+ }
if (unlikely(lport->tt.frame_send(lport, fp)))
goto err;
@@ -1999,7 +2021,8 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
- fc_fcp_ddp_done(fr_fsp(fp));
+ if (fsp)
+ fc_fcp_ddp_done(fsp);
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
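
The fc_exch.c changes above split the abort path into fc_exch_abort_locked(), which assumes ep->ex_lock is already held, plus a thin fc_seq_exch_abort() wrapper that takes and releases the lock, so fc_exch_reset() can abort an exchange from a context that already holds the lock. A generic sketch of this _locked/wrapper convention, using a POSIX mutex as a stand-in for the kernel spinlock and invented names throughout:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct exchange {
    pthread_mutex_t lock;
    int aborted;
};

/* Core operation; the caller must already hold ex->lock. */
static int exch_abort_locked(struct exchange *ex)
{
    if (ex->aborted)
        return -ENXIO;      /* already aborted or completed */
    ex->aborted = 1;
    return 0;
}

/* Public wrapper: take the lock, call the _locked helper, drop the lock. */
static int exch_abort(struct exchange *ex)
{
    int err;

    pthread_mutex_lock(&ex->lock);
    err = exch_abort_locked(ex);
    pthread_mutex_unlock(&ex->lock);
    return err;
}

/* A reset-style path that already holds the lock can reuse the helper
 * directly, which is exactly what the split above enables. */
static void exch_reset(struct exchange *ex)
{
    pthread_mutex_lock(&ex->lock);
    exch_abort_locked(ex);
    /* ... further cleanup under the same lock ... */
    pthread_mutex_unlock(&ex->lock);
}

int main(void)
{
    struct exchange ex = { PTHREAD_MUTEX_INITIALIZER, 0 };

    printf("first abort:  %d\n", exch_abort(&ex));   /* 0 */
    printf("second abort: %d\n", exch_abort(&ex));   /* -ENXIO */
    exch_reset(&ex);
    return 0;
}
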
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index afb63c843144..4c41ee816f0b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -2019,6 +2019,11 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_fcp_internal *si;
int rc = FAILED;
unsigned long flags;
+ int rval;
+
+ rval = fc_block_scsi_eh(sc_cmd);
+ if (rval)
+ return rval;
lport = shost_priv(sc_cmd->device->host);
if (lport->state != LPORT_ST_READY)
@@ -2068,9 +2073,9 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
int rc = FAILED;
int rval;
- rval = fc_remote_port_chkready(rport);
+ rval = fc_block_scsi_eh(sc_cmd);
if (rval)
- goto out;
+ return rval;
lport = shost_priv(sc_cmd->device->host);
@@ -2116,6 +2121,8 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
FC_SCSI_DBG(lport, "Resetting host\n");
+ fc_block_scsi_eh(sc_cmd);
+
lport->tt.lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e55ed9cf23fb..628f347404f9 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -88,6 +88,7 @@
*/
#include <linux/timer.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -1029,8 +1030,16 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
FCH_EVT_LIPRESET, 0);
fc_vports_linkchange(lport);
fc_lport_reset_locked(lport);
- if (lport->link_up)
+ if (lport->link_up) {
+ /*
+ * Wait up to the resource allocation timeout (R_A_TOV)
+ * before doing re-login, since incomplete FIP exchanges
+ * from the last session may collide with exchanges in
+ * the new session.
+ */
+ msleep(lport->r_a_tov);
fc_lport_enter_flogi(lport);
+ }
}
/**
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f84084bba2f0..16ad97df5ba6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
list_for_each_entry(ch, &ex->children, siblings) {
if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
res = sas_find_bcast_dev(ch, src_dev);
- if (src_dev)
+ if (*src_dev)
return res;
}
}
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- sas_port_delete_phy(phy->port, phy->phy);
- if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
- phy->port = NULL;
+ if (phy->port) {
+ sas_port_delete_phy(phy->port, phy->phy);
+ if (phy->port->num_phys == 0)
+ sas_port_delete(phy->port);
+ phy->port = NULL;
+ }
}
static int sas_discover_bfs_by_root_level(struct domain_device *root,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7836eb01c7fc..a31e05f3bfd4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1786,13 +1786,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
}
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
+ int prot = 0;
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
+ if (ql2xenabledif == 1)
+ prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
- SHOST_DIF_TYPE1_PROTECTION
+ prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 2155071f3100..d79cd8a5f831 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -8,24 +8,24 @@
/*
* Table for showing the current message id in use for particular level
* Change this table for addition of log/debug messages.
- * -----------------------------------------------------
- * | Level | Last Value Used |
- * -----------------------------------------------------
- * | Module Init and Probe | 0x0116 |
- * | Mailbox commands | 0x111e |
- * | Device Discovery | 0x2083 |
- * | Queue Command and IO tracing | 0x302e |
- * | DPC Thread | 0x401c |
- * | Async Events | 0x5059 |
- * | Timer Routines | 0x600d |
- * | User Space Interactions | 0x709c |
- * | Task Management | 0x8043 |
- * | AER/EEH | 0x900f |
- * | Virtual Port | 0xa007 |
- * | ISP82XX Specific | 0xb027 |
- * | MultiQ | 0xc00b |
- * | Misc | 0xd00b |
- * -----------------------------------------------------
+ * ----------------------------------------------------------------------
+ * | Level | Last Value Used | Holes |
+ * ----------------------------------------------------------------------
+ * | Module Init and Probe | 0x0116 | |
+ * | Mailbox commands | 0x1126 | |
+ * | Device Discovery | 0x2083 | |
+ * | Queue Command and IO tracing | 0x302e | 0x3008 |
+ * | DPC Thread | 0x401c | |
+ * | Async Events | 0x5059 | |
+ * | Timer Routines | 0x600d | |
+ * | User Space Interactions | 0x709d | |
+ * | Task Management | 0x8041 | |
+ * | AER/EEH | 0x900f | |
+ * | Virtual Port | 0xa007 | |
+ * | ISP82XX Specific | 0xb04f | |
+ * | MultiQ | 0xc00b | |
+ * | Misc | 0xd00b | |
+ * ----------------------------------------------------------------------
*/
#include "qla_def.h"
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index cc5a79259d33..a03eaf40f377 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2529,6 +2529,7 @@ struct qla_hw_data {
#define DT_ISP8021 BIT_14
#define DT_ISP_LAST (DT_ISP8021 << 1)
+#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
#define DT_ZIO_SUPPORTED BIT_28
@@ -2572,6 +2573,7 @@ struct qla_hw_data {
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 691783abfb69..aa69486dc064 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
/*
* If DIF Error is set in comp_status, these additional fields are
* defined:
+ *
+ * !!! NOTE: Firmware sends the expected/actual DIF data in big-endian
+ * format, but the entire "data" field gets swab32()-ed at the beginning
+ * of qla2x00_status_entry().
+ *
* &data[10] : uint8_t report_runt_bg[2]; - computed guard
* &data[12] : uint8_t actual_dif[8]; - DIF Data received
* &data[20] : uint8_t expected_dif[8]; - DIF Data computed
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index def694271bf7..37da04d3db26 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3838,15 +3838,12 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
req = vha->req;
rsp = req->rsp;
- atomic_set(&vha->loop_state, LOOP_UPDATE);
clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
if (vha->flags.online) {
if (!(rval = qla2x00_fw_ready(vha))) {
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
- atomic_set(&vha->loop_state, LOOP_UPDATE);
-
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0,
MK_SYNC_ALL);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d2e904bc21c0..9902834e0b74 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -102,3 +102,32 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
fcport->d_id.b.al_pa);
}
}
+
+static inline int
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+ /*
+ * Uncomment when corresponding SCSI changes are done.
+ *
+ if (!sp->cmd->prot_chk)
+ return 0;
+ *
+ */
+
+ switch (scsi_get_prot_op(sp->cmd)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ if (ql2xenablehba_err_chk >= 1)
+ return 1;
+ break;
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ if (ql2xenablehba_err_chk >= 2)
+ return 1;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ return 1;
+ }
+ return 0;
+}
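
qla2x00_hba_err_chk_enabled() above decides, per protection operation, whether HBA-side T10 DIF checking should be on for the configured ql2xenablehba_err_chk level: operations where the HBA itself generates or consumes the protection data (READ_INSERT, WRITE_STRIP) are always checked, READ_STRIP/WRITE_INSERT need level 1 or higher, and the PASS operations need level 2. A standalone sketch of that decision table; the enum below only informally mirrors the SCSI_PROT_* values:

#include <stdio.h>

enum prot_op {
    OP_READ_INSERT, OP_WRITE_STRIP,   /* HBA generates/consumes the PI itself */
    OP_READ_STRIP,  OP_WRITE_INSERT,  /* PI exists on the wire side only */
    OP_READ_PASS,   OP_WRITE_PASS,    /* PI passed through in both directions */
    OP_NORMAL
};

/* level mirrors ql2xenablehba_err_chk: 0, 1 or 2. */
static int hba_err_chk_enabled(enum prot_op op, int level)
{
    switch (op) {
    case OP_READ_INSERT:
    case OP_WRITE_STRIP:
        return 1;                 /* always checked: the HBA owns the PI */
    case OP_READ_STRIP:
    case OP_WRITE_INSERT:
        return level >= 1;
    case OP_READ_PASS:
    case OP_WRITE_PASS:
        return level >= 2;
    default:
        return 0;
    }
}

int main(void)
{
    printf("PASS at level 1 -> %d\n", hba_err_chk_enabled(OP_READ_PASS, 1));   /* 0 */
    printf("PASS at level 2 -> %d\n", hba_err_chk_enabled(OP_READ_PASS, 2));   /* 1 */
    return 0;
}
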
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 49d6906af886..dbec89622a0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -709,20 +709,28 @@ struct fw_dif_context {
*
*/
static inline void
-qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
+qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
unsigned int protcnt)
{
- struct sd_dif_tuple *spt;
+ struct scsi_cmnd *cmd = sp->cmd;
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- unsigned char op = scsi_get_prot_op(cmd);
switch (scsi_get_prot_type(cmd)) {
- /* For TYPE 0 protection: no checking */
case SCSI_PROT_DIF_TYPE0:
- pkt->ref_tag_mask[0] = 0x00;
- pkt->ref_tag_mask[1] = 0x00;
- pkt->ref_tag_mask[2] = 0x00;
- pkt->ref_tag_mask[3] = 0x00;
+ /*
+ * No check for ql2xenablehba_err_chk, as it would be an
+ * I/O error if HBA tag generation were not done.
+ */
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
+ pkt->ref_tag_mask[0] = 0xff;
+ pkt->ref_tag_mask[1] = 0xff;
+ pkt->ref_tag_mask[2] = 0xff;
+ pkt->ref_tag_mask[3] = 0xff;
break;
/*
@@ -730,20 +738,16 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
* match LBA in CDB + N
*/
case SCSI_PROT_DIF_TYPE2:
- if (!ql2xenablehba_err_chk)
- break;
-
- if (scsi_prot_sg_count(cmd)) {
- spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
- scsi_prot_sglist(cmd)[0].offset;
- pkt->app_tag = swab32(spt->app_tag);
- pkt->app_tag_mask[0] = 0xff;
- pkt->app_tag_mask[1] = 0xff;
- }
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
pkt->ref_tag = cpu_to_le32((uint32_t)
(0xffffffff & scsi_get_lba(cmd)));
+ if (!qla2x00_hba_err_chk_enabled(sp))
+ break;
+
/* enable ALL bytes of the ref tag */
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
@@ -763,26 +767,15 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
* 16 bit app tag.
*/
case SCSI_PROT_DIF_TYPE1:
- if (!ql2xenablehba_err_chk)
+ pkt->ref_tag = cpu_to_le32((uint32_t)
+ (0xffffffff & scsi_get_lba(cmd)));
+ pkt->app_tag = __constant_cpu_to_le16(0);
+ pkt->app_tag_mask[0] = 0x0;
+ pkt->app_tag_mask[1] = 0x0;
+
+ if (!qla2x00_hba_err_chk_enabled(sp))
break;
- if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
- op == SCSI_PROT_WRITE_PASS)) {
- spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
- scsi_prot_sglist(cmd)[0].offset;
- ql_dbg(ql_dbg_io, vha, 0x3008,
- "LBA from user %p, lba = 0x%x for cmd=%p.\n",
- spt, (int)spt->ref_tag, cmd);
- pkt->ref_tag = swab32(spt->ref_tag);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
- } else {
- pkt->ref_tag = cpu_to_le32((uint32_t)
- (0xffffffff & scsi_get_lba(cmd)));
- pkt->app_tag = __constant_cpu_to_le16(0);
- pkt->app_tag_mask[0] = 0x0;
- pkt->app_tag_mask[1] = 0x0;
- }
/* enable ALL bytes of the ref tag */
pkt->ref_tag_mask[0] = 0xff;
pkt->ref_tag_mask[1] = 0xff;
@@ -798,8 +791,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
scsi_get_prot_type(cmd), cmd);
}
+struct qla2_sgx {
+ dma_addr_t dma_addr; /* OUT */
+ uint32_t dma_len; /* OUT */
+
+ uint32_t tot_bytes; /* IN */
+ struct scatterlist *cur_sg; /* IN */
+
+ /* for bookkeeping; zero the whole struct before the first invocation */
+ uint32_t bytes_consumed;
+ uint32_t num_bytes;
+ uint32_t tot_partial;
+
+ /* for debugging */
+ uint32_t num_sg;
+ srb_t *sp;
+};
static int
+qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
+ uint32_t *partial)
+{
+ struct scatterlist *sg;
+ uint32_t cumulative_partial, sg_len;
+ dma_addr_t sg_dma_addr;
+
+ if (sgx->num_bytes == sgx->tot_bytes)
+ return 0;
+
+ sg = sgx->cur_sg;
+ cumulative_partial = sgx->tot_partial;
+
+ sg_dma_addr = sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
+
+ if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
+ sgx->dma_len = (blk_sz - cumulative_partial);
+ sgx->tot_partial = 0;
+ sgx->num_bytes += blk_sz;
+ *partial = 0;
+ } else {
+ sgx->dma_len = sg_len - sgx->bytes_consumed;
+ sgx->tot_partial += sgx->dma_len;
+ *partial = 1;
+ }
+
+ sgx->bytes_consumed += sgx->dma_len;
+
+ if (sg_len == sgx->bytes_consumed) {
+ sg = sg_next(sg);
+ sgx->num_sg++;
+ sgx->cur_sg = sg;
+ sgx->bytes_consumed = 0;
+ }
+
+ return 1;
+}
+
+static int
+qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
+ uint32_t *dsd, uint16_t tot_dsds)
+{
+ void *next_dsd;
+ uint8_t avail_dsds = 0;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct scatterlist *sg_prot;
+ uint32_t *cur_dsd = dsd;
+ uint16_t used_dsds = tot_dsds;
+
+ uint32_t prot_int;
+ uint32_t partial;
+ struct qla2_sgx sgx;
+ dma_addr_t sle_dma;
+ uint32_t sle_dma_len, tot_prot_dma_len = 0;
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ prot_int = cmd->device->sector_size;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(sp->cmd);
+ sgx.cur_sg = scsi_sglist(sp->cmd);
+ sgx.sp = sp;
+
+ sg_prot = scsi_prot_sglist(sp->cmd);
+
+ while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
+
+ sle_dma = sgx.dma_addr;
+ sle_dma_len = sgx.dma_len;
+alloc_and_fill:
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : used_dsds;
+ dsd_list_len = (avail_dsds + 1) * 12;
+ used_dsds -= avail_dsds;
+
+ /* allocate tracking DS */
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr)
+ return 1;
+
+ /* allocate new list */
+ dsd_ptr->dsd_addr = next_dsd =
+ dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+ &dsd_ptr->dsd_list_dma);
+
+ if (!next_dsd) {
+ /*
+ * Only this dsd_ptr needs to be cleaned up here;
+ * the rest will be done by sp_free_dma()
+ */
+ kfree(dsd_ptr);
+ return 1;
+ }
+
+ list_add_tail(&dsd_ptr->list,
+ &((struct crc_context *)sp->ctx)->dsd_list);
+
+ sp->flags |= SRB_CRC_CTX_DSD_VALID;
+
+ /* add new list to cmd iocb or last list */
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = dsd_list_len;
+ cur_dsd = (uint32_t *)next_dsd;
+ }
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sle_dma_len);
+ avail_dsds--;
+
+ if (partial == 0) {
+ /* Got a full protection interval */
+ sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
+ sle_dma_len = 8;
+
+ tot_prot_dma_len += sle_dma_len;
+ if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
+ tot_prot_dma_len = 0;
+ sg_prot = sg_next(sg_prot);
+ }
+
+ partial = 1; /* So as to not re-enter this block */
+ goto alloc_and_fill;
+ }
+ }
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ return 0;
+}
+static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds)
{
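
qla24xx_get_one_block_sg() above carves the data scatterlist into pieces that never cross a protection-interval (sector) boundary; *partial drops to 0 exactly when a full block has been emitted, which is when qla24xx_walk_and_build_sglist_no_difb() appends the matching 8-byte DIF tuple from the protection scatterlist. A simplified userspace sketch of the same carving loop over plain (address, length) segments; every name here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; };     /* one DMA-mapped segment */

struct sgx {
    uint64_t dma_addr;        /* OUT: start of the piece just produced */
    uint32_t dma_len;         /* OUT: length of that piece */
    uint32_t tot_bytes;       /* IN: total data length (multiple of blk_sz) */
    const struct seg *cur;    /* IN: current segment */
    uint32_t bytes_consumed;  /* progress within the current segment */
    uint32_t num_bytes;       /* whole blocks emitted so far, in bytes */
    uint32_t tot_partial;     /* bytes of the in-progress block */
};

/* Produce the next piece, never crossing a block boundary.  Returns 0 when all
 * data has been emitted, 1 otherwise; *partial is 0 exactly when a full block
 * was just completed. */
static int get_one_block(uint32_t blk_sz, struct sgx *s, uint32_t *partial)
{
    uint32_t left_in_seg;

    if (s->num_bytes == s->tot_bytes)
        return 0;

    left_in_seg = s->cur->len - s->bytes_consumed;
    s->dma_addr = s->cur->addr + s->bytes_consumed;

    if (s->tot_partial + left_in_seg >= blk_sz) {
        s->dma_len = blk_sz - s->tot_partial;    /* finish the current block */
        s->tot_partial = 0;
        s->num_bytes += blk_sz;
        *partial = 0;
    } else {
        s->dma_len = left_in_seg;                /* segment ends mid-block */
        s->tot_partial += s->dma_len;
        *partial = 1;
    }

    s->bytes_consumed += s->dma_len;
    if (s->bytes_consumed == s->cur->len) {      /* move to the next segment */
        s->cur++;
        s->bytes_consumed = 0;
    }
    return 1;
}

int main(void)
{
    struct seg segs[] = { { 0x1000, 700 }, { 0x9000, 324 } };  /* 1024 bytes */
    struct sgx s = { 0 };
    uint32_t partial;

    s.tot_bytes = 1024;   /* two 512-byte blocks */
    s.cur = segs;
    while (get_one_block(512, &s, &partial))
        printf("piece addr=0x%llx len=%u %s\n",
               (unsigned long long)s.dma_addr, s.dma_len,
               partial ? "(block continues)" : "(block complete)");
    return 0;
}
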
@@ -981,7 +1128,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
int sgc;
- uint32_t total_bytes;
+ uint32_t total_bytes = 0;
uint32_t data_bytes;
uint32_t dif_bytes;
uint8_t bundling = 1;
@@ -1023,8 +1170,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
__constant_cpu_to_le16(CF_READ_DATA);
}
- tot_prot_dsds = scsi_prot_sg_count(cmd);
- if (!tot_prot_dsds)
+ if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
+ (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
bundling = 0;
/* Allocate CRC context from global pool */
@@ -1047,7 +1196,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
- qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
+ qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
@@ -1076,7 +1225,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
fcp_cmnd->additional_cdb_len |= 2;
int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
- host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1107,15 +1255,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
/* Compute dif len and adjust data len to include protection */
- total_bytes = data_bytes;
dif_bytes = 0;
blk_size = cmd->device->sector_size;
- if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
- dif_bytes = (data_bytes / blk_size) * 8;
- total_bytes += dif_bytes;
+ dif_bytes = (data_bytes / blk_size) * 8;
+
+ switch (scsi_get_prot_op(sp->cmd)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ total_bytes = data_bytes;
+ data_bytes += dif_bytes;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ total_bytes = data_bytes + dif_bytes;
+ break;
+ default:
+ BUG();
}
- if (!ql2xenablehba_err_chk)
+ if (!qla2x00_hba_err_chk_enabled(sp))
fw_prot_opts |= 0x10; /* Disable Guard tag checking */
if (!bundling) {
@@ -1151,7 +1312,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->control_flags |=
__constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
- if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
+
+ if (!bundling && tot_prot_dsds) {
+ if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
+ cur_dsd, tot_dsds))
+ goto crc_queuing_error;
+ } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
(tot_dsds - tot_prot_dsds)))
goto crc_queuing_error;
@@ -1414,6 +1580,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error;
else
sp->flags |= SRB_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ struct qla2_sgx sgx;
+ uint32_t partial;
+
+ memset(&sgx, 0, sizeof(struct qla2_sgx));
+ sgx.tot_bytes = scsi_bufflen(cmd);
+ sgx.cur_sg = scsi_sglist(cmd);
+ sgx.sp = sp;
+
+ nseg = 0;
+ while (qla24xx_get_one_block_sg(
+ cmd->device->sector_size, &sgx, &partial))
+ nseg++;
+ }
} else
nseg = 0;
@@ -1428,6 +1610,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
goto queuing_error;
else
sp->flags |= SRB_CRC_PROT_DMA_VALID;
+
+ if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
+ (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
+ nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
+ }
} else {
nseg = 0;
}
@@ -1454,6 +1641,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Build header part of command packet (excluding the OPCODE). */
req->current_outstanding_cmd = handle;
req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
req->cnt -= req_cnt;
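
The byte-count rework in qla24xx_build_scsi_crc_2_iocbs() above always computes 8 bytes of DIF per sector and then picks the transfer length by protection operation: READ_INSERT/WRITE_STRIP move only the data, while the remaining operations move data plus protection. A small sketch of that accounting; the helper and names below are illustrative rather than the driver's own:

#include <stdint.h>
#include <stdio.h>

enum prot_op {
    OP_READ_INSERT, OP_WRITE_STRIP,   /* protection exists only on the host side */
    OP_READ_STRIP,  OP_WRITE_INSERT,  /* protection exists only on the wire side */
    OP_READ_PASS,   OP_WRITE_PASS     /* protection travels with the data */
};

/* Total bytes the firmware is asked to transfer for a given protection op. */
static uint32_t total_transfer_bytes(enum prot_op op, uint32_t data_bytes,
                                     uint32_t blk_size)
{
    uint32_t dif_bytes = (data_bytes / blk_size) * 8;  /* 8-byte tuple per sector */

    switch (op) {
    case OP_READ_INSERT:
    case OP_WRITE_STRIP:
        return data_bytes;               /* wire carries data only */
    default:
        return data_bytes + dif_bytes;   /* wire carries data plus DIF */
    }
}

int main(void)
{
    printf("64K WRITE_PASS  -> %u bytes\n",
           total_transfer_bytes(OP_WRITE_PASS, 65536, 512));   /* 66560 */
    printf("64K WRITE_STRIP -> %u bytes\n",
           total_transfer_bytes(OP_WRITE_STRIP, 65536, 512));  /* 65536 */
    return 0;
}
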
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b16b7725dee0..8a7591f035e6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -719,7 +719,6 @@ skip_rio:
vha->flags.rscn_queue_overflow = 1;
}
- atomic_set(&vha->loop_state, LOOP_UPDATE);
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -1435,25 +1434,27 @@ struct scsi_dif_tuple {
* ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
* to indicate to the kernel that the HBA detected error.
*/
-static inline void
+static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cmd = sp->cmd;
- struct scsi_dif_tuple *ep =
- (struct scsi_dif_tuple *)&sts24->data[20];
- struct scsi_dif_tuple *ap =
- (struct scsi_dif_tuple *)&sts24->data[12];
+ uint8_t *ap = &sts24->data[12];
+ uint8_t *ep = &sts24->data[20];
uint32_t e_ref_tag, a_ref_tag;
uint16_t e_app_tag, a_app_tag;
uint16_t e_guard, a_guard;
- e_ref_tag = be32_to_cpu(ep->ref_tag);
- a_ref_tag = be32_to_cpu(ap->ref_tag);
- e_app_tag = be16_to_cpu(ep->app_tag);
- a_app_tag = be16_to_cpu(ap->app_tag);
- e_guard = be16_to_cpu(ep->guard);
- a_guard = be16_to_cpu(ap->guard);
+ /*
+ * The swab32 of the "data" field at the beginning of
+ * qla2x00_status_entry() makes the guard field appear at offset 2
+ */
+ a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
+ a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
+ a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
+ e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
+ e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
+ e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
ql_dbg(ql_dbg_io, vha, 0x3023,
"iocb(s) %p Returned STATUS.\n", sts24);
@@ -1465,6 +1466,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
a_app_tag, e_app_tag, a_guard, e_guard);
+ /*
+ * Ignore sector if:
+ * For type 3: ref & app tag is all 'f's
+ * For type 0,1,2: app tag is all 'f's
+ */
+ if ((a_app_tag == 0xffff) &&
+ ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
+ (a_ref_tag == 0xffffffff))) {
+ uint32_t blocks_done, resid;
+ sector_t lba_s = scsi_get_lba(cmd);
+
+ /* 2TB boundary case covered automatically with this */
+ blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
+
+ resid = scsi_bufflen(cmd) - (blocks_done *
+ cmd->device->sector_size);
+
+ scsi_set_resid(cmd, resid);
+ cmd->result = DID_OK << 16;
+
+ /* Update protection tag */
+ if (scsi_prot_sg_count(cmd)) {
+ uint32_t i, j = 0, k = 0, num_ent;
+ struct scatterlist *sg;
+ struct sd_dif_tuple *spt;
+
+ /* Patch the corresponding protection tags */
+ scsi_for_each_prot_sg(cmd, sg,
+ scsi_prot_sg_count(cmd), i) {
+ num_ent = sg_dma_len(sg) / 8;
+ if (k + num_ent < blocks_done) {
+ k += num_ent;
+ continue;
+ }
+ j = blocks_done - k - 1;
+ k = blocks_done;
+ break;
+ }
+
+ if (k != blocks_done) {
+ qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+ "unexpected tag values tag:lba=%x:%llx)\n",
+ e_ref_tag, (unsigned long long)lba_s);
+ return 1;
+ }
+
+ spt = page_address(sg_page(sg)) + sg->offset;
+ spt += j;
+
+ spt->app_tag = 0xffff;
+ if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
+ spt->ref_tag = 0xffffffff;
+ }
+
+ return 0;
+ }
+
/* check guard */
if (e_guard != a_guard) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
@@ -1472,28 +1530,30 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
- return;
+ return 1;
}
- /* check appl tag */
- if (e_app_tag != a_app_tag) {
+ /* check ref tag */
+ if (e_ref_tag != a_ref_tag) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
+ 0x10, 0x3);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
- return;
+ return 1;
}
- /* check ref tag */
- if (e_ref_tag != a_ref_tag) {
+ /* check appl tag */
+ if (e_app_tag != a_app_tag) {
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
+ 0x10, 0x2);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
- return;
+ return 1;
}
+
+ return 1;
}
/**
@@ -1767,7 +1827,7 @@ check_scsi_status:
break;
case CS_DIF_ERROR:
- qla2x00_handle_dif_error(sp, sts24);
+ logit = qla2x00_handle_dif_error(sp, sts24);
break;
default:
cp->result = DID_ERROR << 16;
@@ -2468,11 +2528,10 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
goto skip_msi;
}
- if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
- !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
+ if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
ql_log(ql_log_warn, vha, 0x0035,
"MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
- ha->pdev->revision, ha->fw_attributes);
+ ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
goto skip_msix;
}
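
The reworked qla2x00_handle_dif_error() above treats an all-ones application tag (and, for Type 3, an all-ones reference tag) as an unchecked sector: rather than failing the command it derives how many blocks completed from the expected reference tag versus the starting LBA, sets the residual, patches the corresponding protection tuple and completes with DID_OK. A standalone sketch of just the residual computation, with invented names:

#include <stdint.h>
#include <stdio.h>

/* Given the command's starting LBA, the expected reference tag reported for
 * the flagged sector, the request length and the sector size, work out how
 * many bytes completed and how many are left over (the residual). */
static uint32_t dif_residual(uint64_t start_lba, uint32_t expected_ref_tag,
                             uint32_t bufflen, uint32_t sector_size)
{
    /* Reference tags are 32-bit, so the subtraction also behaves across the
     * 2TB boundary where the LBA no longer fits in 32 bits. */
    uint32_t blocks_done = expected_ref_tag - (uint32_t)start_lba + 1;

    return bufflen - blocks_done * sector_size;
}

int main(void)
{
    /* 16-sector read from LBA 100; firmware flagged the sector whose expected
     * reference tag is 105, i.e. six blocks actually completed. */
    printf("residual = %u bytes\n", dif_residual(100, 105, 16 * 512, 512));
    return 0;
}
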
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c706ed370000..f488cc69fc79 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -472,7 +472,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->can_queue = base_vha->req->length + 128;
host->this_id = 255;
host->cmd_per_lun = 3;
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
host->max_cmd_len = MAX_CMDSZ;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 5cbf33a50b14..049807cda419 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2208,6 +2208,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_82xx __iomem *reg;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2218,11 +2219,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
ha = rsp->hw;
reg = &ha->iobase->isp82;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
WRT_REG_DWORD(&reg->host_int, 0);
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -2838,6 +2839,16 @@ sufficient_dsds:
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
/*
* Update tagged queuing modifier -- default is TSK_SIMPLE (0).
*/
@@ -2854,16 +2865,6 @@ sufficient_dsds:
}
}
- /* build FCP_CMND IU */
- memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
- ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 1;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 2;
-
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e02df276804e..1e69527f1e4e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
/* Do not change the value of this after module load */
-int ql2xenabledif = 1;
+int ql2xenabledif = 0;
module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF "
- " Default is 0 - No DIF Support. 1 - Enable it");
+ " Default is 0 - No DIF Support. 1 - Enable it"
+ ", 2 - Enable DIF for all types, except Type 0.");
-int ql2xenablehba_err_chk;
+int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
- " Enable T10-CRC-DIF Error isolation by HBA"
- " Default is 0 - Error isolation disabled, 1 - Enable it");
+ " Enable T10-CRC-DIF Error isolation by HBA:\n"
+ " Default is 2.\n"
+ " 0 -- Error isolation disabled\n"
+ " 1 -- Error isolation enabled only for DIX Type 0\n"
+ " 2 -- Error isolation enabled for all Types\n");
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO);
@@ -909,7 +913,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Abort command mbx success.\n");
wait = 1;
}
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qla2x00_sp_compl(ha, sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Did the command return during mailbox execution? */
+ if (ret == FAILED && !CMD_SP(cmd))
+ ret = SUCCESS;
/* Wait for the command to be returned. */
if (wait) {
@@ -1317,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- if (ctx->type == SRB_LOGIN_CMD ||
- ctx->type == SRB_LOGOUT_CMD) {
- ctx->u.iocb_cmd->free(sp);
- } else {
+ if (ctx->type == SRB_ELS_CMD_RPT ||
+ ctx->type == SRB_ELS_CMD_HST ||
+ ctx->type == SRB_CT_CMD) {
struct fc_bsg_job *bsg_job =
ctx->u.bsg_job;
if (bsg_job->request->msgcode
@@ -1332,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
kfree(sp->ctx);
mempool_free(sp,
ha->srb_mempool);
+ } else {
+ ctx->u.iocb_cmd->free(sp);
}
}
}
@@ -2251,7 +2263,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->this_id = 255;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif)
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
host->max_cmd_len = MAX_CMDSZ;
@@ -2378,13 +2390,16 @@ skip_dpc:
"Detected hba at address=%p.\n",
ha);
- if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
+ if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
+ int prot = 0;
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
+ if (ql2xenabledif == 1)
+ prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(host,
- SHOST_DIF_TYPE1_PROTECTION
+ prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
| SHOST_DIF_TYPE3_PROTECTION
| SHOST_DIX_TYPE1_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 062c97bf62f5..13b6357c1fa2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.03-k"
+#define QLA2XXX_VERSION "8.03.07.07-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 2c33ce6eac1e..0f5599e0abf6 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -1,6 +1,6 @@
config SCSI_QLA_ISCSI
tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
- depends on PCI && SCSI
+ depends on PCI && SCSI && NET
select SCSI_ISCSI_ATTRS
---help---
This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index d2407558773f..24cacff57786 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
struct device *dev = mspi->dev;
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return;
+
dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 8ac6542aedcd..fa594d604aca 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
if (cs_gpio < 0)
cs_gpio = mxc_platform_info->chipselect[i];
+
+ spi_imx->chipselect[i] = cs_gpio;
if (cs_gpio < 0)
continue;
- spi_imx->chipselect[i] = cs_gpio;
+
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
while (i > 0) {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 1d23f3831866..6a80749391db 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -50,6 +50,8 @@
#define PCH_RX_THOLD 7
#define PCH_RX_THOLD_MAX 15
+#define PCH_TX_THOLD 2
+
#define PCH_MAX_BAUDRATE 5000000
#define PCH_MAX_FIFO_DEPTH 16
@@ -58,6 +60,7 @@
#define PCH_SLEEP_TIME 10
#define SSN_LOW 0x02U
+#define SSN_HIGH 0x03U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
#define PCI_DEVICE_ID_GE_SPI 0x8816
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
- if (tx_index < bpw_len)
+ if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+
+ /* transfer is completed;
+ inform pch_spi_process_messages */
+ data->transfer_complete = true;
+ data->transfer_active = false;
+ wake_up(&data->wait);
+ } else {
dev_err(&data->master->dev,
"%s : Transfer is not completed", __func__);
- /* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
-
- /* transfer is completed;inform pch_spi_process_messages */
- data->transfer_complete = true;
- data->transfer_active = false;
- wake_up(&data->wait);
+ }
}
}
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id)
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
- if (data->use_dma)
- return IRQ_NONE;
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
- if (reg_spsr_val & SPSR_ORF_BIT)
- dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+ if (reg_spsr_val & SPSR_ORF_BIT) {
+ dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+ if (data->current_msg->complete != 0) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete(data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ }
+ }
+
+ if (data->use_dma)
+ return IRQ_NONE;
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
wait_event_interruptible(data->wait, data->transfer_complete);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
- dev_dbg(&data->master->dev,
- "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
-
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
}
}
-static void pch_spi_start_transfer(struct pch_spi_data *data)
+static int pch_spi_start_transfer(struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
unsigned long flags;
+ int rtn;
dma = &data->dma;
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
initiating the transfer. */
dev_dbg(&data->master->dev,
"%s:waiting for transfer to get over\n", __func__);
- wait_event_interruptible(data->wait, data->transfer_complete);
+ rtn = wait_event_interruptible_timeout(data->wait,
+ data->transfer_complete,
+ msecs_to_jiffies(2 * HZ));
dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
DMA_FROM_DEVICE);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+ DMA_FROM_DEVICE);
+ memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
async_tx_ack(dma->desc_rx);
async_tx_ack(dma->desc_tx);
kfree(dma->sg_tx_p);
kfree(dma->sg_rx_p);
spin_lock_irqsave(&data->lock, flags);
- pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
- dev_dbg(&data->master->dev,
- "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
/* clear fifo threshold, disable interrupts, disable SPI transfer */
pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data)
pch_spi_clear_fifo(data->master);
spin_unlock_irqrestore(&data->lock, flags);
+
+ return rtn;
}
static void pch_dma_rx_complete(void *arg)
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* set receive fifo threshold and transmit fifo threshold */
pch_spi_setclr_reg(data->master, PCH_SPCR,
((size - 1) << SPCR_RFIC_FIELD) |
- ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
- SPCR_TFIC_FIELD),
+ (PCH_TX_THOLD << SPCR_TFIC_FIELD),
MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
spin_unlock_irqrestore(&data->lock, flags);
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
/* offset, length setting */
sg = dma->sg_rx_p;
for (i = 0; i < num; i++, sg++) {
- if (i == 0) {
- sg->offset = 0;
+ if (i == (num - 2)) {
+ sg->offset = size * i;
+ sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
sg->offset);
sg_dma_len(sg) = rem;
+ } else if (i == (num - 1)) {
+ sg->offset = size * (i - 1) + rem;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
} else {
- sg->offset = rem + size * (i - 1);
+ sg->offset = size * i;
sg->offset = sg->offset * (*bpw / 8);
sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
sg->offset);
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
dma->desc_rx = desc_rx;
/* TX */
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ size = PCH_DMA_TRANS_SIZE;
+ rem = 16;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ }
+
dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
/* offset, length setting */
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->use_dma)
pch_spi_request_dma(data,
data->current_msg->spi->bits_per_word);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
do {
/* If we are already processing a message get the next
transfer structure from the message otherwise retrieve
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->use_dma) {
pch_spi_handle_dma(data, &bpw);
- pch_spi_start_transfer(data);
+ if (!pch_spi_start_transfer(data))
+ goto out;
pch_spi_copy_rx_data_for_dma(data, bpw);
} else {
pch_spi_set_tx(data, &bpw);
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork)
} while (data->cur_trans != NULL);
+out:
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
if (data->use_dma)
pch_spi_release_dma(data);
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 6859af0778cf..7611def97d06 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -241,8 +241,10 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data);
static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
-#ifdef CONFIG_COMEDI_PCI
+#ifdef CONFIG_ISA_DMA_API
static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
+#endif
+#ifdef CONFIG_COMEDI_PCI
static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
#endif
static int labpc_dio_mem_callback(int dir, int port, int data,
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index a3f5162bfedc..462fbc20561f 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1242,7 +1242,7 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
int ret = 0;
BUG_ON(!is_ephemeral(pool));
- zbud_decompress(virt_to_page(data), pampd);
+ zbud_decompress((struct page *)(data), pampd);
zbud_free_and_delist((struct zbud_hdr *)pampd);
atomic_dec(&zcache_curr_eph_pampd_count);
return ret;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 497b2e718a76..5b773160200f 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1430,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(
u8 DataSequenceInOrder = 0;
u8 ErrorRecoveryLevel = 0, SessionType = 0;
u8 IFMarker = 0, OFMarker = 0;
- u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+ u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
u32 FirstBurstLength = 0, MaxBurstLength = 0;
struct iscsi_param *param = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index a0d23bc0fc98..f00137f377b2 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -875,40 +875,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)
}
/*
- * Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker
- * array counts needed for sync and steering.
- */
-static int iscsit_determine_sync_and_steering_counts(
- struct iscsi_conn *conn,
- struct iscsi_data_count *count)
-{
- u32 length = count->data_length;
- u32 marker, markint;
-
- count->sync_and_steering = 1;
-
- marker = (count->type == ISCSI_RX_DATA) ?
- conn->of_marker : conn->if_marker;
- markint = (count->type == ISCSI_RX_DATA) ?
- (conn->conn_ops->OFMarkInt * 4) :
- (conn->conn_ops->IFMarkInt * 4);
- count->ss_iov_count = count->iov_count;
-
- while (length > 0) {
- if (length >= marker) {
- count->ss_iov_count += 3;
- count->ss_marker_count += 2;
-
- length -= marker;
- marker = markint;
- } else
- length = 0;
- }
-
- return 0;
-}
-
-/*
* Setup conn->if_marker and conn->of_marker values based upon
* the initial marker-less interval. (see iSCSI v19 A.2)
*/
@@ -1290,7 +1256,7 @@ int iscsit_fe_sendpage_sg(
struct kvec iov;
u32 tx_hdr_size, data_len;
u32 offset = cmd->first_data_sg_off;
- int tx_sent;
+ int tx_sent, iov_off;
send_hdr:
tx_hdr_size = ISCSI_HDR_LEN;
@@ -1310,9 +1276,19 @@ send_hdr:
}
data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
- if (conn->conn_ops->DataDigest)
+ /*
+ * Set iov_off used by padding and data digest tx_data() calls below
+ * in order to determine proper offset into cmd->iov_data[]
+ */
+ if (conn->conn_ops->DataDigest) {
data_len -= ISCSI_CRC_LEN;
-
+ if (cmd->padding)
+ iov_off = (cmd->iov_data_count - 2);
+ else
+ iov_off = (cmd->iov_data_count - 1);
+ } else {
+ iov_off = (cmd->iov_data_count - 1);
+ }
/*
* Perform sendpage() for each page in the scatterlist
*/
@@ -1341,8 +1317,7 @@ send_pg:
send_padding:
if (cmd->padding) {
- struct kvec *iov_p =
- &cmd->iov_data[cmd->iov_data_count-1];
+ struct kvec *iov_p = &cmd->iov_data[iov_off++];
tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
if (cmd->padding != tx_sent) {
@@ -1356,8 +1331,7 @@ send_padding:
send_datacrc:
if (conn->conn_ops->DataDigest) {
- struct kvec *iov_d =
- &cmd->iov_data[cmd->iov_data_count];
+ struct kvec *iov_d = &cmd->iov_data[iov_off];
tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
if (ISCSI_CRC_LEN != tx_sent) {
@@ -1431,8 +1405,7 @@ static int iscsit_do_rx_data(
struct iscsi_data_count *count)
{
int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
- u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
- struct kvec iov[count->ss_iov_count], *iov_p;
+ struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
@@ -1440,93 +1413,8 @@ static int iscsit_do_rx_data(
memset(&msg, 0, sizeof(struct msghdr));
- if (count->sync_and_steering) {
- int size = 0;
- u32 i, orig_iov_count = 0;
- u32 orig_iov_len = 0, orig_iov_loc = 0;
- u32 iov_count = 0, per_iov_bytes = 0;
- u32 *rx_marker, old_rx_marker = 0;
- struct kvec *iov_record;
-
- memset(&rx_marker_val, 0,
- count->ss_marker_count * sizeof(u32));
- memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
- iov_record = count->iov;
- orig_iov_count = count->iov_count;
- rx_marker = &conn->of_marker;
-
- i = 0;
- size = data;
- orig_iov_len = iov_record[orig_iov_loc].iov_len;
- while (size > 0) {
- pr_debug("rx_data: #1 orig_iov_len %u,"
- " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
- pr_debug("rx_data: #2 rx_marker %u, size"
- " %u\n", *rx_marker, size);
-
- if (orig_iov_len >= *rx_marker) {
- iov[iov_count].iov_len = *rx_marker;
- iov[iov_count++].iov_base =
- (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &rx_marker_val[rx_marker_iov++];
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &rx_marker_val[rx_marker_iov++];
- old_rx_marker = *rx_marker;
-
- /*
- * OFMarkInt is in 32-bit words.
- */
- *rx_marker = (conn->conn_ops->OFMarkInt * 4);
- size -= old_rx_marker;
- orig_iov_len -= old_rx_marker;
- per_iov_bytes += old_rx_marker;
-
- pr_debug("rx_data: #3 new_rx_marker"
- " %u, size %u\n", *rx_marker, size);
- } else {
- iov[iov_count].iov_len = orig_iov_len;
- iov[iov_count++].iov_base =
- (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- per_iov_bytes = 0;
- *rx_marker -= orig_iov_len;
- size -= orig_iov_len;
-
- if (size)
- orig_iov_len =
- iov_record[++orig_iov_loc].iov_len;
-
- pr_debug("rx_data: #4 new_rx_marker"
- " %u, size %u\n", *rx_marker, size);
- }
- }
- data += (rx_marker_iov * (MARKER_SIZE / 2));
-
- iov_p = &iov[0];
- iov_len = iov_count;
-
- if (iov_count > count->ss_iov_count) {
- pr_err("iov_count: %d, count->ss_iov_count:"
- " %d\n", iov_count, count->ss_iov_count);
- return -1;
- }
- if (rx_marker_iov > count->ss_marker_count) {
- pr_err("rx_marker_iov: %d, count->ss_marker"
- "_count: %d\n", rx_marker_iov,
- count->ss_marker_count);
- return -1;
- }
- } else {
- iov_p = count->iov;
- iov_len = count->iov_count;
- }
+ iov_p = count->iov;
+ iov_len = count->iov_count;
while (total_rx < data) {
rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1541,16 +1429,6 @@ static int iscsit_do_rx_data(
rx_loop, total_rx, data);
}
- if (count->sync_and_steering) {
- int j;
- for (j = 0; j < rx_marker_iov; j++) {
- pr_debug("rx_data: #5 j: %d, offset: %d\n",
- j, rx_marker_val[j]);
- conn->of_marker_offset = rx_marker_val[j];
- }
- total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
- }
-
return total_rx;
}
@@ -1559,8 +1437,7 @@ static int iscsit_do_tx_data(
struct iscsi_data_count *count)
{
int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
- u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
- struct kvec iov[count->ss_iov_count], *iov_p;
+ struct kvec *iov_p;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
@@ -1573,98 +1450,8 @@ static int iscsit_do_tx_data(
memset(&msg, 0, sizeof(struct msghdr));
- if (count->sync_and_steering) {
- int size = 0;
- u32 i, orig_iov_count = 0;
- u32 orig_iov_len = 0, orig_iov_loc = 0;
- u32 iov_count = 0, per_iov_bytes = 0;
- u32 *tx_marker, old_tx_marker = 0;
- struct kvec *iov_record;
-
- memset(&tx_marker_val, 0,
- count->ss_marker_count * sizeof(u32));
- memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
-
- iov_record = count->iov;
- orig_iov_count = count->iov_count;
- tx_marker = &conn->if_marker;
-
- i = 0;
- size = data;
- orig_iov_len = iov_record[orig_iov_loc].iov_len;
- while (size > 0) {
- pr_debug("tx_data: #1 orig_iov_len %u,"
- " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
- pr_debug("tx_data: #2 tx_marker %u, size"
- " %u\n", *tx_marker, size);
-
- if (orig_iov_len >= *tx_marker) {
- iov[iov_count].iov_len = *tx_marker;
- iov[iov_count++].iov_base =
- (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- tx_marker_val[tx_marker_iov] =
- (size - *tx_marker);
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &tx_marker_val[tx_marker_iov++];
- iov[iov_count].iov_len = (MARKER_SIZE / 2);
- iov[iov_count++].iov_base =
- &tx_marker_val[tx_marker_iov++];
- old_tx_marker = *tx_marker;
-
- /*
- * IFMarkInt is in 32-bit words.
- */
- *tx_marker = (conn->conn_ops->IFMarkInt * 4);
- size -= old_tx_marker;
- orig_iov_len -= old_tx_marker;
- per_iov_bytes += old_tx_marker;
-
- pr_debug("tx_data: #3 new_tx_marker"
- " %u, size %u\n", *tx_marker, size);
- pr_debug("tx_data: #4 offset %u\n",
- tx_marker_val[tx_marker_iov-1]);
- } else {
- iov[iov_count].iov_len = orig_iov_len;
- iov[iov_count++].iov_base
- = (iov_record[orig_iov_loc].iov_base +
- per_iov_bytes);
-
- per_iov_bytes = 0;
- *tx_marker -= orig_iov_len;
- size -= orig_iov_len;
-
- if (size)
- orig_iov_len =
- iov_record[++orig_iov_loc].iov_len;
-
- pr_debug("tx_data: #5 new_tx_marker"
- " %u, size %u\n", *tx_marker, size);
- }
- }
-
- data += (tx_marker_iov * (MARKER_SIZE / 2));
-
- iov_p = &iov[0];
- iov_len = iov_count;
-
- if (iov_count > count->ss_iov_count) {
- pr_err("iov_count: %d, count->ss_iov_count:"
- " %d\n", iov_count, count->ss_iov_count);
- return -1;
- }
- if (tx_marker_iov > count->ss_marker_count) {
- pr_err("tx_marker_iov: %d, count->ss_marker"
- "_count: %d\n", tx_marker_iov,
- count->ss_marker_count);
- return -1;
- }
- } else {
- iov_p = count->iov;
- iov_len = count->iov_count;
- }
+ iov_p = count->iov;
+ iov_len = count->iov_count;
while (total_tx < data) {
tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
@@ -1679,9 +1466,6 @@ static int iscsit_do_tx_data(
tx_loop, total_tx, data);
}
- if (count->sync_and_steering)
- total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
-
return total_tx;
}
@@ -1702,12 +1486,6 @@ int rx_data(
c.data_length = data;
c.type = ISCSI_RX_DATA;
- if (conn->conn_ops->OFMarker &&
- (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
- if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
- return -1;
- }
-
return iscsit_do_rx_data(conn, &c);
}
@@ -1728,12 +1506,6 @@ int tx_data(
c.data_length = data;
c.type = ISCSI_TX_DATA;
- if (conn->conn_ops->IFMarker &&
- (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
- if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
- return -1;
- }
-
return iscsit_do_tx_data(conn, &c);
}
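
With the sync-and-steering marker support removed above, iscsit_do_rx_data() and iscsit_do_tx_data() reduce to a loop that keeps calling kernel_recvmsg()/kernel_sendmsg() on the caller's kvec until the requested byte count has moved or an error occurs. A userspace sketch of the same receive loop over a plain socket; the socketpair() demo is illustrative only:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Keep receiving until 'len' bytes have arrived, the peer closes, or an
 * error occurs.  Returns the number of bytes actually received, or -1. */
static ssize_t rx_all(int fd, void *buf, size_t len)
{
    size_t total = 0;

    while (total < len) {
        ssize_t n = recv(fd, (char *)buf + total, len - total, 0);

        if (n < 0)
            return -1;          /* error */
        if (n == 0)
            break;              /* peer closed the connection */
        total += (size_t)n;
    }
    return (ssize_t)total;
}

int main(void)
{
    int sv[2];
    char out[] = "iscsi-data";
    char in[sizeof(out)] = { 0 };

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
        return 1;
    send(sv[1], out, sizeof(out), 0);
    printf("received %zd bytes: %s\n", rx_all(sv[0], in, sizeof(in)), in);
    return 0;
}
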
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 89ae923c5da6..f04d4ef99dca 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,6 +24,7 @@
*/
#include <linux/kernel.h>
+#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -154,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
+static void
+target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off)
+{
+ unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+ unsigned char *buf = buf_off;
+ int cnt = 0, next = 1;
+ /*
+ * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
+ * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
+ * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
+ * to complete the payload. These are based on the VPD=0x80 PRODUCT SERIAL
+ * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
+ * per-device uniqueness.
+ */
+ while (*p != '\0') {
+ if (cnt >= 13)
+ break;
+ if (!isxdigit(*p)) {
+ p++;
+ continue;
+ }
+ if (next != 0) {
+ buf[cnt++] |= hex_to_bin(*p++);
+ next = 0;
+ } else {
+ buf[cnt] = hex_to_bin(*p++) << 4;
+ next = 1;
+ }
+ }
+}
+
/*
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
@@ -219,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
- buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
- hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+ target_parse_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
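
target_parse_naa_6h_vendor_specific() above walks the configured unit serial, skips anything that is not a hex digit (for example the dashes in a UUID-style serial), and packs the surviving digits two per byte into the NAA descriptor, starting in the low nibble of the first byte because its high nibble already carries the NAA type. A standalone sketch of that packing; hex_to_bin() is reimplemented here purely for illustration:

#include <ctype.h>
#include <stdio.h>

static int hex_to_bin(char c)        /* minimal stand-in for the kernel helper */
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Pack the hex digits of 'serial' into 'buf', skipping non-hex characters.
 * buf[0]'s high nibble is assumed to already hold the NAA type, so the first
 * digit lands in its low nibble; later digits fill whole bytes, high nibble
 * first, until 'maxbytes' bytes are full. */
static void pack_vendor_specific(unsigned char *buf, int maxbytes, const char *serial)
{
    int cnt = 0, low_nibble_next = 1;

    while (*serial != '\0' && cnt < maxbytes) {
        if (!isxdigit((unsigned char)*serial)) {
            serial++;
            continue;
        }
        if (low_nibble_next) {
            buf[cnt++] |= hex_to_bin(*serial++);
            low_nibble_next = 0;
        } else {
            buf[cnt] = hex_to_bin(*serial++) << 4;
            low_nibble_next = 1;
        }
    }
}

int main(void)
{
    unsigned char naa[13] = { 0x60 };   /* NAA type 6 in the top nibble */
    int i;

    pack_vendor_specific(naa, 13, "f3a8-27c1-90de-44b1");
    for (i = 0; i < 13; i++)
        printf("%02x", naa[i]);
    printf("\n");
    return 0;
}
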
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 8d0c58ea6316..a4b0a8d27f25 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -977,15 +977,17 @@ static void target_qf_do_work(struct work_struct *work)
{
struct se_device *dev = container_of(work, struct se_device,
qf_work_queue);
+ LIST_HEAD(qf_cmd_list);
struct se_cmd *cmd, *cmd_tmp;
spin_lock_irq(&dev->qf_cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+ list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
+ spin_unlock_irq(&dev->qf_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count);
smp_mb__after_atomic_dec();
- spin_unlock_irq(&dev->qf_cmd_lock);
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -997,10 +999,7 @@ static void target_qf_do_work(struct work_struct *work)
* has been added to head of queue
*/
transport_add_cmd_to_queue(cmd, cmd->t_state);
-
- spin_lock_irq(&dev->qf_cmd_lock);
}
- spin_unlock_irq(&dev->qf_cmd_lock);
}
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
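
target_qf_do_work() above now splices the queue-full list onto a local list head while holding qf_cmd_lock, drops the lock once, and walks the private copy, instead of releasing and retaking the lock around every element. A userspace sketch of the splice-then-process idiom with a singly linked list and a POSIX mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;            /* shared list, protected by list_lock */

static void process(struct node *n) { printf("processing cmd %d\n", n->id); }

static void drain_pending(void)
{
    struct node *local, *n;

    /* Splice: grab the whole list under the lock and leave it empty ... */
    pthread_mutex_lock(&list_lock);
    local = pending;
    pending = NULL;
    pthread_mutex_unlock(&list_lock);

    /* ... then do the slow per-element work without holding the lock at all. */
    while ((n = local) != NULL) {
        local = n->next;
        process(n);
        free(n);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->id = i;
        n->next = pending;
        pending = n;
    }
    drain_pending();
    return 0;
}
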
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index bd4fe21a23b8..3749d8b4b423 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -98,8 +98,7 @@ struct ft_tpg {
struct list_head list; /* linkage in ft_lport_acl tpg_list */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
- struct task_struct *thread; /* processing thread */
- struct se_queue_obj qobj; /* queue for processing thread */
+ struct workqueue_struct *workqueue;
};
struct ft_lport_acl {
@@ -110,16 +109,10 @@ struct ft_lport_acl {
struct se_wwn fc_lport_wwn;
};
-enum ft_cmd_state {
- FC_CMD_ST_NEW = 0,
- FC_CMD_ST_REJ
-};
-
/*
* Commands
*/
struct ft_cmd {
- enum ft_cmd_state state;
u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
@@ -127,7 +120,7 @@ struct ft_cmd {
struct fc_frame *req_frame;
unsigned char *cdb; /* pointer to CDB inside frame */
u32 write_data_len; /* data received on writes */
- struct se_queue_req se_req;
+ struct work_struct work;
/* Local sense buffer */
unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
u32 was_ddp_setup:1; /* Set only if ddp is setup */
@@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);
/*
* other internal functions.
*/
-int ft_thread(void *);
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 5654dc22f7ae..80fbcde00cb6 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -62,8 +62,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
int count;
se_cmd = &cmd->se_cmd;
- pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
- caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
+ pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
+ caller, cmd, cmd->sess, cmd->seq, se_cmd);
pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
@@ -90,38 +90,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
-static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
-{
- struct ft_tpg *tpg = sess->tport->tpg;
- struct se_queue_obj *qobj = &tpg->qobj;
- unsigned long flags;
-
- qobj = &sess->tport->tpg->qobj;
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
- atomic_inc(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- wake_up_process(tpg->thread);
-}
-
-static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
-{
- unsigned long flags;
- struct se_queue_req *qr;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (list_empty(&qobj->qobj_list)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return NULL;
- }
- qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
- list_del(&qr->qr_list);
- atomic_dec(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return container_of(qr, struct ft_cmd, se_req);
-}
-
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
@@ -282,9 +250,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
int ft_get_cmd_state(struct se_cmd *se_cmd)
{
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
- return cmd->state;
+ return 0;
}
int ft_is_state_remove(struct se_cmd *se_cmd)
@@ -505,6 +471,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
return 0;
}
+static void ft_send_work(struct work_struct *work);
+
/*
* Handle incoming FCP command.
*/
@@ -523,7 +491,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
- ft_queue_cmd(sess, cmd);
+
+ INIT_WORK(&cmd->work, ft_send_work);
+ queue_work(sess->tport->tpg->workqueue, &cmd->work);
return;
busy:
@@ -563,12 +533,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
/*
* Send new command to target.
*/
-static void ft_send_cmd(struct ft_cmd *cmd)
+static void ft_send_work(struct work_struct *work)
{
+ struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
struct se_cmd *se_cmd;
struct fcp_cmnd *fcp;
- int data_dir;
+ int data_dir = 0;
u32 data_len;
int task_attr;
int ret;
@@ -675,42 +646,3 @@ static void ft_send_cmd(struct ft_cmd *cmd)
err:
ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
-
-/*
- * Handle request in the command thread.
- */
-static void ft_exec_req(struct ft_cmd *cmd)
-{
- pr_debug("cmd state %x\n", cmd->state);
- switch (cmd->state) {
- case FC_CMD_ST_NEW:
- ft_send_cmd(cmd);
- break;
- default:
- break;
- }
-}
-
-/*
- * Processing thread.
- * Currently one thread per tpg.
- */
-int ft_thread(void *arg)
-{
- struct ft_tpg *tpg = arg;
- struct se_queue_obj *qobj = &tpg->qobj;
- struct ft_cmd *cmd;
-
- while (!kthread_should_stop()) {
- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
- if (kthread_should_stop())
- goto out;
-
- cmd = ft_dequeue_cmd(qobj);
- if (cmd)
- ft_exec_req(cmd);
- }
-
-out:
- return 0;
-}
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index b15879d43e22..8fa39b74f22c 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -327,7 +327,6 @@ static struct se_portal_group *ft_add_tpg(
tpg->index = index;
tpg->lport_acl = lacl;
INIT_LIST_HEAD(&tpg->lun_list);
- transport_init_queue_obj(&tpg->qobj);
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
tpg, TRANSPORT_TPG_TYPE_NORMAL);
@@ -336,8 +335,8 @@ static struct se_portal_group *ft_add_tpg(
return NULL;
}
- tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index);
- if (IS_ERR(tpg->thread)) {
+ tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
+ if (!tpg->workqueue) {
kfree(tpg);
return NULL;
}
@@ -356,7 +355,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
- kthread_stop(tpg->thread);
+ destroy_workqueue(tpg->workqueue);
/* Wait for sessions to be freed thru RCU, for BUG_ON below */
synchronize_rcu();
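Taken together, the tcm_fc changes swap the per-tpg kthread plus se_queue_obj for a workqueue: each command embeds a work_struct, the receive path queues it with queue_work(), and the handler recovers the command via container_of(). A minimal module-style sketch of that embed-a-work_struct pattern follows, assuming a single-threaded workqueue as in alloc_workqueue("tcm_fc", 0, 1); the example_* names are hypothetical.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *example_wq;

struct example_cmd {
        int payload;
        struct work_struct work;        /* embedded, like ft_cmd::work */
};

static void example_work_fn(struct work_struct *work)
{
        struct example_cmd *cmd = container_of(work, struct example_cmd, work);

        pr_debug("processing cmd with payload %d\n", cmd->payload);
        kfree(cmd);
}

static int __init example_init(void)
{
        struct example_cmd *cmd;

        /* one concurrent work item, mirroring alloc_workqueue("tcm_fc", 0, 1) */
        example_wq = alloc_workqueue("example_wq", 0, 1);
        if (!example_wq)
                return -ENOMEM;

        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                destroy_workqueue(example_wq);
                return -ENOMEM;
        }
        cmd->payload = 42;
        INIT_WORK(&cmd->work, example_work_fn);
        queue_work(example_wq, &cmd->work);
        return 0;
}

static void __exit example_exit(void)
{
        destroy_workqueue(example_wq);  /* flushes pending work first */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");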
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index c37f4cd96452..d35ea5a3d56c 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -219,43 +219,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
if (cmd->was_ddp_setup) {
BUG_ON(!ep);
BUG_ON(!lport);
- }
-
- /*
- * Doesn't expect payload if DDP is setup. Payload
- * is expected to be copied directly to user buffers
- * due to DDP (Large Rx offload),
- */
- buf = fc_frame_payload_get(fp, 1);
- if (buf)
- pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+ /*
+ * Since DDP (Large Rx offload) was setup for this request,
+ * payload is expected to be copied directly to user buffers.
+ */
+ buf = fc_frame_payload_get(fp, 1);
+ if (buf)
+ pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
"cmd->sg_cnt 0x%x. DDP was setup"
" hence not expected to receive frame with "
- "payload, Frame will be dropped if "
- "'Sequence Initiative' bit in f_ctl is "
+ "payload, Frame will be dropped if"
+ "'Sequence Initiative' bit in f_ctl is"
"not set\n", __func__, ep->xid, f_ctl,
cmd->sg, cmd->sg_cnt);
- /*
- * Invalidate HW DDP context if it was setup for respective
- * command. Invalidation of HW DDP context is requited in both
- * situation (success and error).
- */
- ft_invl_hw_context(cmd);
+ /*
+ * Invalidate HW DDP context if it was setup for respective
+ * command. Invalidation of HW DDP context is requited in both
+ * situation (success and error).
+ */
+ ft_invl_hw_context(cmd);
- /*
- * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
- * write data frame is received successfully where payload is
- * posted directly to user buffer and only the last frame's
- * header is posted in receive queue.
- *
- * If "Sequence Initiative (TSI)" bit is not set, means error
- * condition w.r.t. DDP, hence drop the packet and let explict
- * ABORTS from other end of exchange timer trigger the recovery.
- */
- if (f_ctl & FC_FC_SEQ_INIT)
- goto last_frame;
- else
- goto drop;
+ /*
+ * If "Sequence Initiative (TSI)" bit set in f_ctl, means last
+ * write data frame is received successfully where payload is
+ * posted directly to user buffer and only the last frame's
+ * header is posted in receive queue.
+ *
+ * If "Sequence Initiative (TSI)" bit is not set, means error
+ * condition w.r.t. DDP, hence drop the packet and let explict
+ * ABORTS from other end of exchange timer trigger the recovery.
+ */
+ if (f_ctl & FC_FC_SEQ_INIT)
+ goto last_frame;
+ else
+ goto drop;
+ }
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 225123b37f19..58be715913cd 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -4450,7 +4450,7 @@ static int __init rs_init(void)
#if defined(CONFIG_ETRAX_RS485)
#if defined(CONFIG_ETRAX_RS485_ON_PA)
- if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
+ if (cris_io_interface_allocate_pins(if_serial_0, 'a', rs485_pa_bit,
rs485_pa_bit)) {
printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
@@ -4459,7 +4459,7 @@ static int __init rs_init(void)
}
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
- if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
+ if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
rs485_port_g_bit)) {
printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1e96d1f1fe6b..723f8231193d 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -761,7 +761,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
memset(buf, 0, retval);
status = 0;
- mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC;
+ mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC;
spin_lock_irqsave(&xhci->lock, flags);
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 54139a2f06ce..952e2ded61af 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1934,8 +1934,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct urb_priv *urb_priv;
struct xhci_ep_ctx *ep_ctx;
+ struct list_head *tmp;
u32 trb_comp_code;
int ret = 0;
+ int td_num = 0;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
@@ -1957,6 +1959,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return -ENODEV;
}
+ /* Count current td numbers if ep->skip is set */
+ if (ep->skip) {
+ list_for_each(tmp, &ep_ring->td_list)
+ td_num++;
+ }
+
event_dma = le64_to_cpu(event->buffer);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
/* Look for common error cases */
@@ -2068,7 +2076,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto cleanup;
}
+ /* We've skipped all the TDs on the ep ring when ep->skip set */
+ if (ep->skip && td_num == 0) {
+ ep->skip = false;
+ xhci_dbg(xhci, "All tds on the ep_ring skipped. "
+ "Clear skip flag.\n");
+ ret = 0;
+ goto cleanup;
+ }
+
td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+ if (ep->skip)
+ td_num--;
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
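The handle_tx_event() change counts the TDs queued on the ring when ep->skip is set, decrements that count as each stale TD is skipped, and clears the flag once the count reaches zero, so a later event is not matched against a TD that no longer exists. A tiny standalone C sketch of that counting and flag-clearing idea follows; struct ring and the numbers are invented.

#include <stdio.h>
#include <stdbool.h>

/* hypothetical ring state: 'skip' means flush descriptors queued before an error */
struct ring {
        int pending;    /* snapshot of queued TDs, like td_num in the diff */
        bool skip;
};

static void handle_event(struct ring *r)
{
        if (r->skip && r->pending == 0) {
                r->skip = false;        /* everything stale has been consumed */
                printf("all TDs skipped, clearing skip flag\n");
                return;
        }
        if (r->skip) {
                r->pending--;           /* consume one more stale TD */
                printf("skipping stale TD, %d left\n", r->pending);
        }
}

int main(void)
{
        struct ring r = { .pending = 2, .skip = true };

        handle_event(&r);       /* skips one */
        handle_event(&r);       /* skips the last one */
        handle_event(&r);       /* clears the flag */
        return 0;
}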
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 80d292fb92d8..7363c1b169e8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,7 +19,7 @@
#include <asm/backlight.h>
#endif
-static const char const *backlight_types[] = {
+static const char *const backlight_types[] = {
[BACKLIGHT_RAW] = "raw",
[BACKLIGHT_PLATFORM] = "platform",
[BACKLIGHT_FIRMWARE] = "firmware",
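The backlight fix moves the second const to the right of the *: `const char const *` merely repeats the first qualifier (the strings are const, the array slots are not), while `const char *const` also makes the pointers themselves read-only. A short standalone C illustration follows; the string values are arbitrary.

#include <stdio.h>

/* duplicate qualifier: compiles (possibly with a warning), pointers stay writable */
static const char const *loose[] = { "raw", "platform" };

/* what the fix uses: both the chars and the pointers are read-only */
static const char *const fixed[] = { "raw", "platform" };

int main(void)
{
        loose[0] = "oops";      /* allowed: only the pointed-to chars are const */
        /* fixed[0] = "oops"; *//* would be a compile error */
        printf("%s %s\n", loose[0], fixed[0]);
        return 0;
}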
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 410fba45378d..809cbda03d7a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -494,15 +494,16 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
asminline_call(&cmn_regs, cru_rom_addr);
die_nmi_called = 1;
spin_unlock_irqrestore(&rom_lock, rom_pl);
+
+ if (allow_kdump)
+ hpwdt_stop();
+
if (!is_icru) {
if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ panic("An NMI occurred, "
"but unable to determine source.\n");
}
}
-
- if (allow_kdump)
- hpwdt_stop();
panic("An NMI occurred, please see the Integrated "
"Management Log for details.\n");
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 7d82adac1cb2..102aed0efbf1 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -51,16 +51,16 @@ static int ltq_wdt_ok_to_close;
static void
ltq_wdt_enable(void)
{
- ltq_wdt_timeout = ltq_wdt_timeout *
+ unsigned long int timeout = ltq_wdt_timeout *
(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
- if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT)
- ltq_wdt_timeout = LTQ_MAX_TIMEOUT;
+ if (timeout > LTQ_MAX_TIMEOUT)
+ timeout = LTQ_MAX_TIMEOUT;
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
/* write the second magic plus the configuration and new timeout */
ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
- LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR);
+ LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
}
static void
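The lantiq_wdt fix stops scaling ltq_wdt_timeout in place: rescaling the stored value by the clock rate on every enable would compound the multiplication and clobber the configured timeout, so the scaled value now lives in a local. A tiny standalone C sketch of the difference follows; cfg_timeout and the rate are invented.

#include <stdio.h>

static unsigned long cfg_timeout = 30; /* user-configured value, like ltq_wdt_timeout */

/* buggy variant: rescales the stored value on every call */
static unsigned long enable_in_place(unsigned long rate)
{
        cfg_timeout = cfg_timeout * rate + 0x1000;
        return cfg_timeout;
}

/* fixed variant: scale into a local, leave the configuration untouched */
static unsigned long enable_local(unsigned long rate)
{
        unsigned long hw = cfg_timeout * rate + 0x1000;
        return hw;
}

int main(void)
{
        unsigned long a, b;

        a = enable_local(100);
        b = enable_local(100);
        printf("local: %lu then %lu (stable)\n", a, b);

        a = enable_in_place(100);
        b = enable_in_place(100);
        printf("in-place: %lu then %lu (compounds)\n", a, b);
        return 0;
}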
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 3066a5127ca8..eaca366b7234 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -173,7 +173,7 @@ static struct notifier_block epx_c3_notifier = {
.notifier_call = epx_c3_notify_sys,
};
-static const char banner[] __initdata = KERN_INFO PFX
+static const char banner[] __initconst = KERN_INFO PFX
"Hardware Watchdog Timer for Winsystems EPX-C3 SBC: 0.1\n";
static int __init watchdog_init(void)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index d33520d0b4c9..1199da0f98cf 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -59,7 +59,7 @@ static struct watchdog_device *wdd;
static int watchdog_ping(struct watchdog_device *wddev)
{
- if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (test_bit(WDOG_ACTIVE, &wddev->status)) {
if (wddev->ops->ping)
return wddev->ops->ping(wddev); /* ping the watchdog */
else
@@ -81,12 +81,12 @@ static int watchdog_start(struct watchdog_device *wddev)
{
int err;
- if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
err = wddev->ops->start(wddev);
if (err < 0)
return err;
- set_bit(WDOG_ACTIVE, &wdd->status);
+ set_bit(WDOG_ACTIVE, &wddev->status);
}
return 0;
}
@@ -105,18 +105,18 @@ static int watchdog_stop(struct watchdog_device *wddev)
{
int err = -EBUSY;
- if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
+ if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
pr_info("%s: nowayout prevents watchdog to be stopped!\n",
- wdd->info->identity);
+ wddev->info->identity);
return err;
}
- if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (test_bit(WDOG_ACTIVE, &wddev->status)) {
err = wddev->ops->stop(wddev);
if (err < 0)
return err;
- clear_bit(WDOG_ACTIVE, &wdd->status);
+ clear_bit(WDOG_ACTIVE, &wddev->status);
}
return 0;
}
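The watchdog_dev fixes make watchdog_ping/start/stop test and update the status of the device actually passed in (wddev) rather than the file-scope wdd pointer, so the helpers stay correct once more than one watchdog is involved. A small standalone C sketch of why the parameter, not a global, should carry the state; struct wd_device and the flag value are invented.

#include <stdio.h>

#define WDOG_ACTIVE 0x1u

struct wd_device {
        const char *name;
        unsigned int status;
};

/* operate on the device we were handed, never on a global */
static int wd_start(struct wd_device *wddev)
{
        if (!(wddev->status & WDOG_ACTIVE))
                wddev->status |= WDOG_ACTIVE;
        return 0;
}

int main(void)
{
        struct wd_device a = { "wd0", 0 }, b = { "wd1", 0 };

        wd_start(&b);   /* only wd1 becomes active */
        printf("%s: %u, %s: %u\n", a.name, a.status, b.name, b.status);
        return 0;
}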
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index da70f5c32eb9..7523719bf8a4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
*/
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
static LIST_HEAD(xen_irq_list_head);
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
int irq = -1;
struct physdev_irq irq_op;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = find_irq_by_gsi(gsi);
if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
handle_edge_irq, name);
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
{
int irq, ret;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = xen_allocate_irq_dynamic();
if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
if (ret < 0)
goto error_irq;
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
error_irq:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
xen_free_irq(irq);
return -1;
}
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
struct irq_info *info = info_for_irq(irq);
int rc = -ENOENT;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
desc = irq_to_desc(irq);
if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
xen_free_irq(irq);
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return rc;
}
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
struct irq_info *info;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
list_for_each_entry(info, &xen_irq_list_head, list) {
if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
}
irq = -1;
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
{
int irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = evtchn_to_irq[evtchn];
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
struct evtchn_bind_ipi bind_ipi;
int evtchn, irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(ipi_to_irq, cpu)[ipi];
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
struct evtchn_bind_virq bind_virq;
int evtchn, irq;
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
irq = per_cpu(virq_to_irq, cpu)[virq];
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
}
out:
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
return irq;
}
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
xen_free_irq(irq);
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
will also be masked. */
disable_irq(irq);
- spin_lock(&irq_mapping_update_lock);
+ mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
xen_irq_info_evtchn_init(irq, evtchn);
- spin_unlock(&irq_mapping_update_lock);
+ mutex_unlock(&irq_mapping_update_lock);
/* new event channels are always bound to cpu 0 */
irq_set_affinity(irq, cpumask_of(0));
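Converting irq_mapping_update_lock from a spinlock to a mutex lets the paths that update the mapping tables sleep while holding the lock, for example to allocate with GFP_KERNEL, which a spinlock would not allow. Below is a minimal kernel-flavoured sketch of that locking shape; mapping_lock and mapping_add are illustrative names, not the Xen code.

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(mapping_lock);

struct mapping {
        int irq;
};

/* hypothetical update path: sleeping while the mutex is held is fine */
static struct mapping *mapping_add(int irq)
{
        struct mapping *m;

        mutex_lock(&mapping_lock);
        m = kzalloc(sizeof(*m), GFP_KERNEL);    /* may sleep: OK under a mutex */
        if (m)
                m->irq = irq;
        mutex_unlock(&mapping_lock);
        return m;
}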
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index e0c2807b0970..181fa8158a8b 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, bus);
- /* Register all devices */
pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+ /* First identify all devices ... */
for (i = 0; i < zorro_num_autocon; i++) {
z = &zorro_autocon[i];
z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
dev_set_name(&z->dev, "%02x", i);
z->dev.parent = &bus->dev;
z->dev.bus = &zorro_bus_type;
+ }
+
+ /* ... then register them */
+ for (i = 0; i < zorro_num_autocon; i++) {
+ z = &zorro_autocon[i];
error = device_register(&z->dev);
if (error) {
dev_err(&bus->dev, "Error registering device %s\n",