author	Dan Williams <dan.j.williams@intel.com>	2009-12-22 17:21:47 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2009-12-22 17:21:47 -0700
commit	f80ca163d65903276bec7045a484a79c0897eb2d (patch)
tree	97c7d61d43248b9db0757a76af80ff58b98b5599 /drivers/dma
parent	0794ec8ce327ec74416b569b8fb1951274693700 (diff)
parent	a6d52d70677e99bdb89b6921c265d0a58c22e597 (diff)
Merge branch 'ioat' into fixes
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/Kconfig	2
-rw-r--r--	drivers/dma/dmaengine.c	10
-rw-r--r--	drivers/dma/ioat/dca.c	6
-rw-r--r--	drivers/dma/ioat/dma.c	2
-rw-r--r--	drivers/dma/ioat/dma.h	22
-rw-r--r--	drivers/dma/ioat/dma_v2.c	71
-rw-r--r--	drivers/dma/ioat/dma_v2.h	2
-rw-r--r--	drivers/dma/ioat/dma_v3.c	100
-rw-r--r--	drivers/dma/ioat/hw.h	2
-rw-r--r--	drivers/dma/ioat/registers.h	5
-rw-r--r--	drivers/dma/shdma.c	12
11 files changed, 179 insertions, 55 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fe93d70f2e37..93b058fec1a5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
select DMA_ENGINE
select DCA
select ASYNC_TX_DISABLE_CHANNEL_SWITCH
+ select ASYNC_TX_DISABLE_PQ_VAL_DMA
+ select ASYNC_TX_DISABLE_XOR_VAL_DMA
help
Enable support for the Intel(R) I/OAT DMA engine present
in recent Intel Xeon chipsets.
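
For context, a "select" in Kconfig forces the named symbol on in every build that enables INTEL_IOATDMA, and each symbol surfaces in C as a CONFIG_ macro. A minimal sketch of the resulting compile-time gate (function name hypothetical):

	/* With ASYNC_TX_DISABLE_XOR_VAL_DMA selected above, the async_tx
	 * core compiles out the engine-offload path for validate
	 * operations and checks parity on the CPU instead.
	 */
	static bool xor_val_offloaded(void)
	{
	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
		return false;	/* validate runs in software */
	#else
		return true;	/* DMA_XOR_VAL may go to the engine */
	#endif
	}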
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bd0b248de2cf..8f99354082ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
if (!dma_has_cap(DMA_XOR, device->cap_mask))
return false;
+
+ #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+ if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+ return false;
+ #endif
#endif
#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
if (!dma_has_cap(DMA_PQ, device->cap_mask))
return false;
+
+ #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+ if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+ return false;
+ #endif
#endif
return true;
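
The check above pairs with capability advertisement on the driver side. A minimal sketch (function name hypothetical) of what a channel-switch-disabled driver sets, assuming the matching CONFIG_ASYNC_TX_DISABLE_*_VAL_DMA symbols are selected:

	static void advertise_caps(struct dma_device *dma)
	{
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma_cap_set(DMA_PQ, dma->cap_mask);
		/* DMA_XOR_VAL and DMA_PQ_VAL may stay clear: the new
		 * #ifndef blocks above no longer require them when the
		 * corresponding ASYNC_TX_DISABLE_*_VAL_DMA symbol is set.
		 */
	}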
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 69d02615c4d6..abd9038e06b1 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
cpuid_level_9 = cpuid_eax(9);
res = test_bit(0, &cpuid_level_9);
if (!res)
- dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+ dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
return res;
}
-static int system_has_dca_enabled(struct pci_dev *pdev)
+int system_has_dca_enabled(struct pci_dev *pdev)
{
if (boot_cpu_has(X86_FEATURE_DCA))
return dca_enabled_in_bios(pdev);
- dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+ dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
return 0;
}
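
Taken together, the two tests above gate DCA on both a CPU feature flag and a BIOS enable bit reported through CPUID leaf 9. A minimal standalone sketch (function name hypothetical) of the same probe:

	static bool dca_usable(void)
	{
		unsigned long eax;

		if (!boot_cpu_has(X86_FEATURE_DCA))
			return false;		/* CPU has no DCA at all */

		eax = cpuid_eax(9);		/* leaf 9: Direct Cache Access */
		return test_bit(0, &eax);	/* bit 0: DCA enabled by BIOS */
	}

Demoting the messages from dev_err() to dev_dbg() reflects that DCA being off is a normal configuration, not a fault.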
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c524d36d3c2e..dcc4ab78b32b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device)
dma->dev = &pdev->dev;
if (!dma->chancnt) {
- dev_err(dev, "zero channels detected\n");
+ dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c14fdfeb7f33..bbc3e78ef333 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -60,6 +60,7 @@
* @dca: direct cache access context
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
+ * @reset_hw: hw version specific channel (re)initialization
* @cleanup_tasklet: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
@@ -78,6 +79,7 @@ struct ioatdma_device {
struct dca_provider *dca;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
+ int (*reset_hw)(struct ioat_chan_common *chan);
void (*cleanup_tasklet)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
@@ -264,6 +266,22 @@ static inline void ioat_suspend(struct ioat_chan_common *chan)
writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
+static inline void ioat_reset(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+
+ writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+}
+
+static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u8 cmd;
+
+ cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+ return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
+}
+
static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
struct ioat_chan_common *chan = &ioat->base;
@@ -297,9 +315,7 @@ static inline bool is_ioat_suspended(unsigned long status)
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
- return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
- IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
- IOAT_CHANERR_LENGTH_ERR));
+ return !!err;
}
static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
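
The RESET command bit written by ioat_reset() is self-clearing: the hardware holds it at 1 until the reset completes, which is what ioat_reset_pending() tests. A minimal sketch (function name hypothetical) of the bounded poll these two inlines enable, mirroring ioat2_reset_sync() in dma_v2.c below:

	static int hard_reset_sketch(struct ioat_chan_common *chan,
				     unsigned long tmo)
	{
		unsigned long end = jiffies + tmo;

		ioat_reset(chan);			/* write IOAT_CHANCMD_RESET */
		while (ioat_reset_pending(chan)) {	/* bit clears on completion */
			if (time_after(jiffies, end))
				return -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}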
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 96ffab7d37a7..5f7a500e18d0 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -239,20 +239,50 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
__ioat2_start_null_desc(ioat);
}
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
- struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ unsigned long end = jiffies + tmo;
+ int err = 0;
u32 status;
status = ioat_chansts(chan);
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
+ if (tmo && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
status = ioat_chansts(chan);
cpu_relax();
}
+ return err;
+}
+
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+{
+ unsigned long end = jiffies + tmo;
+ int err = 0;
+
+ ioat_reset(chan);
+ while (ioat_reset_pending(chan)) {
+ if (tmo && time_after(jiffies, end)) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ cpu_relax();
+ }
+
+ return err;
+}
+
+static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
@@ -279,6 +309,8 @@ void ioat2_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
BUG_ON(is_ioat_bug(chanerr));
}
@@ -316,6 +348,19 @@ void ioat2_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock);
}
+static int ioat2_reset_hw(struct ioat_chan_common *chan)
+{
+ /* throw away whatever the channel was doing and get it initialized */
+ u32 chanerr;
+
+ ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
/**
* ioat2_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
@@ -358,6 +403,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
(unsigned long) ioat);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
+ if (device->reset_hw(&ioat->base)) {
+ i = 0;
+ break;
+ }
}
dma->chancnt = i;
return i;
@@ -465,7 +514,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent **ring;
- u32 chanerr;
int order;
/* have we already been set up? */
@@ -475,12 +523,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
/* Setup register to interrupt and write completion status on error */
writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
- chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
- if (chanerr) {
- dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
- writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- }
-
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
chan->completion = pci_pool_alloc(chan->device->completion_pool,
@@ -744,13 +786,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
device->cleanup_tasklet((unsigned long) ioat);
-
- /* Delay 100ms after reset to allow internal DMA logic to quiesce
- * before removing DMA descriptor resources.
- */
- writeb(IOAT_CHANCMD_RESET,
- chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
- mdelay(100);
+ device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock);
descs = ioat2_ring_space(ioat);
@@ -837,6 +873,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
int err;
device->enumerate_channels = ioat2_enumerate_channels;
+ device->reset_hw = ioat2_reset_hw;
device->cleanup_tasklet = ioat2_cleanup_tasklet;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
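
With ->reset_hw wired up, the same quiesce-then-reset path now serves init (enumerate_channels), teardown (free_chan_resources), and error recovery, replacing the old open-coded reset plus mdelay(100). Note the deadline convention: tmo == 0 skips the time_after() guard entirely, so the restart path's ioat2_quiesce(chan, 0) waits as long as the hardware needs. A minimal sketch (function name hypothetical):

	static void recover_channel_sketch(struct ioat2_dma_chan *ioat)
	{
		struct ioat_chan_common *chan = &ioat->base;

		ioat2_quiesce(chan, 0);		/* no deadline: wait it out */
		if (chan->device->reset_hw(chan))
			dev_err(to_dev(chan), "channel reset timed out\n");
	}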
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 1d849ef74d5f..3afad8da43cc 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -185,6 +185,8 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_tasklet(unsigned long data);
void ioat2_timer_event(unsigned long data);
+int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
+int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..9908c9e94b2d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
u32 chanerr;
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+ __func__, chanerr);
BUG_ON(is_ioat_bug(chanerr));
}
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
dump_desc_dbg(ioat, compl_desc);
/* we leave the channel locked to ensure in order submission */
- return &desc->txd;
+ return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
@@ -648,9 +650,11 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 3
- * sources
+ * sources (we need 1 extra source in the q-only continuation
+ * case and 3 extra sources in the p+q continuation case).
*/
- if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
+ if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+ (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
with_ext = 1;
num_descs *= 2;
} else
@@ -728,7 +732,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
dump_desc_dbg(ioat, compl_desc);
/* we leave the channel locked to ensure in order submission */
- return &desc->txd;
+ return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
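
A worked example of the descriptor-count condition above. One hardware descriptor addresses at most 3 sources; a q-only continuation implies 1 hidden source and a p+q continuation implies 3, per the updated comment. A sketch (function name hypothetical) using the dmaf_* helpers referenced above:

	/*
	 *   src_cnt=3, no continuation       -> 3 sources, no extended desc
	 *   src_cnt=3, continue, P disabled  -> 3 + 1 = 4, extended desc
	 *   src_cnt=2, continue, P enabled   -> 2 + 3 = 5, extended desc
	 */
	static bool needs_ext_sketch(int src_cnt, unsigned long flags)
	{
		return src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
		       (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags));
	}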
@@ -736,10 +740,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ dst[0] = dst[1];
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ dst[1] = dst[0];
+
/* handle the single source multiply case from the raid6
* recovery path
*/
- if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+ if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
dma_addr_t single_source[2];
unsigned char single_source_coef[2];
@@ -761,6 +771,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
+ /* specify valid address for disabled result */
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ pq[0] = pq[1];
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ pq[1] = pq[0];
+
/* the cleanup routine only sets bits on validate failure, it
* does not clear bits on validate success... so clear it here
*/
@@ -778,9 +794,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
dma_addr_t pq[2];
memset(scf, 0, src_cnt);
- flags |= DMA_PREP_PQ_DISABLE_Q;
pq[0] = dst;
- pq[1] = ~0;
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = dst; /* specify valid address for disabled result */
return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags);
@@ -800,9 +816,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
*result = 0;
memset(scf, 0, src_cnt);
- flags |= DMA_PREP_PQ_DISABLE_Q;
pq[0] = src[0];
- pq[1] = ~0;
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+ pq[1] = pq[0]; /* specify valid address for disabled result */
return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
len, flags);
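
The four hunks above enforce one rule: the engine always fetches both result addresses, so even a disabled P or Q output must carry a valid DMA address, and aliasing it to the enabled output (instead of the old ~0) keeps the write harmless. A minimal caller-side sketch (function name hypothetical) of a q-only operation:

	static struct dma_async_tx_descriptor *
	q_only_sketch(struct dma_chan *chan, dma_addr_t q_dst, dma_addr_t *src,
		      unsigned int src_cnt, const unsigned char *scf, size_t len)
	{
		dma_addr_t pq[2] = { 0, q_dst };	/* pq[0] is ignored: the
							 * prep routine aliases
							 * it to pq[1] */

		return chan->device->device_prep_dma_pq(chan, pq, src, src_cnt,
							scf, len,
							DMA_PREP_PQ_DISABLE_P);
	}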
@@ -1114,18 +1130,58 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
return 0;
}
+static int ioat3_reset_hw(struct ioat_chan_common *chan)
+{
+ /* throw away whatever the channel was doing and get it
+ * initialized, with ioat3 specific workarounds
+ */
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ u32 chanerr;
+ u16 dev_id;
+ int err;
+
+ ioat2_quiesce(chan, msecs_to_jiffies(100));
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ /* -= IOAT ver.3 workarounds =- */
+ /* Write CHANERRMSK_INT with 3E07h to mask out the errors
+ * that can cause stability issues for IOAT ver.3, and clear any
+ * pending errors
+ */
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (err) {
+ dev_err(&pdev->dev, "channel error register unreachable\n");
+ return err;
+ }
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
+ pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+
+ return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+}
+
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
+ int dca_en = system_has_dca_enabled(pdev);
struct dma_device *dma;
struct dma_chan *c;
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
- u16 dev_id;
u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
+ device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
@@ -1137,6 +1193,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+ /* dca is incompatible with raid operations */
+ if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+ cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
if (cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
@@ -1186,18 +1247,15 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->timer_fn = ioat2_timer_event;
}
- /* -= IOAT ver.3 workarounds =- */
- /* Write CHANERRMSK_INT with 3E07h to mask out the errors
- * that can cause stability issues for IOAT ver.3
- */
- pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
+ #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+ dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+ dma->device_prep_dma_pq_val = NULL;
+ #endif
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+ #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+ dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = NULL;
+ #endif
err = ioat_probe(device);
if (err)
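
Two policies land in this probe hunk: the ver.3 PCI workarounds move into ioat3_reset_hw() so they re-apply on every reset rather than once at probe, and RAID offload is masked when the BIOS enabled DCA, since the two are incompatible on this hardware. A minimal sketch (function name hypothetical) of the capability masking:

	static void apply_dca_policy_sketch(struct ioatdma_device *device,
					    u32 *cap)
	{
		/* parity moves to the CPU; memcpy offload and DCA remain */
		if (system_has_dca_enabled(device->pdev))
			*cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ);
	}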
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 99afb12bd409..60e675455b6a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,6 +39,8 @@
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+int system_has_dca_enabled(struct pci_dev *pdev);
+
struct ioat_dma_descriptor {
uint32_t size;
union {
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 63038e18ab03..e8ae63baf588 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -27,6 +27,7 @@
#define IOAT_PCI_DEVICE_ID_OFFSET 0x02
#define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148
+#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
/* MMIO Device Registers */
@@ -92,9 +93,7 @@
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
- IOAT_CHANCTRL_ERR_COMPLETION_EN |\
- IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
- IOAT_CHANCTRL_ERR_INT_EN)
+ IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
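
Dropping IOAT_CHANCTRL_ERR_INT_EN and IOAT_CHANCTRL_ERR_COMPLETION_EN from IOAT_CHANCTRL_RUN means channel errors no longer raise interrupts; instead the timer paths shown above read CHANERR once the channel halts, and the widened is_ioat_bug() treats any set bit as fatal. A minimal sketch (function name hypothetical) of that polling-side check:

	/* called only after the channel status reads halted */
	static void check_chanerr_sketch(struct ioat_chan_common *chan)
	{
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "%s: CHANERR = %#x\n", __func__, chanerr);
		BUG_ON(is_ioat_bug(chanerr));	/* any non-zero CHANERR is fatal */
	}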
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 9546f5f356eb..d10cc899c460 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -730,17 +730,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
#endif
struct sh_dmae_device *shdev;
+ /* get platform data */
+ if (!pdev->dev.platform_data)
+ return -ENODEV;
+
shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
if (!shdev) {
dev_err(&pdev->dev, "No enough memory\n");
- err = -ENOMEM;
- goto shdev_err;
+ return -ENOMEM;
}
- /* get platform data */
- if (!pdev->dev.platform_data)
- goto shdev_err;
-
/* platform data */
memcpy(&shdev->pdata, pdev->dev.platform_data,
sizeof(struct sh_dmae_pdata));
@@ -814,7 +813,6 @@ eirq_err:
rst_err:
kfree(shdev);
-shdev_err:
return err;
}
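
The shdma.c reordering follows a standard error-path rule: validate preconditions that require no cleanup before allocating, so early failures return directly and the now-dead shdev_err label can go. A minimal sketch (function name hypothetical):

	static int probe_order_sketch(struct platform_device *pdev)
	{
		struct sh_dmae_device *shdev;

		if (!pdev->dev.platform_data)	/* nothing allocated yet */
			return -ENODEV;

		shdev = kzalloc(sizeof(*shdev), GFP_KERNEL);
		if (!shdev)
			return -ENOMEM;

		/* from here on, every failure path must kfree(shdev) */
		kfree(shdev);
		return 0;
	}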