author | Dave Jiang <dave.jiang@intel.com> | 2015-08-11 08:48:55 -0700
---|---|---
committer | Vinod Koul <vinod.koul@intel.com> | 2015-08-17 13:37:30 +0530
commit | 3372de5813e4da8305002ff6ffbfc0c7012cb319 (patch) |
tree | 87e4b66fbfeb47b6ab22a4a993f86b19be27ddb3 /drivers/dma/ioat |
parent | 599d49de7f69cb5a23e913db24e168ba2f09bd05 (diff) |
download | blackbird-op-linux-3372de5813e4da8305002ff6ffbfc0c7012cb319.tar.gz, .zip |
dmaengine: ioatdma: removal of dma_v3.c and relevant ioat3 references
Move the relevant functions to their respective .c files and remove the
dma_v3.c file. Also remove various ioat3 references where appropriate.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
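
Beyond the file move, the change is largely mechanical: the surviving helpers lose their ioat3_ prefix (ioat3_dca_init() becomes ioat_dca_init(), ioat3_dma_test_callback() becomes ioat_dma_test_callback(), and so on), and routines that were exported only so dma_v3.c could call them (ioat_quiesce(), ioat_reset_sync(), __ioat_restart_chan(), reshape_ring(), __ioat_issue_pending()) become static now that their only callers live in dma.c. The sketch below is ordinary user-space C, not kernel code, with made-up names; it only illustrates the file-local-helper-plus-forward-declaration shape that dma.c now uses for ioat_eh():

```c
#include <stdio.h>

/* Forward declaration so an earlier-defined caller can reach a later
 * static helper -- the same shape as the new "static void ioat_eh(...);"
 * added near the top of dma.c.
 */
static void handle_halt(unsigned int chanerr);

/* File-local cleanup path; in the driver this corresponds to helpers such
 * as ioat_cleanup() that no longer need external linkage once everything
 * lives in one translation unit.
 */
static void cleanup(unsigned int chanerr)
{
	if (chanerr)
		handle_halt(chanerr);
	else
		puts("channel idle, nothing to clean up");
}

static void handle_halt(unsigned int chanerr)
{
	printf("handling channel error %#x\n", chanerr);
}

int main(void)
{
	cleanup(0);
	cleanup(0x10);
	return 0;
}
```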
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r-- | drivers/dma/ioat/Makefile | 2
-rw-r--r-- | drivers/dma/ioat/dca.c | 22
-rw-r--r-- | drivers/dma/ioat/dma.c | 525
-rw-r--r-- | drivers/dma/ioat/dma.h | 11
-rw-r--r-- | drivers/dma/ioat/dma_v3.c | 525
-rw-r--r-- | drivers/dma/ioat/init.c | 19
6 files changed, 487 insertions(+), 617 deletions(-)
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 3a7e66464d0c..cf5fedbe2b75 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-y := init.o dma.o dma_v3.o prep.o dca.o sysfs.o +ioatdma-y := init.o dma.o prep.o dca.o sysfs.o diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index f2b9a421985a..2cb7c308d5c7 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -132,7 +132,7 @@ static int ioat_dca_dev_managed(struct dca_provider *dca, return 0; } -static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) +static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); struct pci_dev *pdev; @@ -166,7 +166,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev) return -EFAULT; } -static int ioat3_dca_remove_requester(struct dca_provider *dca, +static int ioat_dca_remove_requester(struct dca_provider *dca, struct device *dev) { struct ioat_dca_priv *ioatdca = dca_priv(dca); @@ -193,7 +193,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca, return -ENODEV; } -static u8 ioat3_dca_get_tag(struct dca_provider *dca, +static u8 ioat_dca_get_tag(struct dca_provider *dca, struct device *dev, int cpu) { @@ -224,14 +224,14 @@ static u8 ioat3_dca_get_tag(struct dca_provider *dca, return tag; } -static struct dca_ops ioat3_dca_ops = { - .add_requester = ioat3_dca_add_requester, - .remove_requester = ioat3_dca_remove_requester, - .get_tag = ioat3_dca_get_tag, +static struct dca_ops ioat_dca_ops = { + .add_requester = ioat_dca_add_requester, + .remove_requester = ioat_dca_remove_requester, + .get_tag = ioat_dca_get_tag, .dev_managed = ioat_dca_dev_managed, }; -static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) +static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset) { int slots = 0; u32 req; @@ -266,7 +266,7 @@ static inline int dca3_tag_map_invalid(u8 *tag_map) (tag_map[4] == DCA_TAG_MAP_VALID)); } -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -293,11 +293,11 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) if (dca_offset == 0) return NULL; - slots = ioat3_dca_count_dca_slots(iobase, dca_offset); + slots = ioat_dca_count_dca_slots(iobase, dca_offset); if (slots == 0) return NULL; - dca = alloc_dca_provider(&ioat3_dca_ops, + dca = alloc_dca_provider(&ioat_dca_ops, sizeof(*ioatdca) + (sizeof(struct ioat_dca_slot) * slots)); if (!dca) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index e67eda055ea5..2031bb4ad536 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -37,6 +37,8 @@ #include "../dmaengine.h" +static void ioat_eh(struct ioatdma_chan *ioat_chan); + /** * ioat_dma_do_interrupt - handler used for single vector interrupt mode * @irq: interrupt id @@ -122,59 +124,7 @@ void ioat_stop(struct ioatdma_chan *ioat_chan) ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan); } -dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan) -{ - dma_addr_t phys_complete; - u64 completion; - - completion = *ioat_chan->completion; - phys_complete = ioat_chansts_to_addr(completion); - - dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, - (unsigned long long) 
phys_complete); - - if (is_ioat_halted(completion)) { - u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - - dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n", - chanerr); - - /* TODO do something to salvage the situation */ - } - - return phys_complete; -} - -bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, - dma_addr_t *phys_complete) -{ - *phys_complete = ioat_get_current_completion(ioat_chan); - if (*phys_complete == ioat_chan->last_completion) - return false; - clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - - return true; -} - -enum dma_status -ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct ioatdma_chan *ioat_chan = to_ioat_chan(c); - struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; - enum dma_status ret; - - ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - ioat_dma->cleanup_fn((unsigned long) c); - - return dma_cookie_status(c, cookie, txstate); -} - -void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) +static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan) { ioat_chan->dmacount += ioat_ring_pending(ioat_chan); ioat_chan->issued = ioat_chan->head; @@ -251,7 +201,7 @@ void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) spin_unlock_bh(&ioat_chan->prep_lock); } -void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) +static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) { /* set the tail to be re-issued */ ioat_chan->issued = ioat_chan->tail; @@ -274,7 +224,7 @@ void __ioat_restart_chan(struct ioatdma_chan *ioat_chan) __ioat_start_null_desc(ioat_chan); } -int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) +static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) { unsigned long end = jiffies + tmo; int err = 0; @@ -295,7 +245,7 @@ int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo) return err; } -int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) +static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo) { unsigned long end = jiffies + tmo; int err = 0; @@ -411,7 +361,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) return ring; } -bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) +static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order) { /* reshape differs from normal ring allocation in that we want * to allocate a new software ring while only @@ -578,3 +528,464 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) return -ENOMEM; } + +static bool desc_has_ext(struct ioat_ring_ent *desc) +{ + struct ioat_dma_descriptor *hw = desc->hw; + + if (hw->ctl_f.op == IOAT_OP_XOR || + hw->ctl_f.op == IOAT_OP_XOR_VAL) { + struct ioat_xor_descriptor *xor = desc->xor; + + if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) + return true; + } else if (hw->ctl_f.op == IOAT_OP_PQ || + hw->ctl_f.op == IOAT_OP_PQ_VAL) { + struct ioat_pq_descriptor *pq = desc->pq; + + if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) + return true; + } + + return false; +} + +static void +ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) +{ + if (!sed) + return; + + dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); + kmem_cache_free(ioat_sed_cache, sed); +} + +static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan) +{ + u64 phys_complete; + u64 completion; + + completion = 
*ioat_chan->completion; + phys_complete = ioat_chansts_to_addr(completion); + + dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, + (unsigned long long) phys_complete); + + return phys_complete; +} + +static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, + u64 *phys_complete) +{ + *phys_complete = ioat_get_current_completion(ioat_chan); + if (*phys_complete == ioat_chan->last_completion) + return false; + + clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + + return true; +} + +static void +desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) +{ + struct ioat_dma_descriptor *hw = desc->hw; + + switch (hw->ctl_f.op) { + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + { + struct ioat_pq_descriptor *pq = desc->pq; + + /* check if there's error written */ + if (!pq->dwbes_f.wbes) + return; + + /* need to set a chanerr var for checking to clear later */ + + if (pq->dwbes_f.p_val_err) + *desc->result |= SUM_CHECK_P_RESULT; + + if (pq->dwbes_f.q_val_err) + *desc->result |= SUM_CHECK_Q_RESULT; + + return; + } + default: + return; + } +} + +/** + * __cleanup - reclaim used descriptors + * @ioat: channel (ring) to clean + */ +static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) +{ + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + bool seen_current = false; + int idx = ioat_chan->tail, i; + u16 active; + + dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); + + /* + * At restart of the channel, the completion address and the + * channel status will be 0 due to starting a new chain. Since + * it's new chain and the first descriptor "fails", there is + * nothing to clean up. We do not want to reap the entire submitted + * chain due to this 0 address value and then BUG. + */ + if (!phys_complete) + return; + + active = ioat_ring_active(ioat_chan); + for (i = 0; i < active && !seen_current; i++) { + struct dma_async_tx_descriptor *tx; + + smp_read_barrier_depends(); + prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); + desc = ioat_get_ring_ent(ioat_chan, idx + i); + dump_desc_dbg(ioat_chan, desc); + + /* set err stat if we are using dwbes */ + if (ioat_dma->cap & IOAT_CAP_DWBES) + desc_get_errstat(ioat_chan, desc); + + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } + + if (tx->phys == phys_complete) + seen_current = true; + + /* skip extended descriptors */ + if (desc_has_ext(desc)) { + BUG_ON(i + 1 >= active); + i++; + } + + /* cleanup super extended descriptors */ + if (desc->sed) { + ioat_free_sed(ioat_dma, desc->sed); + desc->sed = NULL; + } + } + + /* finish all descriptor reads before incrementing tail */ + smp_mb(); + ioat_chan->tail = idx + i; + /* no active descs have written a completion? 
*/ + BUG_ON(active && !seen_current); + ioat_chan->last_completion = phys_complete; + + if (active - i == 0) { + dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", + __func__); + clear_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + } + + /* 5 microsecond delay per pending descriptor */ + writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), + ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); +} + +static void ioat_cleanup(struct ioatdma_chan *ioat_chan) +{ + u64 phys_complete; + + spin_lock_bh(&ioat_chan->cleanup_lock); + + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); + + if (is_ioat_halted(*ioat_chan->completion)) { + u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + + if (chanerr & IOAT_CHANERR_HANDLE_MASK) { + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + ioat_eh(ioat_chan); + } + } + + spin_unlock_bh(&ioat_chan->cleanup_lock); +} + +void ioat_cleanup_event(unsigned long data) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + + ioat_cleanup(ioat_chan); + if (!test_bit(IOAT_RUN, &ioat_chan->state)) + return; + writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); +} + +static void ioat_restart_channel(struct ioatdma_chan *ioat_chan) +{ + u64 phys_complete; + + ioat_quiesce(ioat_chan, 0); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); + + __ioat_restart_chan(ioat_chan); +} + +static void ioat_eh(struct ioatdma_chan *ioat_chan) +{ + struct pci_dev *pdev = to_pdev(ioat_chan); + struct ioat_dma_descriptor *hw; + struct dma_async_tx_descriptor *tx; + u64 phys_complete; + struct ioat_ring_ent *desc; + u32 err_handled = 0; + u32 chanerr_int; + u32 chanerr; + + /* cleanup so tail points to descriptor that caused the error */ + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); + + dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n", + __func__, chanerr, chanerr_int); + + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); + hw = desc->hw; + dump_desc_dbg(ioat_chan, desc); + + switch (hw->ctl_f.op) { + case IOAT_OP_XOR_VAL: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + break; + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { + *desc->result |= SUM_CHECK_Q_RESULT; + err_handled |= IOAT_CHANERR_XOR_Q_ERR; + } + break; + } + + /* fault on unhandled error or spurious halt */ + if (chanerr ^ err_handled || chanerr == 0) { + dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", + __func__, chanerr, err_handled); + BUG(); + } else { /* cleanup the faulty descriptor */ + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } + } + + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); + + /* mark faulting descriptor as complete */ + *ioat_chan->completion = desc->txd.phys; + + spin_lock_bh(&ioat_chan->prep_lock); + 
ioat_restart_channel(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); +} + +static void check_active(struct ioatdma_chan *ioat_chan) +{ + if (ioat_ring_active(ioat_chan)) { + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + return; + } + + if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + else if (ioat_chan->alloc_order > ioat_get_alloc_order()) { + /* if the ring is idle, empty, and oversized try to step + * down the size + */ + reshape_ring(ioat_chan, ioat_chan->alloc_order - 1); + + /* keep shrinking until we get back to our minimum + * default size + */ + if (ioat_chan->alloc_order > ioat_get_alloc_order()) + mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); + } + +} + +void ioat_timer_event(unsigned long data) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); + dma_addr_t phys_complete; + u64 status; + + status = ioat_chansts(ioat_chan); + + /* when halted due to errors check for channel + * programming errors before advancing the completion state + */ + if (is_ioat_halted(status)) { + u32 chanerr; + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", + __func__, chanerr); + if (test_bit(IOAT_RUN, &ioat_chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; + } + + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + spin_lock_bh(&ioat_chan->cleanup_lock); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) + __cleanup(ioat_chan, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { + spin_lock_bh(&ioat_chan->prep_lock); + ioat_restart_channel(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + return; + } else { + set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + } + + + if (ioat_ring_active(ioat_chan)) + mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); + else { + spin_lock_bh(&ioat_chan->prep_lock); + check_active(ioat_chan); + spin_unlock_bh(&ioat_chan->prep_lock); + } + spin_unlock_bh(&ioat_chan->cleanup_lock); +} + +enum dma_status +ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct ioatdma_chan *ioat_chan = to_ioat_chan(c); + enum dma_status ret; + + ret = dma_cookie_status(c, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + ioat_cleanup(ioat_chan); + + return dma_cookie_status(c, cookie, txstate); +} + +static int ioat_irq_reinit(struct ioatdma_device *ioat_dma) +{ + struct pci_dev *pdev = ioat_dma->pdev; + int irq = pdev->irq, i; + + if (!is_bwd_ioat(pdev)) + return 0; + + switch (ioat_dma->irq_mode) { + case IOAT_MSIX: + for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { + struct msix_entry *msix = &ioat_dma->msix_entries[i]; + struct ioatdma_chan *ioat_chan; + + ioat_chan = ioat_chan_by_index(ioat_dma, i); + devm_free_irq(&pdev->dev, msix->vector, ioat_chan); + } + + pci_disable_msix(pdev); + break; + case IOAT_MSI: + pci_disable_msi(pdev); + /* fall through */ + case IOAT_INTX: + devm_free_irq(&pdev->dev, irq, ioat_dma); + break; + default: + return 0; + } + ioat_dma->irq_mode = IOAT_NOIRQ; + + return ioat_dma_setup_interrupts(ioat_dma); +} + +int ioat_reset_hw(struct ioatdma_chan *ioat_chan) +{ + /* throw away whatever the channel was doing and get it + * 
initialized, with ioat3 specific workarounds + */ + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct pci_dev *pdev = ioat_dma->pdev; + u32 chanerr; + u16 dev_id; + int err; + + ioat_quiesce(ioat_chan, msecs_to_jiffies(100)); + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + + if (ioat_dma->version < IOAT_VER_3_3) { + /* clear any pending errors */ + err = pci_read_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); + if (err) { + dev_err(&pdev->dev, + "channel error register unreachable\n"); + return err; + } + pci_write_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, chanerr); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { + pci_write_config_dword(pdev, + IOAT_PCI_DMAUNCERRSTS_OFFSET, + 0x10); + } + } + + err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); + if (!err) + err = ioat_irq_reinit(ioat_dma); + + if (err) + dev_err(&pdev->dev, "Failed to reset: %d\n", err); + + return err; +} diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index a319befad1a3..2e1f05464703 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -438,24 +438,15 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate); void ioat_cleanup_event(unsigned long data); void ioat_timer_event(unsigned long data); -enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate); -bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan, - dma_addr_t *phys_complete); int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); void ioat_issue_pending(struct dma_chan *chan); -bool reshape_ring(struct ioatdma_chan *ioat, int order); -void __ioat_issue_pending(struct ioatdma_chan *ioat_chan); void ioat_timer_event(unsigned long data); -int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo); -int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo); -void __ioat_restart_chan(struct ioatdma_chan *ioat_chan); /* IOAT Init functions */ bool is_bwd_ioat(struct pci_dev *pdev); +struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *ioat_dma); int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma); void ioat_stop(struct ioatdma_chan *ioat_chan); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c deleted file mode 100644 index d0ae8f7c97a6..000000000000 --- a/drivers/dma/ioat/dma_v3.c +++ /dev/null @@ -1,525 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * BSD LICENSE - * - * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -/* - * Support routines for v3+ hardware - */ -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/gfp.h> -#include <linux/dmaengine.h> -#include <linux/dma-mapping.h> -#include <linux/prefetch.h> -#include "../dmaengine.h" -#include "registers.h" -#include "hw.h" -#include "dma.h" - -static void ioat3_eh(struct ioatdma_chan *ioat_chan); - -static bool desc_has_ext(struct ioat_ring_ent *desc) -{ - struct ioat_dma_descriptor *hw = desc->hw; - - if (hw->ctl_f.op == IOAT_OP_XOR || - hw->ctl_f.op == IOAT_OP_XOR_VAL) { - struct ioat_xor_descriptor *xor = desc->xor; - - if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) - return true; - } else if (hw->ctl_f.op == IOAT_OP_PQ || - hw->ctl_f.op == IOAT_OP_PQ_VAL) { - struct ioat_pq_descriptor *pq = desc->pq; - - if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) - return true; - } - - return false; -} - -static void -ioat3_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed) -{ - if (!sed) - return; - - dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); - kmem_cache_free(ioat_sed_cache, sed); -} - -static u64 ioat3_get_current_completion(struct ioatdma_chan *ioat_chan) -{ - u64 phys_complete; - u64 completion; - - completion = *ioat_chan->completion; - phys_complete = ioat_chansts_to_addr(completion); - - dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__, - (unsigned long long) phys_complete); - - return phys_complete; -} - -static bool ioat3_cleanup_preamble(struct ioatdma_chan *ioat_chan, - u64 *phys_complete) -{ - *phys_complete = ioat3_get_current_completion(ioat_chan); - if (*phys_complete == ioat_chan->last_completion) - return false; - - clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - - return true; -} - -static void -desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) -{ - struct ioat_dma_descriptor *hw = desc->hw; - - switch (hw->ctl_f.op) { - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ_VAL_16S: - { - struct ioat_pq_descriptor *pq = desc->pq; - - /* check if there's error written */ - if (!pq->dwbes_f.wbes) - return; - - /* need to set a chanerr var for checking to clear later */ - - if (pq->dwbes_f.p_val_err) - *desc->result |= SUM_CHECK_P_RESULT; - - if (pq->dwbes_f.q_val_err) - *desc->result |= SUM_CHECK_Q_RESULT; - - return; - } - default: - return; - } -} - -/** - * __cleanup - reclaim used descriptors - * @ioat: channel (ring) to clean - * - * The difference from the dma_v2.c __cleanup() is that this routine - * handles extended descriptors and dma-unmapping raid operations. - */ -static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) -{ - struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; - struct ioat_ring_ent *desc; - bool seen_current = false; - int idx = ioat_chan->tail, i; - u16 active; - - dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n", - __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); - - /* - * At restart of the channel, the completion address and the - * channel status will be 0 due to starting a new chain. Since - * it's new chain and the first descriptor "fails", there is - * nothing to clean up. We do not want to reap the entire submitted - * chain due to this 0 address value and then BUG. 
- */ - if (!phys_complete) - return; - - active = ioat_ring_active(ioat_chan); - for (i = 0; i < active && !seen_current; i++) { - struct dma_async_tx_descriptor *tx; - - smp_read_barrier_depends(); - prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); - desc = ioat_get_ring_ent(ioat_chan, idx + i); - dump_desc_dbg(ioat_chan, desc); - - /* set err stat if we are using dwbes */ - if (ioat_dma->cap & IOAT_CAP_DWBES) - desc_get_errstat(ioat_chan, desc); - - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - - if (tx->phys == phys_complete) - seen_current = true; - - /* skip extended descriptors */ - if (desc_has_ext(desc)) { - BUG_ON(i + 1 >= active); - i++; - } - - /* cleanup super extended descriptors */ - if (desc->sed) { - ioat3_free_sed(ioat_dma, desc->sed); - desc->sed = NULL; - } - } - smp_mb(); /* finish all descriptor reads before incrementing tail */ - ioat_chan->tail = idx + i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ - ioat_chan->last_completion = phys_complete; - - if (active - i == 0) { - dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n", - __func__); - clear_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state); - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - } - /* 5 microsecond delay per pending descriptor */ - writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), - ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET); -} - -static void ioat3_cleanup(struct ioatdma_chan *ioat_chan) -{ - u64 phys_complete; - - spin_lock_bh(&ioat_chan->cleanup_lock); - - if (ioat3_cleanup_preamble(ioat_chan, &phys_complete)) - __cleanup(ioat_chan, phys_complete); - - if (is_ioat_halted(*ioat_chan->completion)) { - u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - - if (chanerr & IOAT_CHANERR_HANDLE_MASK) { - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - ioat3_eh(ioat_chan); - } - } - - spin_unlock_bh(&ioat_chan->cleanup_lock); -} - -void ioat_cleanup_event(unsigned long data) -{ - struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); - - ioat3_cleanup(ioat_chan); - if (!test_bit(IOAT_RUN, &ioat_chan->state)) - return; - writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); -} - -static void ioat3_restart_channel(struct ioatdma_chan *ioat_chan) -{ - u64 phys_complete; - - ioat_quiesce(ioat_chan, 0); - if (ioat3_cleanup_preamble(ioat_chan, &phys_complete)) - __cleanup(ioat_chan, phys_complete); - - __ioat_restart_chan(ioat_chan); -} - -static void ioat3_eh(struct ioatdma_chan *ioat_chan) -{ - struct pci_dev *pdev = to_pdev(ioat_chan); - struct ioat_dma_descriptor *hw; - struct dma_async_tx_descriptor *tx; - u64 phys_complete; - struct ioat_ring_ent *desc; - u32 err_handled = 0; - u32 chanerr_int; - u32 chanerr; - - /* cleanup so tail points to descriptor that caused the error */ - if (ioat3_cleanup_preamble(ioat_chan, &phys_complete)) - __cleanup(ioat_chan, phys_complete); - - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); - - dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n", - __func__, chanerr, chanerr_int); - - desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); - hw = desc->hw; - dump_desc_dbg(ioat_chan, desc); - - switch (hw->ctl_f.op) { - case IOAT_OP_XOR_VAL: - if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { - *desc->result |= SUM_CHECK_P_RESULT; - err_handled |= 
IOAT_CHANERR_XOR_P_OR_CRC_ERR; - } - break; - case IOAT_OP_PQ_VAL: - case IOAT_OP_PQ_VAL_16S: - if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { - *desc->result |= SUM_CHECK_P_RESULT; - err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; - } - if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { - *desc->result |= SUM_CHECK_Q_RESULT; - err_handled |= IOAT_CHANERR_XOR_Q_ERR; - } - break; - } - - /* fault on unhandled error or spurious halt */ - if (chanerr ^ err_handled || chanerr == 0) { - dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", - __func__, chanerr, err_handled); - BUG(); - } else { /* cleanup the faulty descriptor */ - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - } - - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); - - /* mark faulting descriptor as complete */ - *ioat_chan->completion = desc->txd.phys; - - spin_lock_bh(&ioat_chan->prep_lock); - ioat3_restart_channel(ioat_chan); - spin_unlock_bh(&ioat_chan->prep_lock); -} - -static void check_active(struct ioatdma_chan *ioat_chan) -{ - if (ioat_ring_active(ioat_chan)) { - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - return; - } - - if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state)) - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - else if (ioat_chan->alloc_order > ioat_get_alloc_order()) { - /* if the ring is idle, empty, and oversized try to step - * down the size - */ - reshape_ring(ioat_chan, ioat_chan->alloc_order - 1); - - /* keep shrinking until we get back to our minimum - * default size - */ - if (ioat_chan->alloc_order > ioat_get_alloc_order()) - mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); - } - -} - -void ioat_timer_event(unsigned long data) -{ - struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); - dma_addr_t phys_complete; - u64 status; - - status = ioat_chansts(ioat_chan); - - /* when halted due to errors check for channel - * programming errors before advancing the completion state - */ - if (is_ioat_halted(status)) { - u32 chanerr; - - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", - __func__, chanerr); - if (test_bit(IOAT_RUN, &ioat_chan->state)) - BUG_ON(is_ioat_bug(chanerr)); - else /* we never got off the ground */ - return; - } - - /* if we haven't made progress and we have already - * acknowledged a pending completion once, then be more - * forceful with a restart - */ - spin_lock_bh(&ioat_chan->cleanup_lock); - if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) - __cleanup(ioat_chan, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { - spin_lock_bh(&ioat_chan->prep_lock); - ioat3_restart_channel(ioat_chan); - spin_unlock_bh(&ioat_chan->prep_lock); - spin_unlock_bh(&ioat_chan->cleanup_lock); - return; - } else { - set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - } - - - if (ioat_ring_active(ioat_chan)) - mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); - else { - spin_lock_bh(&ioat_chan->prep_lock); - check_active(ioat_chan); - spin_unlock_bh(&ioat_chan->prep_lock); - } - spin_unlock_bh(&ioat_chan->cleanup_lock); -} - -enum dma_status -ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, - struct dma_tx_state *txstate) -{ - struct ioatdma_chan *ioat_chan = to_ioat_chan(c); 
- enum dma_status ret; - - ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; - - ioat3_cleanup(ioat_chan); - - return dma_cookie_status(c, cookie, txstate); -} - -static int ioat3_irq_reinit(struct ioatdma_device *ioat_dma) -{ - struct pci_dev *pdev = ioat_dma->pdev; - int irq = pdev->irq, i; - - if (!is_bwd_ioat(pdev)) - return 0; - - switch (ioat_dma->irq_mode) { - case IOAT_MSIX: - for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) { - struct msix_entry *msix = &ioat_dma->msix_entries[i]; - struct ioatdma_chan *ioat_chan; - - ioat_chan = ioat_chan_by_index(ioat_dma, i); - devm_free_irq(&pdev->dev, msix->vector, ioat_chan); - } - - pci_disable_msix(pdev); - break; - case IOAT_MSI: - pci_disable_msi(pdev); - /* fall through */ - case IOAT_INTX: - devm_free_irq(&pdev->dev, irq, ioat_dma); - break; - default: - return 0; - } - ioat_dma->irq_mode = IOAT_NOIRQ; - - return ioat_dma_setup_interrupts(ioat_dma); -} - -int ioat_reset_hw(struct ioatdma_chan *ioat_chan) -{ - /* throw away whatever the channel was doing and get it - * initialized, with ioat3 specific workarounds - */ - struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; - struct pci_dev *pdev = ioat_dma->pdev; - u32 chanerr; - u16 dev_id; - int err; - - ioat_quiesce(ioat_chan, msecs_to_jiffies(100)); - - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - - if (ioat_dma->version < IOAT_VER_3_3) { - /* clear any pending errors */ - err = pci_read_config_dword(pdev, - IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); - if (err) { - dev_err(&pdev->dev, - "channel error register unreachable\n"); - return err; - } - pci_write_config_dword(pdev, - IOAT_PCI_CHANERR_INT_OFFSET, chanerr); - - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { - pci_write_config_dword(pdev, - IOAT_PCI_DMAUNCERRSTS_OFFSET, - 0x10); - } - } - - err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); - if (!err) - err = ioat3_irq_reinit(ioat_dma); - - if (err) - dev_err(&pdev->dev, "Failed to reset: %d\n", err); - - return err; -} diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 6b8fd49cf718..e6969809d723 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -735,13 +735,6 @@ ioat_init_channel(struct ioatdma_device *ioat_dma, tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); } -static void ioat3_dma_test_callback(void *dma_async_param) -{ - struct completion *cmp = dma_async_param; - - complete(cmp); -} - #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) { @@ -835,7 +828,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) async_tx_ack(tx); init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; + tx->callback = ioat_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { @@ -903,7 +896,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) async_tx_ack(tx); init_completion(&cmp); - tx->callback = ioat3_dma_test_callback; + tx->callback = ioat_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { @@ -956,7 +949,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) async_tx_ack(tx); init_completion(&cmp); - tx->callback = 
ioat3_dma_test_callback; + tx->callback = ioat_dma_test_callback; tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { @@ -1024,7 +1017,7 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) return 0; } -static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma) +static void ioat_intr_quirk(struct ioatdma_device *ioat_dma) { struct dma_device *dma; struct dma_chan *c; @@ -1063,7 +1056,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) ioat_dma->enumerate_channels = ioat_enumerate_channels; ioat_dma->reset_hw = ioat_reset_hw; ioat_dma->self_test = ioat3_dma_self_test; - ioat_dma->intr_quirk = ioat3_intr_quirk; + ioat_dma->intr_quirk = ioat_intr_quirk; dma = &ioat_dma->dma_dev; dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; dma->device_issue_pending = ioat_issue_pending; @@ -1162,7 +1155,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) ioat_kobject_add(ioat_dma, &ioat_ktype); if (dca) - ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base); + ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base); return 0; } |