Diffstat (limited to 'drivers/dma')
33 files changed, 674 insertions, 300 deletions
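The most common change in this series is mechanical: the dmaengine return code DMA_SUCCESS is renamed to DMA_COMPLETE in each driver's status path. For orientation, a minimal sketch of the tx_status idiom being updated — my_tx_status and my_get_residue are hypothetical names, not from this patch:

static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)	/* spelled DMA_SUCCESS before this series */
		return ret;

	/* transfer still in flight: report how many bytes remain */
	dma_set_residue(txstate, my_get_residue(chan));	/* hypothetical helper */
	return ret;
}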
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index f238cfd33847..cd325866d9f4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -89,14 +89,15 @@ config AT_HDMAC Support the Atmel AHB DMA controller. config FSL_DMA - tristate "Freescale Elo and Elo Plus DMA support" + tristate "Freescale Elo series DMA support" depends on FSL_SOC select DMA_ENGINE select ASYNC_TX_ENABLE_CHANNEL_SWITCH ---help--- - Enable support for the Freescale Elo and Elo Plus DMA controllers. - The Elo is the DMA controller on some 82xx and 83xx parts, and the - Elo Plus is the DMA controller on 85xx and 86xx parts. + Enable support for the Freescale Elo series DMA controllers. + The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the + EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on + some Txxx and Bxxx parts. config MPC512X_DMA tristate "Freescale MPC512x built-in DMA engine support" diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 6a5f782ec7eb..f2917e2035ba 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1222,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* @@ -1237,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&plchan->vc.lock, flags); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { vd = vchan_find_desc(&plchan->vc, cookie); if (vd) { /* On the issued list, so hasn't been processed yet */ @@ -2103,8 +2103,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); - ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, - DRIVER_NAME, pl08x); + ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x); if (ret) { dev_err(&adev->dev, "%s failed to request interrupt %d\n", __func__, adev->irq[0]); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6deaefbec0b0..e2c04dc81e2a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1078,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan, int bytes = 0; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; /* * There's no point calculating the residue if there's diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 31011d2a26fc..3c6716e0b78e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dma_set_residue(txstate, coh901318_get_bytes_left(chan)); @@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev) if (irq < 0) return irq; - err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, + err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0, "coh901318", base); if (err) return err; diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 7c82b92f9b16..c29dacff66fa 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -141,6 +141,9 @@ struct cppi41_dd { const struct chan_queues *queues_rx; const struct chan_queues *queues_tx; struct chan_queues td_queue; + + /* context for 
suspend/resume */ + unsigned int dma_tdfdq; }; #define FIST_COMPLETION_QUEUE 93 @@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val) return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1); } +static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) +{ + u32 desc; + + desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); + desc &= ~0x1f; + return desc; +} + static irqreturn_t cppi41_irq(int irq, void *data) { struct cppi41_dd *cdd = data; @@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) q_num = __fls(val); val &= ~(1 << q_num); q_num += 32 * i; - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num)); - desc &= ~0x1f; + desc = cppi41_pop_desc(cdd, q_num); c = desc_to_chan(cdd, desc); if (WARN_ON(!c)) { pr_err("%s() q %d desc %08x\n", __func__, @@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, /* lock */ ret = dma_cookie_status(chan, cookie, txstate); - if (txstate && ret == DMA_SUCCESS) + if (txstate && ret == DMA_COMPLETE) txstate->residue = c->residue; /* unlock */ @@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d) d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; } -static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num) -{ - u32 desc; - - desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num)); - desc &= ~0x1f; - return desc; -} - static int cppi41_tear_down_chan(struct cppi41_channel *c) { struct cppi41_dd *cdd = c->cdd; @@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) c->td_retry = 100; } - if (!c->td_seen) { - unsigned td_comp_queue; + if (!c->td_seen || !c->td_desc_seen) { - if (c->is_tx) - td_comp_queue = cdd->td_queue.complete; - else - td_comp_queue = c->q_comp_num; + desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete); + if (!desc_phys) + desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - desc_phys = cppi41_pop_desc(cdd, td_comp_queue); - if (desc_phys) { - __iormb(); + if (desc_phys == c->desc_phys) { + c->td_desc_seen = 1; + + } else if (desc_phys == td_desc_phys) { + u32 pd0; - if (desc_phys == td_desc_phys) { - u32 pd0; - pd0 = td->pd0; - WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); - WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); - WARN_ON((pd0 & 0x1f) != c->port_num); - } else { - WARN_ON_ONCE(1); - } - c->td_seen = 1; - } - } - if (!c->td_desc_seen) { - desc_phys = cppi41_pop_desc(cdd, c->q_comp_num); - if (desc_phys) { __iormb(); - WARN_ON(c->desc_phys != desc_phys); - c->td_desc_seen = 1; + pd0 = td->pd0; + WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD); + WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX)); + WARN_ON((pd0 & 0x1f) != c->port_num); + c->td_seen = 1; + } else if (desc_phys) { + WARN_ON_ONCE(1); } } c->td_retry--; @@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c) WARN_ON(!c->td_retry); if (!c->td_desc_seen) { - desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); + desc_phys = cppi41_pop_desc(cdd, c->q_num); WARN_ON(!desc_phys); } @@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd) } } -static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd) +static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd) { struct cppi41_channel *cchan; int i; int ret; u32 n_chans; - ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels", + ret = of_property_read_u32(dev->of_node, "#dma-channels", &n_chans); if (ret) return ret; @@ -719,7 +711,7 @@ err: return -ENOMEM; } -static void purge_descs(struct platform_device *pdev, struct 
cppi41_dd *cdd) +static void purge_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int mem_decs; int i; @@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i)); cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i)); - dma_free_coherent(&pdev->dev, mem_decs, cdd->cd, + dma_free_coherent(dev, mem_decs, cdd->cd, cdd->descs_phys); } } @@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd) cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL); } -static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd) +static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd) { disable_sched(cdd); - purge_descs(pdev, cdd); + purge_descs(dev, cdd); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE); - dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, + dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch, cdd->scratch_phys); } -static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_descs(struct device *dev, struct cppi41_dd *cdd) { unsigned int desc_size; unsigned int mem_decs; @@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd) reg |= ilog2(ALLOC_DECS_NUM) - 5; BUILD_BUG_ON(DESCS_AREAS != 1); - cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs, + cdd->cd = dma_alloc_coherent(dev, mem_decs, &cdd->descs_phys, GFP_KERNEL); if (!cdd->cd) return -ENOMEM; @@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd) cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL); } -static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) +static int init_cppi41(struct device *dev, struct cppi41_dd *cdd) { int ret; BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1)); - cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, + cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE, &cdd->scratch_phys, GFP_KERNEL); if (!cdd->qmgr_scratch) return -ENOMEM; @@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE); cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); - ret = init_descs(pdev, cdd); + ret = init_descs(dev, cdd); if (ret) goto err_td; @@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd) init_sched(cdd); return 0; err_td: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); return ret; } @@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = { }; MODULE_DEVICE_TABLE(of, cppi41_dma_ids); -static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) +static const struct cppi_glue_infos *get_glue_info(struct device *dev) { const struct of_device_id *of_id; - of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node); + of_id = of_match_node(cppi41_dma_ids, dev->of_node); if (!of_id) return NULL; return of_id->data; @@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev) static int cppi41_dma_probe(struct platform_device *pdev) { struct cppi41_dd *cdd; + struct device *dev = &pdev->dev; const struct cppi_glue_infos *glue_info; int irq; int ret; - glue_info = get_glue_info(pdev); + glue_info = get_glue_info(dev); if (!glue_info) return -EINVAL; @@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->ddev.device_issue_pending = 
cppi41_dma_issue_pending; cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; cdd->ddev.device_control = cppi41_dma_control; - cdd->ddev.dev = &pdev->dev; + cdd->ddev.dev = dev; INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; - cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0); - cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1); - cdd->sched_mem = of_iomap(pdev->dev.of_node, 2); - cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3); + cdd->usbss_mem = of_iomap(dev->of_node, 0); + cdd->ctrl_mem = of_iomap(dev->of_node, 1); + cdd->sched_mem = of_iomap(dev->of_node, 2); + cdd->qmgr_mem = of_iomap(dev->of_node, 3); if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || !cdd->qmgr_mem) { @@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev) goto err_remap; } - pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret) + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) goto err_get_sync; cdd->queues_rx = glue_info->queues_rx; cdd->queues_tx = glue_info->queues_tx; cdd->td_queue = glue_info->td_queue; - ret = init_cppi41(pdev, cdd); + ret = init_cppi41(dev, cdd); if (ret) goto err_init_cppi; - ret = cppi41_add_chans(pdev, cdd); + ret = cppi41_add_chans(dev, cdd); if (ret) goto err_chans; - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) goto err_irq; cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); ret = request_irq(irq, glue_info->isr, IRQF_SHARED, - dev_name(&pdev->dev), cdd); + dev_name(dev), cdd); if (ret) goto err_irq; cdd->irq = irq; @@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (ret) goto err_dma_reg; - ret = of_dma_controller_register(pdev->dev.of_node, + ret = of_dma_controller_register(dev->of_node, cppi41_dma_xlate, &cpp41_dma_info); if (ret) goto err_of; @@ -1009,11 +1002,11 @@ err_irq: cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); cleanup_chans(cdd); err_chans: - deinit_cpii41(pdev, cdd); + deinit_cppi41(dev, cdd); err_init_cppi: - pm_runtime_put(&pdev->dev); + pm_runtime_put(dev); err_get_sync: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); free_irq(cdd->irq, cdd); cleanup_chans(cdd); - deinit_cpii41(pdev, cdd); + deinit_cppi41(&pdev->dev, cdd); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); @@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int cppi41_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + + cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR); + disable_sched(cdd); + + return 0; +} + +static int cppi41_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + struct cppi41_channel *c; + int i; + + for (i = 0; i < DESCS_AREAS; i++) + cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i)); + + list_for_each_entry(c, &cdd->ddev.channels, chan.device_node) + if (!c->is_tx) + cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + + init_sched(cdd); + + cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ); + cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE); + cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + 
QMGR_LRAM_SIZE); + cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE); + + cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); + static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, .remove = cppi41_dma_remove, .driver = { .name = "cppi41-dma-engine", .owner = THIS_MODULE, + .pm = &cppi41_pm_ops, .of_match_table = of_match_ptr(cppi41_dma_ids), }, }; diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index b0c0c8268d42..94c380f07538 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c, unsigned long flags; status = dma_cookie_status(c, cookie, state); - if (status == DMA_SUCCESS || !state) + if (status == DMA_COMPLETE || !state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b69ac3892b86..ea806bdc12ef 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1152,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); if (!tx) - return DMA_SUCCESS; + return DMA_COMPLETE; while (tx->cookie == -EBUSY) { if (time_after_eq(jiffies, dma_sync_wait_timeout)) { diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 329b7cf02f8e..20f9a3aaf926 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -638,7 +638,7 @@ static int dmatest_func(void *data) len, 0); failed_tests++; continue; - } else if (status != DMA_SUCCESS) { + } else if (status != DMA_COMPLETE) { dmaengine_unmap_put(um); result(status == DMA_ERROR ? "completion error status" : diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 1f39ccce2727..7516be4677cf 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1075,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; dwc_scan_descriptors(to_dw_dma(chan->device), dwc); ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) + if (ret != DMA_COMPLETE) dma_set_residue(txstate, dwc_get_residue(dwc)); if (dwc->paused && ret == DMA_IN_PROGRESS) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3519111c566b..ab48430765b8 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -46,14 +46,21 @@ #define EDMA_CHANS 64 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ -/* Max of 16 segments per channel to conserve PaRAM slots */ -#define MAX_NR_SG 16 +/* + * Max of 20 segments per channel to conserve PaRAM slots + * Also note that MAX_NR_SG should be at least the number of periods + * that are required for ASoC, otherwise DMA prep calls will + * fail. Today davinci-pcm is the only user of this driver and + * requires at least 17 slots, so we set the default to 20. 
+ */ +#define MAX_NR_SG 20 #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 struct edma_desc { struct virt_dma_desc vdesc; struct list_head node; + int cyclic; int absync; int pset_nr; int processed; @@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan) * then setup a link to the dummy slot, this results in all future * events being absorbed and that's OK because we're done */ - if (edesc->processed == edesc->pset_nr) - edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot); + if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) + edma_link(echan->slot[nslots-1], echan->slot[1]); + else + edma_link(echan->slot[nslots-1], + echan->ecc->dummy_slot); + } edma_resume(echan->ch_num); @@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return ret; } +/* + * A PaRAM set configuration abstraction used by other modes + * @chan: Channel whose PaRAM set we're configuring + * @pset: PaRAM set to initialize and set up + * @src_addr: Source address of the DMA + * @dst_addr: Destination address of the DMA + * @burst: In units of dev_width, how much to send + * @dev_width: Width of the device FIFO access + * @dma_length: Total length of the DMA transfer + * @direction: Direction of the transfer + */ +static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, + dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, + enum dma_slave_buswidth dev_width, unsigned int dma_length, + enum dma_transfer_direction direction) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + int acnt, bcnt, ccnt, cidx; + int src_bidx, dst_bidx, src_cidx, dst_cidx; + int absync; + + acnt = dev_width; + /* + * If the maxburst is equal to the fifo width, use + * A-synced transfers. This allows for large contiguous + * buffer transfers using only one PaRAM set. + */ + if (burst == 1) { + /* + * For the A-sync case, bcnt and ccnt are the remainder + * and quotient respectively of the division of + * (dma_length / acnt) by (SZ_64K - 1). This is so + * that in case bcnt overflows, we have ccnt to use. + * Note: In the A-sync transfer only, bcntrld is used, but it + * only applies for sg_dma_len(sg) >= SZ_64K. + * In this case, the approach adopted is: bcnt for the + * first frame will be the remainder below. Then for + * every successive frame, bcnt will be SZ_64K-1. This + * is assured as bcntrld = 0xffff at the end of the function. + */ + absync = false; + ccnt = dma_length / acnt / (SZ_64K - 1); + bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); + /* + * If bcnt is non-zero, we have a remainder and hence an + * extra frame to transfer, so increment ccnt. + */ + if (bcnt) + ccnt++; + else + bcnt = SZ_64K - 1; + cidx = acnt; + } else { + /* + * If maxburst is greater than the fifo address_width, + * use AB-synced transfers where A count is the fifo + * address_width and B count is the maxburst. In this + * case, we are limited to transfers of C count frames + * of (address_width * maxburst) where C count is limited + * to SZ_64K-1. This places an upper bound on the length + * of an SG segment that can be handled. 
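[Editor's worked example, not part of the patch — the A-synced split for the burst == 1 branch above: with dev_width = 4 bytes and dma_length = 1 MiB, acnt = 4 and dma_length / acnt = 262144 elements; ccnt = 262144 / 65535 = 4 full (SZ_64K - 1) frames and bcnt = 262144 - 4 * 65535 = 4 elements left over. Since bcnt is non-zero, ccnt becomes 5: the first frame moves the 4-element remainder, and bcntrld = 0xffff reloads bcnt to 65535 for every following frame.]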
+ */ + absync = true; + bcnt = burst; + ccnt = dma_length / (acnt * bcnt); + if (ccnt > (SZ_64K - 1)) { + dev_err(dev, "Exceeded max SG segment size\n"); + return -EINVAL; + } + cidx = acnt * bcnt; + } + + if (direction == DMA_MEM_TO_DEV) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = 0; + dst_cidx = 0; + } else if (direction == DMA_DEV_TO_MEM) { + src_bidx = 0; + src_cidx = 0; + dst_bidx = acnt; + dst_cidx = cidx; + } else { + dev_err(dev, "%s: direction not implemented yet\n", __func__); + return -EINVAL; + } + + pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); + /* Configure A or AB synchronized transfers */ + if (absync) + pset->opt |= SYNCDIM; + + pset->src = src_addr; + pset->dst = dst_addr; + + pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; + pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; + + pset->a_b_cnt = bcnt << 16 | acnt; + pset->ccnt = ccnt; + /* + * Only time when (bcntrld) auto reload is required is for + * A-sync case, and in this case, a requirement of reload value + * of SZ_64K-1 only is assured. 'link' is initially set to NULL + * and then later will be populated by edma_execute. + */ + pset->link_bcntrld = 0xffffffff; + return absync; +} + static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, @@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edma_desc *edesc; - dma_addr_t dev_addr; + dma_addr_t src_addr = 0, dst_addr = 0; enum dma_slave_buswidth dev_width; u32 burst; struct scatterlist *sg; - int acnt, bcnt, ccnt, src, dst, cidx; - int src_bidx, dst_bidx, src_cidx, dst_cidx; - int i, nslots; + int i, nslots, ret; if (unlikely(!echan || !sgl || !sg_len)) return NULL; if (direction == DMA_DEV_TO_MEM) { - dev_addr = echan->cfg.src_addr; + src_addr = echan->cfg.src_addr; dev_width = echan->cfg.src_addr_width; burst = echan->cfg.src_maxburst; } else if (direction == DMA_MEM_TO_DEV) { - dev_addr = echan->cfg.dst_addr; + dst_addr = echan->cfg.dst_addr; dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { @@ -305,8 +426,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( edma_alloc_slot(EDMA_CTLR(echan->ch_num), EDMA_SLOT_ANY); if (echan->slot[i] < 0) { - dev_err(dev, "Failed to allocate slot\n"); kfree(edesc); + dev_err(dev, "Failed to allocate slot\n"); return NULL; } } @@ -314,63 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* Configure PaRAM sets for each SG */ for_each_sg(sgl, sg, sg_len, i) { - - acnt = dev_width; - - /* - * If the maxburst is equal to the fifo width, use - * A-synced transfers. This allows for large contiguous - * buffer transfers using only one PaRAM set. - */ - if (burst == 1) { - edesc->absync = false; - ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); - bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); - if (bcnt) - ccnt++; - else - bcnt = SZ_64K - 1; - cidx = acnt; - /* - * If maxburst is greater than the fifo address_width, - * use AB-synced transfers where A count is the fifo - * address_width and B count is the maxburst. In this - * case, we are limited to transfers of C count frames - * of (address_width * maxburst) where C count is limited - * to SZ_64K-1. This places an upper bound on the length - * of an SG segment that can be handled. 
- */ - } else { - edesc->absync = true; - bcnt = burst; - ccnt = sg_dma_len(sg) / (acnt * bcnt); - if (ccnt > (SZ_64K - 1)) { - dev_err(dev, "Exceeded max SG segment size\n"); - return NULL; - } - cidx = acnt * bcnt; + /* Get address for each SG */ + if (direction == DMA_DEV_TO_MEM) + dst_addr = sg_dma_address(sg); + else + src_addr = sg_dma_address(sg); + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, + sg_dma_len(sg), direction); + if (ret < 0) { + kfree(edesc); + return NULL; } - if (direction == DMA_MEM_TO_DEV) { - src = sg_dma_address(sg); - dst = dev_addr; - src_bidx = acnt; - src_cidx = cidx; - dst_bidx = 0; - dst_cidx = 0; - } else { - src = dev_addr; - dst = sg_dma_address(sg); - src_bidx = 0; - src_cidx = 0; - dst_bidx = acnt; - dst_cidx = cidx; - } - - edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); - /* Configure A or AB synchronized transfers */ - if (edesc->absync) - edesc->pset[i].opt |= SYNCDIM; + edesc->absync = ret; /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ @@ -380,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) edesc->pset[i].opt |= TCINTEN; + } - edesc->pset[i].src = src; - edesc->pset[i].dst = dst; + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} - edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; - edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; +static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long tx_flags, void *context) +{ + struct edma_chan *echan = to_edma_chan(chan); + struct device *dev = chan->device->dev; + struct edma_desc *edesc; + dma_addr_t src_addr, dst_addr; + enum dma_slave_buswidth dev_width; + u32 burst; + int i, ret, nslots; + + if (unlikely(!echan || !buf_len || !period_len)) + return NULL; + + if (direction == DMA_DEV_TO_MEM) { + src_addr = echan->cfg.src_addr; + dst_addr = buf_addr; + dev_width = echan->cfg.src_addr_width; + burst = echan->cfg.src_maxburst; + } else if (direction == DMA_MEM_TO_DEV) { + src_addr = buf_addr; + dst_addr = echan->cfg.dst_addr; + dev_width = echan->cfg.dst_addr_width; + burst = echan->cfg.dst_maxburst; + } else { + dev_err(dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { + dev_err(dev, "Undefined slave buswidth\n"); + return NULL; + } + + if (unlikely(buf_len % period_len)) { + dev_err(dev, "Period should be multiple of Buffer length\n"); + return NULL; + } + + nslots = (buf_len / period_len) + 1; + + /* + * Cyclic DMA users such as audio cannot tolerate delays introduced + * by cases where the number of periods is more than the maximum + * number of SGs the EDMA driver can handle at a time. For DMA types + * such as Slave SGs, such delays are tolerable and synchronized, + * but the synchronization is difficult to achieve with Cyclic and + * cannot be guaranteed, so we error out early. 
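[Editor's illustration, not part of the patch: for a 40 KiB audio ring split into 4 KiB periods, nslots = (buf_len / period_len) + 1 = 11, comfortably inside the new MAX_NR_SG of 20. The extra slot holds a copy of pset[0], and edma_execute() links the final slot back to slot[1], so the ring recycles in hardware with no CPU intervention between periods.]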
+ */ + if (nslots > MAX_NR_SG) + return NULL; + + edesc = kzalloc(sizeof(*edesc) + nslots * + sizeof(edesc->pset[0]), GFP_ATOMIC); + if (!edesc) { + dev_dbg(dev, "Failed to allocate a descriptor\n"); + return NULL; + } + + edesc->cyclic = 1; + edesc->pset_nr = nslots; + + dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); + dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); + dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); + + for (i = 0; i < nslots; i++) { + /* Allocate a PaRAM slot, if needed */ + if (echan->slot[i] < 0) { + echan->slot[i] = + edma_alloc_slot(EDMA_CTLR(echan->ch_num), + EDMA_SLOT_ANY); + if (echan->slot[i] < 0) { + dev_err(dev, "Failed to allocate slot\n"); + return NULL; + } + } + + if (i == nslots - 1) { + memcpy(&edesc->pset[i], &edesc->pset[0], + sizeof(edesc->pset[0])); + break; + } + + ret = edma_config_pset(chan, &edesc->pset[i], src_addr, + dst_addr, burst, dev_width, period_len, + direction); + if (ret < 0) + return NULL; - edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; - edesc->pset[i].ccnt = ccnt; - edesc->pset[i].link_bcntrld = 0xffffffff; + if (direction == DMA_DEV_TO_MEM) + dst_addr += period_len; + else + src_addr += period_len; + dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); + dev_dbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + i, echan->ch_num, echan->slot[i], + edesc->pset[i].opt, + edesc->pset[i].src, + edesc->pset[i].dst, + edesc->pset[i].a_b_cnt, + edesc->pset[i].ccnt, + edesc->pset[i].src_dst_bidx, + edesc->pset[i].src_dst_cidx, + edesc->pset[i].link_bcntrld); + + edesc->absync = ret; + + /* + * Enable interrupts for every period because callback + * has to be called for every period. 
+ */ + edesc->pset[i].opt |= TCINTEN; } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); @@ -404,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) unsigned long flags; struct edmacc_param p; - /* Pause the channel */ - edma_pause(echan->ch_num); + edesc = echan->edesc; + + /* Pause the channel for non-cyclic */ + if (!edesc || (edesc && !edesc->cyclic)) + edma_pause(echan->ch_num); switch (ch_status) { - case DMA_COMPLETE: + case EDMA_DMA_COMPLETE: spin_lock_irqsave(&echan->vchan.lock, flags); - edesc = echan->edesc; if (edesc) { - if (edesc->processed == edesc->pset_nr) { + if (edesc->cyclic) { + vchan_cyclic_callback(&edesc->vdesc); + } else if (edesc->processed == edesc->pset_nr) { dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); edma_stop(echan->ch_num); vchan_cookie_complete(&edesc->vdesc); + edma_execute(echan); } else { dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); + edma_execute(echan); } - - edma_execute(echan); } spin_unlock_irqrestore(&echan->vchan.lock, flags); break; - case DMA_CC_ERROR: + case EDMA_DMA_CC_ERROR: spin_lock_irqsave(&echan->vchan.lock, flags); edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); @@ -577,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS || !txstate) + if (ret == DMA_COMPLETE || !txstate) return ret; spin_lock_irqsave(&echan->vchan.lock, flags); @@ -617,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, struct device *dev) { dma->device_prep_slave_sg = edma_prep_slave_sg; + dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; dma->device_alloc_chan_resources = edma_alloc_chan_resources; dma->device_free_chan_resources = edma_free_chan_resources; dma->device_issue_pending = edma_issue_pending; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index d9e6381b2b16..bd7f888b7601 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1238,7 +1238,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, WARN_ON(fdev->feature != chan->feature); chan->dev = fdev->dev; - chan->id = ((res.start - 0x100) & 0xfff) >> 7; + chan->id = (res.start & 0xfff) < 0x300 ? 
+ ((res.start - 0x100) & 0xfff) >> 7 : + ((res.start - 0x200) & 0xfff) >> 7; if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; @@ -1411,6 +1413,7 @@ static int fsldma_of_remove(struct platform_device *op) } static const struct of_device_id fsldma_of_ids[] = { + { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} @@ -1432,7 +1435,7 @@ static struct platform_driver fsldma_of_driver = { static __init int fsldma_init(void) { - pr_info("Freescale Elo / Elo Plus DMA driver\n"); + pr_info("Freescale Elo series DMA driver\n"); return platform_driver_register(&fsldma_of_driver); } @@ -1444,5 +1447,5 @@ static void __exit fsldma_exit(void) subsys_initcall(fsldma_init); module_exit(fsldma_exit); -MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); +MODULE_DESCRIPTION("Freescale Elo series DMA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f5c38791fc74..1ffc24484d23 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -112,7 +112,7 @@ struct fsldma_chan_regs { }; struct fsldma_chan; -#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 +#define FSL_DMA_MAX_CHANS_PER_DEVICE 8 struct fsldma_device { void __iomem *regs; /* DGSR register base */ diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 55852c026791..6f9ac2022abd 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x " - "dma_length=%d\n", __func__, imxdmac->channel, - d->dest, d->src, d->len); + dev_dbg(imxdma->dev, + "%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n", + __func__, imxdmac->channel, + (unsigned long long)d->dest, + (unsigned long long)d->src, d->len); break; /* Cyclic transfer is the same as slave_sg with special sg configuration. 
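[Editor's note, not part of the patch: dma_addr_t may be 32 or 64 bits wide depending on the configuration, so the old fixed 0x%08x conversions provoke printk format warnings where it is 64-bit. The idiom adopted in the imx-dma, imx-sdma and ipu hunks is to widen the address explicitly and print it with a 64-bit conversion, e.g. dev_dbg(dev, "dst=0x%08llx len=%zu\n", (unsigned long long)d->dest, d->len); — %llx for the cast address, %zu for size_t lengths.]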
*/ @@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d) imx_dmav1_writel(imxdma, imxdmac->ccr_from_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (dev2mem)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else if (d->direction == DMA_MEM_TO_DEV) { imx_dmav1_writel(imxdma, imxdmac->per_address, DMA_DAR(imxdmac->channel)); imx_dmav1_writel(imxdma, imxdmac->ccr_to_device, DMA_CCR(imxdmac->channel)); - dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d " - "total length=%d dev_addr=0x%08x (mem2dev)\n", - __func__, imxdmac->channel, d->sg, d->sgcount, - d->len, imxdmac->per_address); + dev_dbg(imxdma->dev, + "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", + __func__, imxdmac->channel, + d->sg, d->sgcount, d->len, + (unsigned long long)imxdmac->per_address); } else { dev_err(imxdma->dev, "%s channel: %d bad dma mode\n", __func__, imxdmac->channel); @@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan) desc->desc.tx_submit = imxdma_tx_submit; /* txd.flags will be overwritten in prep funcs */ desc->desc.flags = DMA_CTRL_ACK; - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; list_add_tail(&desc->node, &imxdmac->ld_free); imxdmac->descs_allocated++; @@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( int i; unsigned int periods = buf_len / period_len; - dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", + dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", __func__, imxdmac->channel, buf_len, period_len); if (list_empty(&imxdmac->ld_free) || @@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n", - __func__, imxdmac->channel, src, dest, len); + dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n", + __func__, imxdmac->channel, (unsigned long long)src, + (unsigned long long)dest, len); if (list_empty(&imxdmac->ld_free) || imxdma_chan_is_doing_cyclic(imxdmac)) @@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; - dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n" - " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__, - imxdmac->channel, xt->src_start, xt->dst_start, + dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n" + " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__, + imxdmac->channel, (unsigned long long)xt->src_start, + (unsigned long long) xt->dst_start, xt->src_sgl ? "true" : "false", xt->dst_sgl ? 
"true" : "false", xt->numf, xt->frame_size); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index fc43603cf0bb..0c18f6a70ed0 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) if (error) sdmac->status = DMA_ERROR; else - sdmac->status = DMA_SUCCESS; + sdmac->status = DMA_COMPLETE; dma_cookie_complete(&sdmac->desc); if (sdmac->desc.callback) @@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( param &= ~BD_CONT; } - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, count, sg->dma_address, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, count, (u64)sg->dma_address, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); @@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( if (i + 1 == num_periods) param |= BD_WRAP; - dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", - i, period_len, dma_addr, + dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n", + i, period_len, (u64)dma_addr, param & BD_WRAP ? "wrap" : "", param & BD_INTR ? " intr" : ""); diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index a975ebebea8a..1aab8130efa1 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, callback_txd(param_txd); } if (midc->raw_tfr) { - desc->status = DMA_SUCCESS; + desc->status = DMA_COMPLETE; if (desc->lli != NULL) { pci_pool_free(desc->lli_pool, desc->lli, desc->lli_phys); @@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret != DMA_SUCCESS) { + if (ret != DMA_COMPLETE) { spin_lock_bh(&midc->lock); midc_scan_descriptors(to_middma_device(chan->device), midc); spin_unlock_bh(&midc->lock); diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 4f67473f3b81..1a49c777607c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -718,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; device->cleanup_fn((unsigned long) c); @@ -843,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device) if (tmo == 0 || dma->device_tx_status(dma_chan, cookie, NULL) - != DMA_SUCCESS) { + != DMA_COMPLETE) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto unmap_dma; diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 915b1f66f2da..820817e97e62 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -631,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, enum dma_status ret; ret = dma_cookie_status(c, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; ioat3_cleanup(ioat); @@ -1289,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1349,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, 
msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1396,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto dma_unmap; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 173e26ff18f8..c56137bc3868 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -773,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, int ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; iop_adma_slot_cleanup(iop_chan); @@ -892,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device) msleep(1); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -992,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; @@ -1038,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test zero sum timed out, disabling\n"); err = -ENODEV; @@ -1067,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device) iop_adma_issue_pending(dma_chan); msleep(8); - if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test non-zero sum timed out, disabling\n"); err = -ENODEV; @@ -1163,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1200,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; @@ -1232,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) msleep(8); if (iop_adma_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); err = -ENODEV; goto free_resources; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb9c0bc317e8..128ca143486d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); descnew = desc; - dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", - irq, sg_dma_address(*sg), sgnext ? 
sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); + dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n", + irq, (u64)sg_dma_address(*sg), + sgnext ? (u64)sg_dma_address(sgnext) : 0, + ichan->active_buffer, curbuf); /* Find the descriptor of sgnext */ sgnew = idmac_sg_next(ichan, &descnew, *sg); diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a2c330f5f952..e26075408e9b 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, size_t bytes = 0; ret = dma_cookie_status(&c->vc.chan, cookie, state); - if (ret == DMA_SUCCESS) + if (ret == DMA_COMPLETE) return ret; spin_lock_irqsave(&c->vc.lock, flags); @@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op) irq = platform_get_irq(op, 0); ret = devm_request_irq(&op->dev, irq, - k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d); + k3_dma_int_handler, 0, DRIVER_NAME, d); if (ret) return ret; diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index ff8d7827f8cb..dcb1e05149a7 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data) * move the descriptors to a temporary list so we can drop * the lock during the entire cleanup operation */ - list_del(&desc->node); - list_add(&desc->node, &chain_cleanup); + list_move(&desc->node, &chain_cleanup); /* * Look for the first list entry which has the ENDIRQEN flag @@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, if (irq) { ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); + mmp_pdma_chan_handler, 0, "pdma", phy); if (ret) { dev_err(pdev->dev, "channel request irq fail!\n"); return ret; @@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op) /* all chan share one irq, demux inside */ irq = platform_get_irq(op, 0); ret = devm_request_irq(pdev->dev, irq, - mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); + mmp_pdma_int_handler, 0, "pdma", pdev); if (ret) return ret; } diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 38cb517fb2eb..2b4026d1f31d 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -62,6 +62,11 @@ #define TDCR_BURSTSZ_16B (0x3 << 6) #define TDCR_BURSTSZ_32B (0x6 << 6) #define TDCR_BURSTSZ_64B (0x7 << 6) +#define TDCR_BURSTSZ_SQU_1B (0x5 << 6) +#define TDCR_BURSTSZ_SQU_2B (0x6 << 6) +#define TDCR_BURSTSZ_SQU_4B (0x0 << 6) +#define TDCR_BURSTSZ_SQU_8B (0x1 << 6) +#define TDCR_BURSTSZ_SQU_16B (0x3 << 6) #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) #define TDCR_BURSTSZ_128B (0x5 << 6) #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ @@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) /* disable irq */ writel(0, tdmac->reg_base + TDIMR); - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; } static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) @@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) return -EINVAL; } } else if (tdmac->type == PXA910_SQU) { - tdcr |= TDCR_BURSTSZ_SQU_32B; tdcr |= TDCR_SSPMOD; + + switch (tdmac->burst_sz) { + case 1: + tdcr |= TDCR_BURSTSZ_SQU_1B; + break; + case 2: + tdcr |= TDCR_BURSTSZ_SQU_2B; + break; + case 4: + tdcr |= TDCR_BURSTSZ_SQU_4B; + break; + case 8: + tdcr |= TDCR_BURSTSZ_SQU_8B; + break; + case 16: + tdcr |= TDCR_BURSTSZ_SQU_16B; + break; + case 32: + tdcr |= TDCR_BURSTSZ_SQU_32B; + break; + default: + dev_err(tdmac->dev, 
"mmp_tdma: unknown burst size.\n"); + return -EINVAL; + } } writel(tdcr, tdmac->reg_base + TDCR); @@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) if (tdmac->irq) { ret = devm_request_irq(tdmac->dev, tdmac->irq, - mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); + mmp_tdma_chan_handler, 0, "tdma", tdmac); if (ret) return ret; } @@ -370,7 +398,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( int num_periods = buf_len / period_len; int i = 0, buf = 0; - if (tdmac->status != DMA_SUCCESS) + if (tdmac->status != DMA_COMPLETE) return NULL; if (period_len > TDMA_MAX_XFER_BYTES) { @@ -504,7 +532,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, tdmac->idx = idx; tdmac->type = type; tdmac->reg_base = (unsigned long)tdev->base + idx * 4; - tdmac->status = DMA_SUCCESS; + tdmac->status = DMA_COMPLETE; tdev->tdmac[tdmac->idx] = tdmac; tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); @@ -559,7 +587,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) if (irq_num != chan_num) { irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, - mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); + mmp_tdma_int_handler, 0, "tdma", tdev); if (ret) return ret; } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 1b846d5d8408..7807f0ef4e20 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -708,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); - if (ret == DMA_SUCCESS) { + if (ret == DMA_COMPLETE) { mv_xor_clean_completed_slots(mv_chan); return ret; } @@ -833,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) msleep(1); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -927,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) msleep(8); if (mv_xor_status(dma_chan, cookie, NULL) != - DMA_SUCCESS) { + DMA_COMPLETE) { dev_err(dma_chan->device->dev, "Self-test xor timed out, disabling\n"); err = -ENODEV; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ccd13df841db..ead491346da7 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> +#include <linux/list.h> #include <asm/irq.h> @@ -57,6 +58,9 @@ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) #define HW_APBHX_CHn_SEMA(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) +#define HW_APBHX_CHn_BAR(d, n) \ + (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) +#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) /* * ccw bits definitions @@ -115,7 +119,9 @@ struct mxs_dma_chan { int desc_count; enum dma_status status; unsigned int flags; + bool reset; #define MXS_DMA_SG_LOOP (1 << 0) +#define MXS_DMA_USE_SEMAPHORE (1 << 1) }; #define MXS_DMA_CHANNELS 16 @@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; - if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) + /* + * mxs dma channel resets can cause a channel stall. To recover from a + * channel stall, we have to reset the whole DMA engine. To avoid this, + * we use cyclic DMA with semaphores, that are enhanced in + * mxs_dma_int_handler. 
To reset the channel, we can simply stop writing + * into the semaphore counter. + */ + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->reset = true; + } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); - else + } else { + unsigned long elapsed = 0; + const unsigned long max_wait = 50000; /* 50ms */ + void __iomem *reg_dbg1 = mxs_dma->base + + HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); + + /* + * On i.MX28 APBX, the DMA channel can stop working if we reset + * the channel while it is in READ_FLUSH (0x08) state. + * We wait here until we leave the state. Then we trigger the + * reset. Waiting a maximum of 50ms, the kernel shouldn't crash + * because of this. + */ + while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { + udelay(100); + elapsed += 100; + } + + if (elapsed >= max_wait) + dev_err(&mxs_chan->mxs_dma->pdev->dev, + "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n", + chan_id); + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); + } + + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) @@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id)); /* write 1 to SEMA to kick off the channel */ - writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && + mxs_chan->flags & MXS_DMA_SG_LOOP) { + /* A cyclic DMA consists of at least 2 segments, so initialize + * the semaphore with 2 so we have enough time to add 1 to the + * semaphore if we need to */ + writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } else { + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id)); + } + mxs_chan->reset = false; } static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) { - mxs_chan->status = DMA_SUCCESS; + mxs_chan->status = DMA_COMPLETE; } static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) @@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data) mxs_chan->desc.callback(mxs_chan->desc.callback_param); } +static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) +{ + int i; + + for (i = 0; i != mxs_dma->nr_channels; ++i) + if (mxs_dma->mxs_chans[i].chan_irq == irq) + return i; + + return -EINVAL; +} + static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) { struct mxs_dma_engine *mxs_dma = dev_id; - u32 stat1, stat2; + struct mxs_dma_chan *mxs_chan; + u32 completed; + u32 err; + int chan = mxs_dma_irq_to_chan(mxs_dma, irq); + + if (chan < 0) + return IRQ_NONE; /* completion status */ - stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); - stat1 &= MXS_DMA_CHANNELS_MASK; - writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); + completed = readl(mxs_dma->base + HW_APBHX_CTRL1); + completed = (completed >> chan) & 0x1; + + /* Clear interrupt */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); /* error status */ - stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); - writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); + err = readl(mxs_dma->base + HW_APBHX_CTRL2); + err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); + + /* + * error status bit is in the upper 16 bits, error irq bit in the 
lower + * 16 bits. We transform it into a simpler error code: + * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR + */ + err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); + + /* Clear error irq */ + writel((1 << chan), + mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); /* * When both completion and error of termination bits set at the * same time, we do not take it as an error. IOW, it only becomes - * an error we need to handle here in case of either it's (1) a bus - * error or (2) a termination error with no completion. + * an error we need to handle here in case of either it's a bus + * error or a termination error with no completion. 0x01 is termination + * error, so we can subtract err & completed to get the real error case. */ - stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ - (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ - - /* combine error and completion status for checking */ - stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; - while (stat1) { - int channel = fls(stat1) - 1; - struct mxs_dma_chan *mxs_chan = - &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; - - if (channel >= MXS_DMA_CHANNELS) { - dev_dbg(mxs_dma->dma_device.dev, - "%s: error in channel %d\n", __func__, - channel - MXS_DMA_CHANNELS); - mxs_chan->status = DMA_ERROR; - mxs_dma_reset_chan(mxs_chan); - } else { - if (mxs_chan->flags & MXS_DMA_SG_LOOP) - mxs_chan->status = DMA_IN_PROGRESS; - else - mxs_chan->status = DMA_SUCCESS; - } + err -= err & completed; - stat1 &= ~(1 << channel); + mxs_chan = &mxs_dma->mxs_chans[chan]; - if (mxs_chan->status == DMA_SUCCESS) - dma_cookie_complete(&mxs_chan->desc); + if (err) { + dev_dbg(mxs_dma->dma_device.dev, + "%s: error in channel %d\n", __func__, + chan); + mxs_chan->status = DMA_ERROR; + mxs_dma_reset_chan(mxs_chan); + } else if (mxs_chan->status != DMA_COMPLETE) { + if (mxs_chan->flags & MXS_DMA_SG_LOOP) { + mxs_chan->status = DMA_IN_PROGRESS; + if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) + writel(1, mxs_dma->base + + HW_APBHX_CHn_SEMA(mxs_dma, chan)); + } else { + mxs_chan->status = DMA_COMPLETE; + } + } - /* schedule tasklet on this channel */ - tasklet_schedule(&mxs_chan->tasklet); + if (mxs_chan->status == DMA_COMPLETE) { + if (mxs_chan->reset) + return IRQ_HANDLED; + dma_cookie_complete(&mxs_chan->desc); } + /* schedule tasklet on this channel */ + tasklet_schedule(&mxs_chan->tasklet); + return IRQ_HANDLED; } @@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( mxs_chan->status = DMA_IN_PROGRESS; mxs_chan->flags |= MXS_DMA_SG_LOOP; + mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; if (num_periods > NUM_CCW) { dev_err(mxs_dma->dma_device.dev, @@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( ccw->bits |= CCW_IRQ; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; + ccw->bits |= CCW_DEC_SEM; ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 
@@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 		ccw->bits |= CCW_IRQ;
 		ccw->bits |= CCW_HALT_ON_TERM;
 		ccw->bits |= CCW_TERM_FLUSH;
+		ccw->bits |= CCW_DEC_SEM;
 		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
 				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
@@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
 			dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+	u32 residue = 0;
+
+	if (mxs_chan->status == DMA_IN_PROGRESS &&
+			mxs_chan->flags & MXS_DMA_SG_LOOP) {
+		struct mxs_dma_ccw *last_ccw;
+		u32 bar;
+
+		last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
+		residue = last_ccw->xfer_bytes + last_ccw->bufaddr;
+
+		bar = readl(mxs_dma->base +
+				HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
+		residue -= bar;
+	}
 
-	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
+	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
+			residue);
 
 	return mxs_chan->status;
 }
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ec3fc4fd9160..2f66cf4e54fe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 	unsigned long flags;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS || !txstate)
+	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
 	spin_lock_irqsave(&c->vc.lock, flags);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index ab25e52cd43b..877d7bb767d9 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2316,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
 		return false;
 
 	peri_id = chan->private;
-	return *peri_id == (unsigned)param;
+	return *peri_id == (unsigned long)param;
 }
 EXPORT_SYMBOL(pl330_filter);
 
@@ -2924,16 +2924,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 	amba_set_drvdata(adev, pdmac);
 
-	irq = adev->irq[0];
-	ret = request_irq(irq, pl330_irq_handler, 0,
-			dev_name(&adev->dev), pi);
-	if (ret)
-		return ret;
+	for (i = 0; i < AMBA_NR_IRQS; i++) {
+		irq = adev->irq[i];
+		if (irq) {
+			ret = devm_request_irq(&adev->dev, irq,
+					       pl330_irq_handler, 0,
+					       dev_name(&adev->dev), pi);
+			if (ret)
+				return ret;
+		} else {
+			break;
+		}
+	}
 
 	pi->pcfg.periph_id = adev->periphid;
 	ret = pl330_add(pi);
 	if (ret)
-		goto probe_err1;
+		return ret;
 
 	INIT_LIST_HEAD(&pdmac->desc_pool);
 	spin_lock_init(&pdmac->pool_lock);
@@ -3031,8 +3038,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
 	return 0;
 probe_err3:
-	amba_set_drvdata(adev, NULL);
-
 	/* Idle the DMAC */
 	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
 			chan.device_node) {
@@ -3046,8 +3051,6 @@ probe_err3:
 	}
 probe_err2:
 	pl330_del(pi);
-probe_err1:
-	free_irq(irq, pi);
 
 	return ret;
 }
@@ -3057,7 +3060,6 @@ static int pl330_remove(struct amba_device *adev)
 	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
 	struct dma_pl330_chan *pch, *_p;
 	struct pl330_info *pi;
-	int irq;
 
 	if (!pdmac)
 		return 0;
@@ -3066,7 +3068,6 @@ static int pl330_remove(struct amba_device *adev)
 		of_dma_controller_free(adev->dev.of_node);
 
 	dma_async_device_unregister(&pdmac->ddma);
-	amba_set_drvdata(adev, NULL);
 
 	/* Idle the DMAC */
 	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
@@ -3084,9 +3085,6 @@ static int pl330_remove(struct amba_device *adev)
 
 	pl330_del(pi);
 
-	irq = adev->irq[0];
-	free_irq(irq, pi);
-
 	return 0;
 }
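[Editor's aside: the switch to devm_request_irq() above is what allows pl330's error path and remove callback to drop their free_irq() calls, since managed IRQs are released automatically when the device is unbound. A minimal sketch of the pattern; the demo_* names are made up.]

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

/*
 * Editor's sketch, not from the patch: request every populated AMBA
 * IRQ via devm_*, mirroring the loop added to pl330_probe().
 */
static int demo_probe(struct amba_device *adev)
{
	int i, ret;

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		if (!adev->irq[i])
			break;		/* the irq list is densely packed */
		ret = devm_request_irq(&adev->dev, adev->irq[i], demo_irq,
				       0, dev_name(&adev->dev), adev);
		if (ret)
			return ret;	/* no manual free_irq() unwind needed */
	}
	return 0;
}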
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 429be432ab7e..3005938ba47a 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3623,7 +3623,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
 	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	ppc440spe_adma_slot_cleanup(ppc440spe_chan);
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 461a91ab70bb..ab26d46bbe15 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(&c->vc.chan, cookie, state);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	if (!state)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index d94ab592cc1b..2e7b394def80 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
 	 * If we don't find cookie on the queue, it has been aborted and we have
 	 * to report error
 	 */
-	if (status != DMA_SUCCESS) {
+	if (status != DMA_COMPLETE) {
 		struct shdma_desc *sdesc;
 		status = DMA_ERROR;
 		list_for_each_entry(sdesc, &schan->ld_queue, node)
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 1069e8869f20..0d765c0e21ec 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
 	const struct sh_dmae_pdata *pdata;
-	unsigned long irqflags = IRQF_DISABLED,
+	unsigned long irqflags = 0,
 		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
 	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
 	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
 			    IORESOURCE_IRQ_SHAREABLE)
 				chan_flag[irq_cnt] = IRQF_SHARED;
 			else
-				chan_flag[irq_cnt] = IRQF_DISABLED;
+				chan_flag[irq_cnt] = 0;
 			dev_dbg(&pdev->dev,
 				"Found IRQ %d for channel %d\n",
 				i, irq_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 82d2b97ad942..b8c031b7de4e 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
 	}
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret != DMA_SUCCESS)
+	if (ret != DMA_COMPLETE)
 		dma_set_residue(txstate, stedma40_residue(chan));
 
 	if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	    src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
 	    dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
 	    dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
-	    ((src_addr_width > 1) && (src_addr_width & 1)) ||
-	    ((dst_addr_width > 1) && (dst_addr_width & 1)))
+	    !is_power_of_2(src_addr_width) ||
+	    !is_power_of_2(dst_addr_width))
		return -EINVAL;
 
 	cfg->src_info.data_width = src_addr_width;
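[Editor's aside: the is_power_of_2() check above is strictly stronger than the old odd-width test, which only rejected odd values greater than 1. A small illustration, not from the patch:]

#include <linux/log2.h>
#include <linux/types.h>

/*
 * Editor's illustration: the old test accepts a bogus 6-byte bus
 * width (even, but not a power of two); is_power_of_2() does not.
 */
static bool width_ok_old(u32 w)
{
	return !((w > 1) && (w & 1));	/* width_ok_old(6) == true */
}

static bool width_ok_new(u32 w)
{
	return is_power_of_2(w);	/* only 1, 2, 4, 8, ... pass */
}

The valid DMA_SLAVE_BUSWIDTH_* values 1, 2, 4 and 8 pass both tests; only the invalid in-between values behave differently.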
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 5d4986e5f5fa..73654e33f13b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
 	list_del(&sgreq->node);
 	if (sgreq->last_sg) {
-		dma_desc->dma_status = DMA_SUCCESS;
+		dma_desc->dma_status = DMA_COMPLETE;
 		dma_cookie_complete(&dma_desc->txd);
 		if (!dma_desc->cb_count)
 			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 	unsigned int residual;
 
 	ret = dma_cookie_status(dc, cookie, txstate);
-	if (ret == DMA_SUCCESS)
+	if (ret == DMA_COMPLETE)
 		return ret;
 
 	spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 	return &dma_desc->txd;
 }
 
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
 	unsigned long flags, void *context)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 6f2729874016..bae6c29f5502 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -939,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	enum dma_status ret;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS)
-		return DMA_SUCCESS;
+	if (ret == DMA_COMPLETE)
+		return DMA_COMPLETE;
 
 	spin_lock_bh(&dc->lock);
 	txx9dmac_scan_descriptors(dc);
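[Editor's aside: the DMA_SUCCESS -> DMA_COMPLETE rename recurs in the tx_status callback of nearly every driver touched above, and they all share one shape. A hedged sketch of that canonical pattern; demo_tx_status is made up, while dma_cookie_status() and dma_set_residue() come from the drivers/dma private header as in the drivers here.]

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_status(), dma_set_residue() */

/*
 * Editor's sketch of the callback shape shared by the drivers above;
 * not a real driver.
 */
static enum dma_status demo_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	enum dma_status ret;

	/* A fully retired cookie needs no residue bookkeeping. */
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/*
	 * Otherwise report the outstanding byte count. The source of
	 * that value is driver-specific, so a placeholder 0 stands in.
	 */
	dma_set_residue(txstate, 0);
	return ret;
}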