Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/amba-pl08x.c    | 381
-rw-r--r--  drivers/dma/at_hdmac.c      |   5
-rw-r--r--  drivers/dma/dw_dmac.c       |   5
-rw-r--r--  drivers/dma/imx-dma.c       |   1
-rw-r--r--  drivers/dma/imx-sdma.c      |   1
-rw-r--r--  drivers/dma/intel_mid_dma.c |   2
-rw-r--r--  drivers/dma/mpc512x_dma.c   |   1
-rw-r--r--  drivers/dma/pch_dma.c       |   2
-rw-r--r--  drivers/dma/pl330.c         |   2
-rw-r--r--  drivers/dma/timb_dma.c      |   3
10 files changed, 224 insertions(+), 179 deletions(-)
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index cd8df7f5b5c8..b7cbd1ab1db1 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -352,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *txdi;
list_for_each_entry(txdi, &plchan->pend_list, node) {
- bytes += txdi->len;
+ struct pl08x_sg *dsg;
+ list_for_each_entry(dsg, &txdi->dsg_list, node)
+ bytes += dsg->len;
}
}
@@ -567,8 +569,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
struct pl08x_lli_build_data bd;
int num_llis = 0;
u32 cctl, early_bytes = 0;
- size_t max_bytes_per_lli, total_bytes = 0;
+ size_t max_bytes_per_lli, total_bytes;
struct pl08x_lli *llis_va;
+ struct pl08x_sg *dsg;
txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
if (!txd->llis_va) {
@@ -578,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x->pool_ctr++;
- /* Get the default CCTL */
- cctl = txd->cctl;
-
bd.txd = txd;
- bd.srcbus.addr = txd->src_addr;
- bd.dstbus.addr = txd->dst_addr;
bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+ cctl = txd->cctl;
/* Find maximum width of the source bus */
bd.srcbus.maxwidth =
@@ -596,162 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
PL080_CONTROL_DWIDTH_SHIFT);
- /* Set up the bus widths to the maximum */
- bd.srcbus.buswidth = bd.srcbus.maxwidth;
- bd.dstbus.buswidth = bd.dstbus.maxwidth;
+ list_for_each_entry(dsg, &txd->dsg_list, node) {
+ total_bytes = 0;
+ cctl = txd->cctl;
- /* We need to count this down to zero */
- bd.remainder = txd->len;
+ bd.srcbus.addr = dsg->src_addr;
+ bd.dstbus.addr = dsg->dst_addr;
+ bd.remainder = dsg->len;
+ bd.srcbus.buswidth = bd.srcbus.maxwidth;
+ bd.dstbus.buswidth = bd.dstbus.maxwidth;
- pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
- dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
- bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
- bd.srcbus.buswidth,
- bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
- bd.dstbus.buswidth,
- bd.remainder);
- dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
- mbus == &bd.srcbus ? "src" : "dst",
- sbus == &bd.srcbus ? "src" : "dst");
+ dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+ bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ bd.srcbus.buswidth,
+ bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ bd.dstbus.buswidth,
+ bd.remainder);
+ dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+ mbus == &bd.srcbus ? "src" : "dst",
+ sbus == &bd.srcbus ? "src" : "dst");
- /*
- * Zero length is only allowed if all these requirements are met:
- * - flow controller is peripheral.
- * - src.addr is aligned to src.width
- * - dst.addr is aligned to dst.width
- *
- * sg_len == 1 should be true, as there can be two cases here:
- * - Memory addresses are contiguous and are not scattered. Here, Only
- * one sg will be passed by user driver, with memory address and zero
- * length. We pass this to controller and after the transfer it will
- * receive the last burst request from peripheral and so transfer
- * finishes.
- *
- * - Memory addresses are scattered and are not contiguous. Here,
- * Obviously as DMA controller doesn't know when a lli's transfer gets
- * over, it can't load next lli. So in this case, there has to be an
- * assumption that only one lli is supported. Thus, we can't have
- * scattered addresses.
- */
- if (!bd.remainder) {
- u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
- PL080_CONFIG_FLOW_CONTROL_SHIFT;
- if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+ /*
+ * Zero length is only allowed if all these requirements are
+ * met:
+ * - flow controller is peripheral.
+ * - src.addr is aligned to src.width
+ * - dst.addr is aligned to dst.width
+ *
+ * sg_len == 1 should be true, as there can be two cases here:
+ *
+ * - Memory addresses are contiguous and are not scattered.
+ * Here, only one sg will be passed by the user driver, with
+ * memory address and zero length. We pass this to controller
+ * and after the transfer it will receive the last burst
+ * request from peripheral and so transfer finishes.
+ *
+ * - Memory addresses are scattered and are not contiguous.
+ * Here, obviously, as the DMA controller doesn't know when an lli's
+ * transfer gets over, it can't load next lli. So in this
+ * case, there has to be an assumption that only one lli is
+ * supported. Thus, we can't have scattered addresses.
+ */
+ if (!bd.remainder) {
+ u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+ PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
(fc <= PL080_FLOW_SRC2DST_SRC))) {
- dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
- __func__);
- return 0;
- }
-
- if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
- (bd.srcbus.addr % bd.srcbus.buswidth)) {
- dev_err(&pl08x->adev->dev,
- "%s src & dst address must be aligned to src"
- " & dst width if peripheral is flow controller",
- __func__);
- return 0;
- }
-
- cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
- bd.dstbus.buswidth, 0);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
- }
+ dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+ __func__);
+ return 0;
+ }
- /*
- * Send byte by byte for following cases
- * - Less than a bus width available
- * - until master bus is aligned
- */
- if (bd.remainder < mbus->buswidth)
- early_bytes = bd.remainder;
- else if ((mbus->addr) % (mbus->buswidth)) {
- early_bytes = mbus->buswidth - (mbus->addr) % (mbus->buswidth);
- if ((bd.remainder - early_bytes) < mbus->buswidth)
- early_bytes = bd.remainder;
- }
+ if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+ (bd.dstbus.addr % bd.dstbus.buswidth)) {
+ dev_err(&pl08x->adev->dev,
+ "%s src & dst address must be aligned to src"
+ " & dst width if peripheral is flow controller",
+ __func__);
+ return 0;
+ }
- if (early_bytes) {
- dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs "
- "(remain 0x%08x)\n", __func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
- &total_bytes);
- }
+ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+ bd.dstbus.buswidth, 0);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ break;
+ }
- if (bd.remainder) {
/*
- * Master now aligned
- * - if slave is not then we must set its width down
+ * Send byte by byte for the following cases:
+ * - Less than a bus width available
+ * - until master bus is aligned
*/
- if (sbus->addr % sbus->buswidth) {
- dev_dbg(&pl08x->adev->dev,
- "%s set down bus width to one byte\n",
- __func__);
+ if (bd.remainder < mbus->buswidth)
+ early_bytes = bd.remainder;
+ else if ((mbus->addr) % (mbus->buswidth)) {
+ early_bytes = mbus->buswidth - (mbus->addr) %
+ (mbus->buswidth);
+ if ((bd.remainder - early_bytes) < mbus->buswidth)
+ early_bytes = bd.remainder;
+ }
- sbus->buswidth = 1;
+ if (early_bytes) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s byte width LLIs (remain 0x%08x)\n",
+ __func__, bd.remainder);
+ prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+ &total_bytes);
}
- /* Bytes transferred = tsize * src width, not MIN(buswidths) */
- max_bytes_per_lli = bd.srcbus.buswidth *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
+ if (bd.remainder) {
+ /*
+ * Master now aligned
+ * - if slave is not then we must set its width down
+ */
+ if (sbus->addr % sbus->buswidth) {
+ dev_dbg(&pl08x->adev->dev,
+ "%s set down bus width to one byte\n",
+ __func__);
- /*
- * Make largest possible LLIs until less than one bus
- * width left
- */
- while (bd.remainder > (mbus->buswidth - 1)) {
- size_t lli_len, tsize, width;
+ sbus->buswidth = 1;
+ }
/*
- * If enough left try to send max possible,
- * otherwise try to send the remainder
+ * Bytes transferred = tsize * src width, not
+ * MIN(buswidths)
*/
- lli_len = min(bd.remainder, max_bytes_per_lli);
+ max_bytes_per_lli = bd.srcbus.buswidth *
+ PL080_CONTROL_TRANSFER_SIZE_MASK;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s max bytes per lli = %zu\n",
+ __func__, max_bytes_per_lli);
/*
- * Check against maximum bus alignment: Calculate actual
- * transfer size in relation to bus width and get a
- * maximum remainder of the highest bus width - 1
+ * Make largest possible LLIs until less than one bus
+ * width left
*/
- width = max(mbus->buswidth, sbus->buswidth);
- lli_len = (lli_len / width) * width;
- tsize = lli_len / bd.srcbus.buswidth;
+ while (bd.remainder > (mbus->buswidth - 1)) {
+ size_t lli_len, tsize, width;
- dev_vdbg(&pl08x->adev->dev,
- "%s fill lli with single lli chunk of "
- "size 0x%08zx (remainder 0x%08zx)\n",
- __func__, lli_len, bd.remainder);
+ /*
+ * If enough left try to send max possible,
+ * otherwise try to send the remainder
+ */
+ lli_len = min(bd.remainder, max_bytes_per_lli);
- cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+ /*
+ * Check against maximum bus alignment:
+ * Calculate actual transfer size in relation to
+ * bus width and get a maximum remainder of the
+ * highest bus width - 1
+ */
+ width = max(mbus->buswidth, sbus->buswidth);
+ lli_len = (lli_len / width) * width;
+ tsize = lli_len / bd.srcbus.buswidth;
+
+ dev_vdbg(&pl08x->adev->dev,
+ "%s fill lli with single lli chunk of "
+ "size 0x%08zx (remainder 0x%08zx)\n",
+ __func__, lli_len, bd.remainder);
+
+ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
bd.dstbus.buswidth, tsize);
- pl08x_fill_lli_for_desc(&bd, num_llis++, lli_len, cctl);
- total_bytes += lli_len;
- }
+ pl08x_fill_lli_for_desc(&bd, num_llis++,
+ lli_len, cctl);
+ total_bytes += lli_len;
+ }
- /*
- * Send any odd bytes
- */
- if (bd.remainder) {
- dev_vdbg(&pl08x->adev->dev,
- "%s align with boundary, send odd bytes (remain %zu)\n",
- __func__, bd.remainder);
- prep_byte_width_lli(&bd, &cctl, bd.remainder,
- num_llis++, &total_bytes);
+ /*
+ * Send any odd bytes
+ */
+ if (bd.remainder) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundary, send odd bytes (remain %zu)\n",
+ __func__, bd.remainder);
+ prep_byte_width_lli(&bd, &cctl, bd.remainder,
+ num_llis++, &total_bytes);
+ }
}
- }
- if (total_bytes != txd->len) {
- dev_err(&pl08x->adev->dev,
- "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
- __func__, total_bytes, txd->len);
- return 0;
- }
+ if (total_bytes != dsg->len) {
+ dev_err(&pl08x->adev->dev,
+ "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+ __func__, total_bytes, dsg->len);
+ return 0;
+ }
- if (num_llis >= MAX_NUM_TSFR_LLIS) {
- dev_err(&pl08x->adev->dev,
- "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
- return 0;
+ if (num_llis >= MAX_NUM_TSFR_LLIS) {
+ dev_err(&pl08x->adev->dev,
+ "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+ __func__, (u32) MAX_NUM_TSFR_LLIS);
+ return 0;
+ }
}
llis_va = txd->llis_va;
@@ -784,11 +800,19 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
+ struct pl08x_sg *dsg, *_dsg;
+
/* Free the LLI */
- dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
+ if (txd->llis_va)
+ dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
pl08x->pool_ctr--;
+ list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+ list_del(&dsg->node);
+ kfree(dsg);
+ }
+
kfree(txd);
}
@@ -1234,6 +1258,7 @@ static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
txd->tx.flags = flags;
txd->tx.tx_submit = pl08x_tx_submit;
INIT_LIST_HEAD(&txd->node);
+ INIT_LIST_HEAD(&txd->dsg_list);
/* Always enable error and terminal interrupts */
txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1252,6 +1277,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ struct pl08x_sg *dsg;
int ret;
txd = pl08x_get_txd(plchan, flags);
@@ -1261,10 +1287,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return NULL;
}
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+ __func__);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
txd->direction = DMA_NONE;
- txd->src_addr = src;
- txd->dst_addr = dest;
- txd->len = len;
+ dsg->src_addr = src;
+ dsg->dst_addr = dest;
+ dsg->len = len;
/* Set platform data for m2m */
txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1293,19 +1328,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ struct pl08x_sg *dsg;
+ struct scatterlist *sg;
+ dma_addr_t slave_addr;
int ret, tmp;
- /*
- * Current implementation ASSUMES only one sg
- */
- if (sg_len != 1) {
- dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
- __func__);
- BUG();
- }
-
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sgl->length, plchan->name);
+ __func__, sgl->length, plchan->name);
txd = pl08x_get_txd(plchan, flags);
if (!txd) {
@@ -1324,17 +1353,15 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
* channel target address dynamically at runtime.
*/
txd->direction = direction;
- txd->len = sgl->length;
if (direction == DMA_TO_DEVICE) {
txd->cctl = plchan->dst_cctl;
- txd->src_addr = sgl->dma_address;
- txd->dst_addr = plchan->dst_addr;
+ slave_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) {
txd->cctl = plchan->src_cctl;
- txd->src_addr = plchan->src_addr;
- txd->dst_addr = sgl->dma_address;
+ slave_addr = plchan->src_addr;
} else {
+ pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
@@ -1349,6 +1376,26 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ for_each_sg(sgl, sg, sg_len, tmp) {
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+ __func__);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = sg_dma_len(sg);
+ if (direction == DMA_TO_DEVICE) {
+ dsg->src_addr = sg_phys(sg);
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = sg_phys(sg);
+ }
+ }
+
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
@@ -1452,22 +1499,28 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
struct device *dev = txd->tx.chan->device->dev;
+ struct pl08x_sg *dsg;
if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, txd->src_addr, txd->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, txd->src_addr, txd->len,
- DMA_TO_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ }
}
if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, txd->dst_addr, txd->len,
- DMA_FROM_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
else
- dma_unmap_page(dev, txd->dst_addr, txd->len,
- DMA_FROM_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
}
}
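Note on the amba-pl08x change above: the patch replaces the single src_addr/dst_addr/len triplet in struct pl08x_txd with a dsg_list of struct pl08x_sg entries, so one descriptor can describe a whole scatter-gather list and pl08x_prep_slave_sg() no longer has to BUG() on sg_len != 1. A minimal, self-contained userspace C sketch of that intrusive-list-of-segments pattern (the names mirror the driver's, but this is an illustration, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Mirrors struct pl08x_sg: one src/dst/len segment on a per-txd list. */
struct seg {
	unsigned long src_addr;
	unsigned long dst_addr;
	size_t len;
	struct seg *next;		/* stand-in for struct list_head node */
};

struct txd {
	struct seg *sg_head, *sg_tail;	/* stand-in for list_head dsg_list */
};

/* kzalloc(..., GFP_NOWAIT) + list_add_tail() in the prep routines */
static int txd_add_seg(struct txd *t, unsigned long src,
		       unsigned long dst, size_t len)
{
	struct seg *s = calloc(1, sizeof(*s));
	if (!s)
		return -1;		/* driver frees the whole txd here */
	s->src_addr = src;
	s->dst_addr = dst;
	s->len = len;
	if (t->sg_tail)
		t->sg_tail->next = s;
	else
		t->sg_head = s;
	t->sg_tail = s;
	return 0;
}

/* list_for_each_entry(): sum segment lengths, as pl08x_getbytes_chan() */
static size_t txd_bytes(const struct txd *t)
{
	size_t bytes = 0;
	for (const struct seg *s = t->sg_head; s; s = s->next)
		bytes += s->len;
	return bytes;
}

/* list_for_each_entry_safe() + list_del() + kfree(), as pl08x_free_txd() */
static void txd_free_segs(struct txd *t)
{
	for (struct seg *s = t->sg_head; s;) {
		struct seg *next = s->next;
		free(s);
		s = next;
	}
	t->sg_head = t->sg_tail = NULL;
}

int main(void)
{
	struct txd t = { 0 };

	txd_add_seg(&t, 0x1000, 0x8000, 64);
	txd_add_seg(&t, 0x2000, 0x8000, 32);	/* a second sg entry, now legal */
	printf("queued %zu bytes in txd\n", txd_bytes(&t));
	txd_free_segs(&t);
	return 0;
}

In the driver the same three operations appear as kzalloc()+list_add_tail() in pl08x_prep_dma_memcpy()/pl08x_prep_slave_sg(), list_for_each_entry() in pl08x_getbytes_chan(), and list_for_each_entry_safe()+kfree() in pl08x_free_txd().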
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 3b99dc62874b..fcfa0a8b5c59 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1268,12 +1268,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
atchan->chan_common.cookie = atchan->completed_cookie = 1;
- atchan->chan_common.chan_id = i;
list_add_tail(&atchan->chan_common.device_node,
&atdma->dma_common.channels);
@@ -1314,7 +1313,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- atdma->dma_common.chancnt);
+ pdata->nr_channels);
dma_async_device_register(&atdma->dma_common);
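Note on this and the remaining hunks (dw_dmac, intel_mid_dma, mpc512x_dma, pch_dma, pl330, timb_dma): the per-driver chan_id assignments and chancnt bookkeeping are dropped because the dmaengine core performs both during registration, and the probe messages switch to pdata->nr_channels since dma.chancnt is only valid after dma_async_device_register() returns. A hedged sketch of that core-side loop, simplified from kernels of this era (the helper name is illustrative, not a real kernel symbol):

#include <linux/dmaengine.h>
#include <linux/list.h>

/*
 * Simplified sketch of what dma_async_device_register() does with the
 * channel list; the real function also performs capability checks and
 * per-channel resource setup.
 */
static void sketch_assign_channel_ids(struct dma_device *device)
{
	struct dma_chan *chan;
	int chancnt = 0;

	list_for_each_entry(chan, &device->channels, device_node)
		chan->chan_id = chancnt++;	/* ids handed out centrally */

	device->chancnt = chancnt;		/* count derived from the list */
}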
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4d180ca9a1d8..9bfd6d360718 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
INIT_LIST_HEAD(&dw->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
dwc->chan.device = &dw->dma;
dwc->chan.cookie = dwc->completed = 1;
- dwc->chan.chan_id = i;
if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
list_add_tail(&dwc->chan.device_node,
&dw->dma.channels);
@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), dw->dma.chancnt);
+ dev_name(&pdev->dev), pdata->nr_channels);
dma_async_device_register(&dw->dma);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index d99f71c356b5..d746899f36e1 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b5cc27dc9a51..eab1fe71259e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
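Note on the two imx #include additions above: they track the module.h header cleanup, after which <linux/module.h> is no longer pulled in implicitly by other headers, so any file using the module macros must include it directly. A minimal skeleton showing the calls that require it (illustrative, not taken from these drivers):

#include <linux/init.h>
#include <linux/module.h>	/* module_init/module_exit and MODULE_*() live here */

static int __init demo_init(void)
{
	return 0;	/* nothing to do in this skeleton */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");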
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 8a3fdd87db97..cf74a664c5e0 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1114,7 +1114,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
midch->chan.device = &dma->common;
midch->chan.cookie = 1;
- midch->chan.chan_id = i;
midch->ch_id = dma->chan_base + i;
pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
@@ -1150,7 +1149,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
dma->common.dev = &pdev->dev;
- dma->common.chancnt = dma->max_chan;
dma->common.device_alloc_chan_resources =
intel_mid_dma_alloc_chan_resources;
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index b9bae94f2015..8ba4edc6185e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
mchan = &mdma->channels[i];
mchan->chan.device = dma;
- mchan->chan.chan_id = i;
mchan->chan.cookie = 1;
mchan->completed_cookie = mchan->chan.cookie;
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1ac8d4b580b7..5b65362024fd 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -926,7 +926,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
}
pd->dma.dev = &pdev->dev;
- pd->dma.chancnt = nr_channels;
INIT_LIST_HEAD(&pd->dma.channels);
@@ -935,7 +934,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->chan.device = &pd->dma;
pd_chan->chan.cookie = 1;
- pd_chan->chan.chan_id = i;
pd_chan->membase = &regs->desc[i];
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 621134fdba4c..571041477ab2 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -887,11 +887,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.device = pd;
- pch->chan.chan_id = i;
pch->dmac = pdmac;
/* Add the channel to the DMAC list */
- pd->chancnt++;
list_add_tail(&pch->chan.device_node, &pd->channels);
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index f69f90a61873..6dbdf451128e 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -753,7 +753,7 @@ static int __devinit td_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&td->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct timb_dma_chan *td_chan = &td->channels[i];
struct timb_dma_platform_data_channel *pchan =
pdata->channels + i;
@@ -767,7 +767,6 @@ static int __devinit td_probe(struct platform_device *pdev)
td_chan->chan.device = &td->dma;
td_chan->chan.cookie = 1;
- td_chan->chan.chan_id = i;
spin_lock_init(&td_chan->lock);
INIT_LIST_HEAD(&td_chan->active_list);
INIT_LIST_HEAD(&td_chan->queue);