author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-10 10:28:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-10 10:28:45 -0700
commit		77c32bbbe0d0e963ba5723b8d1f6c42c5d56858b (patch)
tree		2b819d3b7c00173c072fe61cadb0fd82ba51f417 /drivers/dma/sh/shdma-base.c
parent		fad0701eaa091beb8ce5ef2eef04b5e833617368 (diff)
parent		06822788faa21529a9a97965e266d77a596dc921 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
- new Xilinx VDMA driver from Srikanth
- a bunch of updates for the edma driver from Thomas, Joel and Peter
- fixes and updates for dw, ste_dma, freescale, mpc512x, sudmac, etc.
* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (45 commits)
dmaengine: sh: don't use dynamic static allocation
dmaengine: sh: fix print specifier warnings
dmaengine: sh: make shdma_prep_dma_cyclic static
dmaengine: Kconfig: Update MXS_DMA help text to include MX6Q/MX6DL
of: dma: Grammar s/requests/request/, s/used required/required/
dmaengine: shdma: Enable driver compilation with COMPILE_TEST
dmaengine: rcar-hpbdma: Include linux/err.h
dmaengine: sudmac: Include linux/err.h
dmaengine: sudmac: Keep #include sorted alphabetically
dmaengine: shdmac: Include linux/err.h
dmaengine: shdmac: Keep #include sorted alphabetically
dmaengine: s3c24xx-dma: Add cyclic transfer support
dmaengine: s3c24xx-dma: Process whole SG chain
dmaengine: imx: correct sdmac->status for cyclic dma tx
dmaengine: pch: fix compilation for alpha target
dmaengine: dw: check return code of dma_async_device_register()
dmaengine: dw: fix regression in dw_probe() function
dmaengine: dw: enable clock before access
dma: pch_dma: Fix Kconfig dependencies
dmaengine: mpc512x: add support for peripheral transfers
...
Diffstat (limited to 'drivers/dma/sh/shdma-base.c')
-rw-r--r--	drivers/dma/sh/shdma-base.c	98
1 file changed, 82 insertions, 16 deletions
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 52396771acbe..b35007e21e6b 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -73,8 +73,7 @@ static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct shdma_desc *chunk, *c, *desc =
-		container_of(tx, struct shdma_desc, async_tx),
-		*last = desc;
+		container_of(tx, struct shdma_desc, async_tx);
 	struct shdma_chan *schan = to_shdma_chan(tx->chan);
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
@@ -98,19 +97,20 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
 		    &chunk->node == &schan->ld_free))
 			break;
 		chunk->mark = DESC_SUBMITTED;
-		/* Callback goes to the last chunk */
-		chunk->async_tx.callback = NULL;
+		if (chunk->chunks == 1) {
+			chunk->async_tx.callback = callback;
+			chunk->async_tx.callback_param = tx->callback_param;
+		} else {
+			/* Callback goes to the last chunk */
+			chunk->async_tx.callback = NULL;
+		}
 		chunk->cookie = cookie;
 		list_move_tail(&chunk->node, &schan->ld_queue);
-		last = chunk;
 
 		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
-			tx->cookie, &last->async_tx, schan->id);
+			tx->cookie, &chunk->async_tx, schan->id);
 	}
 
-	last->async_tx.callback = callback;
-	last->async_tx.callback_param = tx->callback_param;
-
 	if (power_up) {
 		int ret;
 		schan->pm_state = SHDMA_PM_BUSY;
@@ -304,6 +304,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	dma_async_tx_callback callback = NULL;
 	void *param = NULL;
 	unsigned long flags;
+	LIST_HEAD(cyclic_list);
 
 	spin_lock_irqsave(&schan->chan_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
@@ -369,10 +370,16 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 		if (((desc->mark == DESC_COMPLETED ||
 		      desc->mark == DESC_WAITING) &&
 		     async_tx_test_ack(&desc->async_tx)) || all) {
-			/* Remove from ld_queue list */
-			desc->mark = DESC_IDLE;
-			list_move(&desc->node, &schan->ld_free);
+			if (all || !desc->cyclic) {
+				/* Remove from ld_queue list */
+				desc->mark = DESC_IDLE;
+				list_move(&desc->node, &schan->ld_free);
+			} else {
+				/* reuse as cyclic */
+				desc->mark = DESC_SUBMITTED;
+				list_move_tail(&desc->node, &cyclic_list);
+			}
 
 			if (list_empty(&schan->ld_queue)) {
 				dev_dbg(schan->dev, "Bring down channel %d\n",
 					schan->id);
@@ -389,6 +396,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
 	 */
 	schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
 
+	list_splice_tail(&cyclic_list, &schan->ld_queue);
+
 	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 	if (callback)
@@ -521,7 +530,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
  */
 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-	enum dma_transfer_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
 {
 	struct scatterlist *sg;
 	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
@@ -569,7 +578,11 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 			if (!new)
 				goto err_get_desc;
 
-			new->chunks = chunks--;
+			new->cyclic = cyclic;
+			if (cyclic)
+				new->chunks = 1;
+			else
+				new->chunks = chunks--;
 			list_add_tail(&new->node, &tx_list);
 		} while (len);
 	}
@@ -612,7 +625,8 @@ static struct dma_async_tx_descriptor *shdma_prep_memcpy(
 	sg_dma_address(&sg) = dma_src;
 	sg_dma_len(&sg) = len;
 
-	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
+			     flags, false);
 }
 
 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
@@ -640,7 +654,58 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
 	slave_addr = ops->slave_addr(schan);
 
 	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
-			     direction, flags);
+			     direction, flags, false);
+}
+
+#define SHDMA_MAX_SG_LEN 32
+
+static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+	const struct shdma_ops *ops = sdev->ops;
+	unsigned int sg_len = buf_len / period_len;
+	int slave_id = schan->slave_id;
+	dma_addr_t slave_addr;
+	struct scatterlist sgl[SHDMA_MAX_SG_LEN];
+	int i;
+
+	if (!chan)
+		return NULL;
+
+	BUG_ON(!schan->desc_num);
+
+	if (sg_len > SHDMA_MAX_SG_LEN) {
+		dev_err(schan->dev, "sg length %d exceds limit %d",
+			sg_len, SHDMA_MAX_SG_LEN);
+		return NULL;
+	}
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (slave_id < 0 || (buf_len < period_len)) {
+		dev_warn(schan->dev,
+			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+			__func__, buf_len, period_len, slave_id);
+		return NULL;
+	}
+
+	slave_addr = ops->slave_addr(schan);
+
+	sg_init_table(sgl, sg_len);
+	for (i = 0; i < sg_len; i++) {
+		dma_addr_t src = buf_addr + (period_len * i);
+
+		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+			    offset_in_page(src));
+		sg_dma_address(&sgl[i]) = src;
+		sg_dma_len(&sgl[i]) = period_len;
+	}
+
+	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+			     direction, flags, true);
 }
 
 static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -915,6 +980,7 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
 	dma_dev->device_control = shdma_control;
 
 	dma_dev->dev = dev;
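
The practical effect of this diff for client drivers: shdma now implements device_prep_dma_cyclic, every cyclic descriptor is built with chunks == 1 so the submit path attaches the completion callback to each period rather than only to the last chunk, and __ld_cleanup() re-queues completed cyclic descriptors instead of freeing them. Below is a minimal client-side sketch of driving this path through the generic dmaengine wrappers; it is not part of this merge, and the function names, channel acquisition, buffer, and period sizes are hypothetical assumptions for illustration.

/*
 * Hypothetical client sketch, not from this merge: start a cyclic
 * device-to-memory transfer on an already-mapped DMA buffer.  The
 * period count must stay within shdma's SHDMA_MAX_SG_LEN (32).
 */
#include <linux/dmaengine.h>

#define PERIOD_LEN	4096
#define NR_PERIODS	8		/* 8 <= SHDMA_MAX_SG_LEN */

static void period_done(void *arg)
{
	/*
	 * Because __ld_cleanup() marks cyclic descriptors DESC_SUBMITTED
	 * and splices them back onto ld_queue, this callback fires once
	 * per completed period for the lifetime of the transfer.
	 */
}

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf,
				       PERIOD_LEN * NR_PERIODS, PERIOD_LEN,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	tx->callback = period_done;
	tx->callback_param = NULL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}

Since the recycled descriptors loop indefinitely, the transfer only stops when the client tears it down, e.g. via dmaengine_terminate_all() on the channel.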