-rw-r--r--  Documentation/dmaengine.txt |  8
-rw-r--r--  drivers/dma/dmaengine.c     |  4
-rw-r--r--  include/linux/dmaengine.h   | 78
3 files changed, 85 insertions, 5 deletions
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 94b7e0f96b38..bbe6cb3d1856 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -75,6 +75,10 @@ The slave DMA usage consists of following steps:
slave_sg - DMA a list of scatter gather buffers from/to a peripheral
dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
operation is explicitly stopped.
+ interleaved_dma - This is common to Slave as well as M2M clients. For slave
+		 channels, the address of the device's FIFO may already be known
+		 to the driver. Various types of operations can be expressed by
+		 setting appropriate values in the 'dma_interleaved_template' members.
A non-NULL return of this transfer API represents a "descriptor" for
the given transaction.
@@ -89,6 +93,10 @@ The slave DMA usage consists of following steps:
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_data_direction direction);
+ struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+
The peripheral driver is expected to have mapped the scatterlist for
the DMA operation prior to calling device_prep_slave_sg, and must
keep the scatterlist mapped until the DMA operation has completed.
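As a hedged illustration of the call flow documented above (not part of this
patch; 'chan', 'xt', 'my_xfer_done' and 'my_ctx' are assumed to be set up by
the client driver beforehand):

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* ask the DMAC driver to build a descriptor for the template 'xt' */
	desc = chan->device->device_prep_interleaved_dma(chan, xt,
							 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;	/* the non-NULL return is the "descriptor" */

	desc->callback = my_xfer_done;		/* hypothetical completion handler */
	desc->callback_param = my_ctx;		/* hypothetical driver context     */

	cookie = dmaengine_submit(desc);	/* queue the descriptor ...        */
	dma_async_issue_pending(chan);		/* ... and start the pending queue */

The descriptor is then submitted and issued exactly like any other slave
transfer.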
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499da..a6c6051ec858 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
+ BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+ !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
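On the provider side, a DMAC driver that implements the new hook is expected
to advertise DMA_INTERLEAVE before registering, so that the BUG_ON() added
above is satisfied. A minimal sketch, assuming a hypothetical driver with a
'foo_' prefix and a 'common' dma_device embedded in its private 'fdev'
structure:

	dma_cap_set(DMA_INTERLEAVE, fdev->common.cap_mask);
	fdev->common.device_prep_interleaved_dma = foo_prep_interleaved_dma;
	/* the other mandatory hooks (device_alloc_chan_resources,
	 * device_free_chan_resources, device_tx_status,
	 * device_issue_pending) must be set as well */
	err = dma_async_device_register(&fdev->common);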
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index a865b3a354cd..5532bb8b500c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -71,10 +71,10 @@ enum dma_transaction_type {
DMA_ASYNC_TX,
DMA_SLAVE,
DMA_CYCLIC,
-};
-
+ DMA_INTERLEAVE,
/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+ DMA_TX_TYPE_END,
+};
/**
* enum dma_transfer_direction - dma transfer mode and direction indicator
@@ -91,6 +91,74 @@ enum dma_transfer_direction {
};
/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is a collection of contiguous bytes to be transferred.
+ * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
+ * The ICG may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs
+ * that, when repeated an integral number of times, specifies the transfer.
+ * A transfer template is a specification of a Frame, the number of times
+ * it is to be repeated and other per-transfer attributes.
+ *
+ * In practice, a client driver would have a template ready for each
+ * type of transfer it is going to need during its lifetime, and
+ * set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
+ * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ * == Chunk size
+ * ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ * size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ * chunk and before first src/dst address for next chunk.
+ * Ignored for dst (assumed 0), if dst_inc is true and dst_sgl is false.
+ * Ignored for src (assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+ size_t size;
+ size_t icg;
+};
+
+/**
+ * struct dma_interleaved_template - Template to convey the transfer pattern
+ * and attributes to the DMAC.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ * Otherwise, source is read contiguously (icg ignored).
+ * Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ * Otherwise, destination is filled contiguously (icg ignored).
+ * Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame, i.e., the size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ enum dma_transfer_direction dir;
+ bool src_inc;
+ bool dst_inc;
+ bool src_sgl;
+ bool dst_sgl;
+ size_t numf;
+ size_t frame_size;
+ struct data_chunk sgl[0];
+};
+
+/**
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
@@ -445,6 +513,7 @@ struct dma_tx_state {
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: prepares a transfer described by a 'dma_interleaved_template'
* @device_control: manipulate all pending operations on a channel, returns
* zero or error code
* @device_tx_status: poll for transaction completion, the optional
@@ -509,6 +578,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction);
+ struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg);
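For illustration only (not part of this patch), here is a hedged sketch of how
a client might fill 'dma_interleaved_template' to copy a 2D sub-window of
'width' bytes per line and 'height' lines out of a source buffer whose line
pitch is 'stride' bytes, into a packed destination. 'src_dma', 'dst_dma' and
'chan' are assumed to have been obtained by the caller:

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	/* one frame per line: a single chunk of 'width' bytes, then an ICG */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start	= src_dma;		/* bus address of the first chunk */
	xt->dst_start	= dst_dma;
	xt->dir		= DMA_MEM_TO_MEM;
	xt->src_inc	= true;
	xt->dst_inc	= true;
	xt->src_sgl	= true;			/* honour icg on the read side    */
	xt->dst_sgl	= false;		/* destination is written packed  */
	xt->numf	= height;		/* frames == lines                */
	xt->frame_size	= 1;			/* one chunk per frame            */
	xt->sgl[0].size	= width;		/* contiguous bytes per line      */
	xt->sgl[0].icg	= stride - width;	/* skip to the start of next line */

	desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);

With numf = height and a single {size = width, icg = stride - width} chunk,
the engine reads 'width' bytes, skips 'stride - width' bytes, and repeats
'height' times, matching the frame description in the header comment above.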