Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig | 30
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/acpi-dma.c | 7
-rw-r--r--  drivers/dma/at_xdmac.c | 65
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 8
-rw-r--r--  drivers/dma/dmaengine.c | 179
-rw-r--r--  drivers/dma/dw/core.c | 49
-rw-r--r--  drivers/dma/dw/pci.c | 4
-rw-r--r--  drivers/dma/dw/platform.c | 7
-rw-r--r--  drivers/dma/dw/regs.h | 2
-rw-r--r--  drivers/dma/edma.c | 116
-rw-r--r--  drivers/dma/ep93xx_dma.c | 28
-rw-r--r--  drivers/dma/fsl-edma.c | 85
-rw-r--r--  drivers/dma/fsldma.c | 2
-rw-r--r--  drivers/dma/hsu/hsu.c | 17
-rw-r--r--  drivers/dma/hsu/hsu.h | 1
-rw-r--r--  drivers/dma/idma64.c | 25
-rw-r--r--  drivers/dma/idma64.h | 5
-rw-r--r--  drivers/dma/img-mdc-dma.c | 78
-rw-r--r--  drivers/dma/ioat/dca.c | 2
-rw-r--r--  drivers/dma/ioat/dma.c | 302
-rw-r--r--  drivers/dma/ioat/dma.h | 57
-rw-r--r--  drivers/dma/ioat/hw.h | 2
-rw-r--r--  drivers/dma/ioat/init.c | 49
-rw-r--r--  drivers/dma/ioat/prep.c | 2
-rw-r--r--  drivers/dma/ioat/registers.h | 16
-rw-r--r--  drivers/dma/iop-adma.c | 8
-rw-r--r--  drivers/dma/mic_x100_dma.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 99
-rw-r--r--  drivers/dma/mv_xor.h | 2
-rw-r--r--  drivers/dma/omap-dma.c | 90
-rw-r--r--  drivers/dma/pl330.c | 101
-rw-r--r--  drivers/dma/pxa_dma.c | 9
-rw-r--r--  drivers/dma/qcom/Kconfig | 29
-rw-r--r--  drivers/dma/qcom/Makefile | 3
-rw-r--r--  drivers/dma/qcom/bam_dma.c (renamed from drivers/dma/qcom_bam_dma.c) | 37
-rw-r--r--  drivers/dma/qcom/hidma.c | 706
-rw-r--r--  drivers/dma/qcom/hidma.h | 160
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 302
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.h | 39
-rw-r--r--  drivers/dma/qcom/hidma_mgmt_sys.c | 295
-rw-r--r--  drivers/dma/sh/Kconfig | 12
-rw-r--r--  drivers/dma/sh/Makefile | 1
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 2
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 669
-rw-r--r--  drivers/dma/sh/shdmac.c | 2
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 4
-rw-r--r--  drivers/dma/sirf-dma.c | 10
-rw-r--r--  drivers/dma/ste_dma40.c | 87
-rw-r--r--  drivers/dma/stm32-dma.c | 1141
-rw-r--r--  drivers/dma/sun4i-dma.c | 1
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 120
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 81
-rw-r--r--  drivers/dma/virt-dma.c | 46
-rw-r--r--  drivers/dma/virt-dma.h | 25
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c | 204
56 files changed, 3865 insertions, 1563 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e6cd1a32025a..d96d87c56f2e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -341,12 +341,13 @@ config MV_XOR
config MXS_DMA
bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+ depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine, including APBH-DMA
- and APBX-DMA, is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+ and APBX-DMA, is integrated into Freescale
+ i.MX23/28/MX6Q/MX6DL/MX6UL chips.
config MX3_IPU
bool "MX3x Image Processing Unit support"
@@ -408,15 +409,6 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
-config QCOM_BAM_DMA
- tristate "QCOM BAM DMA support"
- depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- ---help---
- Enable support for the QCOM BAM DMA controller. This controller
- provides DMA capabilities for a variety of on-chip devices.
-
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
@@ -431,8 +423,20 @@ config STE_DMA40
help
Support for ST-Ericsson DMA40 controller
+config STM32_DMA
+ bool "STMicroelectronics STM32 DMA support"
+ depends on ARCH_STM32
+ select DMA_ENGINE
+ select DMA_OF
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip DMA controller on STMicroelectronics
+ STM32 MCUs.
+ If you have a board based on such an MCU and wish to use DMA, say
+ Y or M here.
+
config S3C24XX_DMAC
- tristate "Samsung S3C24XX DMA support"
+ bool "Samsung S3C24XX DMA support"
depends on ARCH_S3C24XX
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -527,6 +531,8 @@ config ZX_DMA
# driver files
source "drivers/dma/bestcomm/Kconfig"
+source "drivers/dma/qcom/Kconfig"
+
source "drivers/dma/dw/Kconfig"
source "drivers/dma/hsu/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ef9c099bd2b6..6084127c1486 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,10 +52,10 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
@@ -66,4 +66,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 16d0daa058a5..4a748c3435d7 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -72,7 +73,9 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
si = (const struct acpi_csrt_shared_info *)&grp[1];
/* Match device by MMIO and IRQ */
- if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
+ if (si->mmio_base_low != lower_32_bits(mem) ||
+ si->mmio_base_high != upper_32_bits(mem) ||
+ si->gsi_interrupt != irq)
return 0;
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
@@ -435,7 +438,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
return ERR_PTR(-ENODEV);
}
- dev_dbg(dev, "found DMA channel \"%s\" at index %d\n", name, index);
+ dev_dbg(dev, "Looking for DMA channel \"%s\" at index %d...\n", name, index);
return acpi_dma_request_slave_chan_by_index(dev, index);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
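The acpi-dma.c hunk above widens the CSRT match from the low 32 bits of the MMIO base to the full 64-bit address. A minimal sketch of the comparison it now performs, using the kernel's lower_32_bits()/upper_32_bits() helpers (csrt_mmio_matches() is a made-up name for illustration only):

    static bool csrt_mmio_matches(u64 mem, u32 base_low, u32 base_high)
    {
            /* both 32-bit CSRT fields must match the 64-bit base */
            return base_low == lower_32_bits(mem) &&
                   base_high == upper_32_bits(mem);
    }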
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 370c661c7d7b..8e304b1befc5 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -176,6 +176,7 @@
#define AT_XDMAC_MAX_CHAN 0x20
#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
#define AT_XDMAC_DMA_BUSWIDTHS\
(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -863,8 +864,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
* access. Hopefully we can access DDR through both ports (at least on
* SAMA5D4x), so we can use the same interface for source and dest,
* which works around the fact that we don't know the direction.
+ * ERRATA: even though the PERID is unused for memory transfers, it
+ * must not match the PERID of another channel; otherwise it can lead
+ * to spurious flag status.
*/
- u32 chan_cc = AT_XDMAC_CC_DIF(0)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ | AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1041,8 +1046,12 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* access DDR through both ports (at least on SAMA5D4x), so we can use
* the same interface for source and dest, which works around the fact
* that we don't know the direction.
+ * ERRATA: even though the PERID is unused for memory transfers, it
+ * must not match the PERID of another channel; otherwise it can lead
+ * to spurious flag status.
*/
- u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ | AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
@@ -1143,8 +1152,12 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
* access. Hopefully we can access DDR through both ports (at least on
* SAMA5D4x), so we can use the same interface for source and dest,
* which works around the fact that we don't know the direction.
+ * ERRATA: even though the PERID is unused for memory transfers, it
+ * must not match the PERID of another channel; otherwise it can lead
+ * to spurious flag status.
*/
- u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ | AT_XDMAC_CC_DAM_UBS_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_DIF(0)
| AT_XDMAC_CC_SIF(0)
@@ -1383,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct at_xdmac_desc *desc, *_desc;
struct list_head *descs_list;
enum dma_status ret;
- int residue;
- u32 cur_nda, mask, value;
+ int residue, retry;
+ u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@@ -1421,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
cpu_relax();
}
+ /*
+ * When processing the residue, we need to read two registers, but we
+ * can't do it atomically. AT_XDMAC_CNDA is used to find where we
+ * stand in the descriptor list, and AT_XDMAC_CUBC is used to know how
+ * much data remains for the current descriptor.
+ * Since the DMA channel is not paused (so as not to lose data), the
+ * descriptor may change between the AT_XDMAC_CNDA and AT_XDMAC_CUBC
+ * reads.
+ * For that reason, after reading AT_XDMAC_CUBC, we check whether we
+ * are still using the same descriptor by reading AT_XDMAC_CNDA a
+ * second time. If AT_XDMAC_CNDA has changed, we must read
+ * AT_XDMAC_CUBC again.
+ * Memory barriers are used to ensure the read order of the registers.
+ * A maximum number of retries is set because, although unlikely, the
+ * loop could otherwise never end when transferring a lot of data with
+ * small buffers.
+ */
cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+ rmb();
+ check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+ if (likely(cur_nda == check_nda))
+ break;
+
+ cur_nda = check_nda;
+ rmb();
+ cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ }
+
+ if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+ ret = DMA_ERROR;
+ goto spin_unlock;
+ }
+
/*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
@@ -1434,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
break;
}
- residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+ residue += cur_ubc << dwidth;
dma_set_residue(txstate, residue);
@@ -1688,6 +1736,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
at_xdmac_remove_xfer(atchan, desc);
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1820,6 +1869,8 @@ static int atmel_xdmac_resume(struct device *dev)
atchan = to_at_xdmac_chan(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
if (at_xdmac_chan_is_cyclic(atchan)) {
+ if (at_xdmac_chan_is_paused(atchan))
+ at_xdmac_device_resume(chan);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
@@ -1998,8 +2049,6 @@ static int at_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&atxdmac->dma);
clk_disable_unprepare(atxdmac->clk);
- synchronize_irq(atxdmac->irq);
-
free_irq(atxdmac->irq, atxdmac->dma.dev);
for (i = 0; i < atxdmac->dma.chancnt; i++) {
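The residue fix above is essentially a seqlock-style re-read: sample the descriptor pointer, sample the byte counter, then re-read the pointer to detect a descriptor switch in between. A minimal sketch of the pattern, where read_reg() and MAX_RETRIES stand in for the driver's accessors and AT_XDMAC_RESIDUE_MAX_RETRIES:

    nda = read_reg(CNDA);                   /* where in the desc list */
    rmb();
    ubc = read_reg(CUBC);                   /* bytes left in that desc */
    for (retry = 0; retry < MAX_RETRIES; retry++) {
            rmb();
            check = read_reg(CNDA);
            if (check == nda)
                    break;                  /* CUBC matched this desc */
            nda = check;                    /* desc changed: resample */
            rmb();
            ubc = read_reg(CUBC);
    }
    if (retry >= MAX_RETRIES)
            return DMA_ERROR;               /* gave up, report error */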
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5b2395e7e04d..c3468094393e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -307,6 +307,13 @@ static int axi_dmac_terminate_all(struct dma_chan *c)
return 0;
}
+static void axi_dmac_synchronize(struct dma_chan *c)
+{
+ struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+
+ vchan_synchronize(&chan->vchan);
+}
+
static void axi_dmac_issue_pending(struct dma_chan *c)
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
@@ -613,6 +620,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
dma_dev->device_terminate_all = axi_dmac_terminate_all;
+ dma_dev->device_synchronize = axi_dmac_synchronize;
dma_dev->dev = &pdev->dev;
dma_dev->chancnt = 1;
dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ecec1445adf..0cb259c59916 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -265,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan)
module_put(dma_chan_to_owner(chan));
/* This channel is not in use anymore, free it */
- if (!chan->client_count && chan->device->device_free_chan_resources)
+ if (!chan->client_count && chan->device->device_free_chan_resources) {
+ /* Make sure all operations have completed */
+ dmaengine_synchronize(chan);
chan->device->device_free_chan_resources(chan);
+ }
/* If the channel is used via a DMA request router, free the mapping */
if (chan->router && chan->router->route_free) {
@@ -492,7 +496,9 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->src_addr_widths = device->src_addr_widths;
caps->dst_addr_widths = device->dst_addr_widths;
caps->directions = device->directions;
+ caps->max_burst = device->max_burst;
caps->residue_granularity = device->residue_granularity;
+ caps->descriptor_reuse = device->descriptor_reuse;
/*
* Some devices implement only pause (e.g. to get residuum) but no
@@ -511,7 +517,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
{
struct dma_chan *chan;
- if (!__dma_device_satisfies_mask(dev, mask)) {
+ if (mask && !__dma_device_satisfies_mask(dev, mask)) {
pr_debug("%s: wrong capabilities\n", __func__);
return NULL;
}
@@ -542,6 +548,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
return NULL;
}
+static struct dma_chan *find_candidate(struct dma_device *device,
+ const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
+{
+ struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+ int err;
+
+ if (chan) {
+ /* Found a suitable channel, try to grab, prep, and return it.
+ * We first set DMA_PRIVATE to disable balance_ref_count as this
+ * channel will not be published in the general-purpose
+ * allocator
+ */
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
+ err = dma_chan_get(chan);
+
+ if (err) {
+ if (err == -ENODEV) {
+ pr_debug("%s: %s module removed\n", __func__,
+ dma_chan_name(chan));
+ list_del_rcu(&device->global_node);
+ } else
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
+
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+ chan = ERR_PTR(err);
+ }
+ }
+
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
/**
* dma_get_slave_channel - try to get specific channel exclusively
* @chan: target channel
@@ -580,7 +622,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
- int err;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -588,23 +629,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
/* lock against __dma_request_channel */
mutex_lock(&dma_list_mutex);
- chan = private_candidate(&mask, device, NULL, NULL);
- if (chan) {
- dma_cap_set(DMA_PRIVATE, device->cap_mask);
- device->privatecnt++;
- err = dma_chan_get(chan);
- if (err) {
- pr_debug("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
- chan = NULL;
- if (--device->privatecnt == 0)
- dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- }
- }
+ chan = find_candidate(device, &mask, NULL, NULL);
mutex_unlock(&dma_list_mutex);
- return chan;
+ return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
@@ -621,35 +650,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
- int err;
/* Find a channel */
mutex_lock(&dma_list_mutex);
list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
- chan = private_candidate(mask, device, fn, fn_param);
- if (chan) {
- /* Found a suitable channel, try to grab, prep, and
- * return it. We first set DMA_PRIVATE to disable
- * balance_ref_count as this channel will not be
- * published in the general-purpose allocator
- */
- dma_cap_set(DMA_PRIVATE, device->cap_mask);
- device->privatecnt++;
- err = dma_chan_get(chan);
+ chan = find_candidate(device, mask, fn, fn_param);
+ if (!IS_ERR(chan))
+ break;
- if (err == -ENODEV) {
- pr_debug("%s: %s module removed\n",
- __func__, dma_chan_name(chan));
- list_del_rcu(&device->global_node);
- } else if (err)
- pr_debug("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
- else
- break;
- if (--device->privatecnt == 0)
- dma_cap_clear(DMA_PRIVATE, device->cap_mask);
- chan = NULL;
- }
+ chan = NULL;
}
mutex_unlock(&dma_list_mutex);
@@ -662,27 +671,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+ const char *name,
+ struct device *dev)
+{
+ int i;
+
+ if (!device->filter.mapcnt)
+ return NULL;
+
+ for (i = 0; i < device->filter.mapcnt; i++) {
+ const struct dma_slave_map *map = &device->filter.map[i];
+
+ if (!strcmp(map->devname, dev_name(dev)) &&
+ !strcmp(map->slave, name))
+ return map;
+ }
+
+ return NULL;
+}
+
/**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
* @dev: pointer to client device structure
* @name: slave channel name
*
* Returns pointer to appropriate DMA channel on success or an error pointer.
*/
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
- const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
+ struct dma_device *d, *_d;
+ struct dma_chan *chan = NULL;
+
/* If device-tree is present get slave info from here */
if (dev->of_node)
- return of_dma_request_slave_channel(dev->of_node, name);
+ chan = of_dma_request_slave_channel(dev->of_node, name);
/* If device was enumerated by ACPI get slave info from here */
- if (ACPI_HANDLE(dev))
- return acpi_dma_request_slave_chan_by_name(dev, name);
+ if (has_acpi_companion(dev) && !chan)
+ chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+ if (chan) {
+ /* Valid channel found or requester needs to be deferred */
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+ }
- return ERR_PTR(-ENODEV);
+ /* Try to find the channel via the DMA filter map(s) */
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+ dma_cap_mask_t mask;
+ const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+ if (!map)
+ continue;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = find_candidate(d, &mask, d->filter.fn, map->param);
+ if (!IS_ERR(chan))
+ break;
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
/**
* dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -694,17 +749,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
struct dma_chan *dma_request_slave_channel(struct device *dev,
const char *name)
{
- struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+ struct dma_chan *ch = dma_request_chan(dev, name);
if (IS_ERR(ch))
return NULL;
- dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
- ch->device->privatecnt++;
-
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+ struct dma_chan *chan;
+
+ if (!mask)
+ return ERR_PTR(-ENODEV);
+
+ chan = __dma_request_channel(mask, NULL, NULL);
+ if (!chan)
+ chan = ERR_PTR(-ENODEV);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
void dma_release_channel(struct dma_chan *chan)
{
mutex_lock(&dma_list_mutex);
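dma_request_chan() above adds a third lookup path after DT and ACPI: a per-controller filter map. A hedged sketch of how the two sides meet, with FOO_UART0_*_REQ, foo_filter_fn, and the device/slave names being hypothetical:

    static const struct dma_slave_map foo_map[] = {
            { "foo-uart.0", "rx", (void *)FOO_UART0_RX_REQ },
            { "foo-uart.0", "tx", (void *)FOO_UART0_TX_REQ },
    };

    /* in the DMA controller driver's probe */
    dma_dev->filter.map = foo_map;
    dma_dev->filter.mapcnt = ARRAY_SIZE(foo_map);
    dma_dev->filter.fn = foo_filter_fn;

    /* in the client driver ("foo-uart.0") */
    chan = dma_request_chan(&pdev->dev, "rx");
    if (IS_ERR(chan))
            return PTR_ERR(chan);   /* may be -EPROBE_DEFER */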
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 8b20930ade98..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
- u32 status_err, u32 status_xfer)
+ u32 status_block, u32 status_err, u32 status_xfer)
{
unsigned long flags;
- if (dwc->mask) {
+ if (status_block & dwc->mask) {
void (*callback)(void *param);
void *callback_param;
dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
channel_readl(dwc, LLP));
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
callback = dwc->cdesc->period_callback;
callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
channel_writel(dwc, CTL_LO, 0);
channel_writel(dwc, CTL_HI, 0);
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
spin_unlock_irqrestore(&dwc->lock, flags);
}
+
+ /* Re-enable interrupts */
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}
/* ------------------------------------------------------------------------- */
@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
struct dw_dma_chan *dwc;
+ u32 status_block;
u32 status_xfer;
u32 status_err;
int i;
+ status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
- dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+ dwc_handle_cyclic(dw, dwc, status_block, status_err,
+ status_xfer);
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if (status_xfer & (1 << i))
dwc_scan_descriptors(dw, dwc);
}
- /*
- * Re-enable interrupts.
- */
+ /* Re-enable interrupts */
channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
@@ -640,6 +646,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
* softirq handler.
*/
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
status = dma_readl(dw, STATUS_INT);
@@ -650,6 +657,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
/* Try to recover */
channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+ channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1116,6 +1124,7 @@ static void dw_dma_off(struct dw_dma *dw)
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1221,6 +1230,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
+ channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
channel_clear_bit(dw, MASK.ERROR, dwc->mask);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1250,7 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1260,25 +1270,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
- /* Assert channel is idle */
- if (dma_readl(dw, CH_EN) & dwc->mask) {
- dev_err(chan2dev(&dwc->chan),
- "%s: BUG: Attempted to start non-idle channel\n",
- __func__);
- dwc_dump_chan_regs(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
- return -EBUSY;
- }
-
- dma_writel(dw, CLEAR.ERROR, dwc->mask);
- dma_writel(dw, CLEAR.XFER, dwc->mask);
+ /* Enable interrupts to perform cyclic transfer */
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- /* Setup DMAC channel registers */
- channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
- channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
- channel_writel(dwc, CTL_HI, 0);
-
- channel_set_bit(dw, CH_EN, dwc->mask);
+ dwc_dostart(dwc, dwc->cdesc->desc[0]);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1484,6 +1479,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
dwc_chan_disable(dw, dwc);
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1572,9 +1568,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Force dma off, just in case */
dw_dma_off(dw);
- /* Disable BLOCK interrupts as well */
- channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
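Condensed, the BLOCK-interrupt discipline the dw/core.c changes establish for cyclic transfers looks like this (a sketch, not the driver's exact code; status_block is the cached RAW.BLOCK value):

    /* hard IRQ: mask BLOCK along with XFER/ERROR, defer to tasklet */
    channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
    tasklet_schedule(&dw->tasklet);

    /* tasklet, per cyclic channel */
    if (status_block & dwc->mask) {
            dma_writel(dw, CLEAR.BLOCK, dwc->mask); /* ack raw status */
            if (callback)
                    callback(callback_param);       /* one period done */
    }
    channel_set_bit(dw, MASK.BLOCK, dwc->mask);     /* unmask again */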
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60) },
+
+ /* Broadwell */
+ { PCI_VDEVICE(INTEL, 0x9ce0) },
+
{ }
};
MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 127093a0c0e8..26edbe3a27ac 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -103,18 +103,21 @@ dw_dma_parse_dt(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 nr_channels;
if (!np) {
dev_err(&pdev->dev, "Missing DT data\n");
return NULL;
}
+ if (of_property_read_u32(np, "dma-channels", &nr_channels))
+ return NULL;
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
- return NULL;
+ pdata->nr_channels = nr_channels;
if (of_property_read_bool(np, "is_private"))
pdata->is_private = true;
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 241ff2b1402b..0a50c18d85b8 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -150,7 +150,7 @@ enum dw_dma_msize {
#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
#define DWC_CTLL_DST_DEC (1<<7)
#define DWC_CTLL_DST_FIX (2<<7)
-#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
+#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
#define DWC_CTLL_SRC_DEC (1<<9)
#define DWC_CTLL_SRC_FIX (2<<9)
#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 16fe773fb846..ee3463e774f8 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST BIT(24)
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV BIT(4)
+
/*
* Max of 20 segments per channel to conserve PaRAM slots
* Also note that MAX_NR_SG should be at least the number of periods
@@ -484,7 +487,7 @@ static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
*/
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
- if (slot > 0) {
+ if (slot >= 0) {
slot = EDMA_CHAN_SLOT(slot);
/* Requesting entry paRAM slot for a HW triggered channel. */
if (ecc->chmap_exist && slot < ecc->num_channels)
@@ -866,6 +869,13 @@ static int edma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void edma_synchronize(struct dma_chan *chan)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ vchan_synchronize(&echan->vchan);
+}
+
static int edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -1362,36 +1372,36 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
static void edma_completion_handler(struct edma_chan *echan)
{
struct device *dev = echan->vchan.chan.device->dev;
- struct edma_desc *edesc = echan->edesc;
-
- if (!edesc)
- return;
+ struct edma_desc *edesc;
spin_lock(&echan->vchan.lock);
- if (edesc->cyclic) {
- vchan_cyclic_callback(&edesc->vdesc);
- spin_unlock(&echan->vchan.lock);
- return;
- } else if (edesc->processed == edesc->pset_nr) {
- edesc->residue = 0;
- edma_stop(echan);
- vchan_cookie_complete(&edesc->vdesc);
- echan->edesc = NULL;
-
- dev_dbg(dev, "Transfer completed on channel %d\n",
- echan->ch_num);
- } else {
- dev_dbg(dev, "Sub transfer completed on channel %d\n",
- echan->ch_num);
-
- edma_pause(echan);
-
- /* Update statistics for tx_status */
- edesc->residue -= edesc->sg_len;
- edesc->residue_stat = edesc->residue;
- edesc->processed_stat = edesc->processed;
+ edesc = echan->edesc;
+ if (edesc) {
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ spin_unlock(&echan->vchan.lock);
+ return;
+ } else if (edesc->processed == edesc->pset_nr) {
+ edesc->residue = 0;
+ edma_stop(echan);
+ vchan_cookie_complete(&edesc->vdesc);
+ echan->edesc = NULL;
+
+ dev_dbg(dev, "Transfer completed on channel %d\n",
+ echan->ch_num);
+ } else {
+ dev_dbg(dev, "Sub transfer completed on channel %d\n",
+ echan->ch_num);
+
+ edma_pause(echan);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+ }
+ edma_execute(echan);
}
- edma_execute(echan);
spin_unlock(&echan->vchan.lock);
}
@@ -1680,9 +1690,20 @@ static void edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for
+ * proof that a particular transfer is completed. The limit can be hit
+ * if there are large bursts to/from slow devices, or if the CPU never
+ * manages to catch the DMA hardware idle. On an AM335x transferring 48
+ * bytes from the UART RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
static u32 edma_residue(struct edma_desc *edesc)
{
bool dst = edesc->direction == DMA_DEV_TO_MEM;
+ int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+ struct edma_chan *echan = edesc->echan;
struct edma_pset *pset = edesc->pset;
dma_addr_t done, pos;
int i;
@@ -1691,7 +1712,32 @@ static u32 edma_residue(struct edma_desc *edesc)
* We always read the dst/src position from the first RamPar
* pset. That's the one which is active now.
*/
- pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+ pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+ /*
+ * "pos" may represent a transfer request that is still being
+ * processed by the EDMACC or EDMATC. We will busy wait until
+ * any one of the situations occurs:
+ * 1. the DMA hardware is idle
+ * 2. a new transfer request is setup
+ * 3. we hit the loop limit
+ */
+ while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+ /* check if a new transfer request is setup */
+ if (edma_get_position(echan->ecc,
+ echan->slot[0], dst) != pos) {
+ break;
+ }
+
+ if (!--loop_count) {
+ dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+ "%s: timeout waiting for PaRAM update\n",
+ __func__);
+ break;
+ }
+
+ cpu_relax();
+ }
/*
* Cyclic is simple. Just subtract pset[0].addr from pos.
@@ -1798,6 +1844,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
s_ddev->device_pause = edma_dma_pause;
s_ddev->device_resume = edma_dma_resume;
s_ddev->device_terminate_all = edma_terminate_all;
+ s_ddev->device_synchronize = edma_synchronize;
s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -1823,6 +1870,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
m_ddev->device_pause = edma_dma_pause;
m_ddev->device_resume = edma_dma_resume;
m_ddev->device_terminate_all = edma_terminate_all;
+ m_ddev->device_synchronize = edma_synchronize;
m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -2314,6 +2362,10 @@ static int edma_probe(struct platform_device *pdev)
edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
}
+ ecc->dma_slave.filter.map = info->slave_map;
+ ecc->dma_slave.filter.mapcnt = info->slavecnt;
+ ecc->dma_slave.filter.fn = edma_filter_fn;
+
ret = dma_async_device_register(&ecc->dma_slave);
if (ret) {
dev_err(dev, "slave ddev registration failed (%d)\n", ret);
@@ -2421,7 +2473,13 @@ static struct platform_driver edma_driver = {
},
};
+static int edma_tptc_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
static struct platform_driver edma_tptc_driver = {
+ .probe = edma_tptc_probe,
.driver = {
.name = "edma3-tptc",
.of_match_table = edma_tptc_of_ids,
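With device_synchronize wired up, edma clients gain a safe teardown sequence. A hedged sketch of the client-side pattern it enables (chan and buf are placeholders):

    dmaengine_terminate_all(chan);  /* stop transfers; may return while
                                     * a completion callback still runs */
    dmaengine_synchronize(chan);    /* wait for callbacks to finish */
    kfree(buf);                     /* now safe to free DMA buffers */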
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 57ff46284f15..21f08cc3352b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -421,23 +421,25 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
desc->size);
}
- switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
- case M2P_INTERRUPT_STALL:
- /* Disable interrupts */
- control = readl(edmac->regs + M2P_CONTROL);
- control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
- m2p_set_control(edmac, control);
-
- return INTERRUPT_DONE;
-
- case M2P_INTERRUPT_NFB:
- if (ep93xx_dma_advance_active(edmac))
- m2p_fill_desc(edmac);
+ /*
+ * Even the latest E2 silicon revision sometimes asserts the STALL
+ * interrupt instead of NFB. Therefore we treat them equally, based on
+ * the amount of data we still have to transfer.
+ */
+ if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
+ return INTERRUPT_UNKNOWN;
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2p_fill_desc(edmac);
return INTERRUPT_NEXT_BUFFER;
}
- return INTERRUPT_UNKNOWN;
+ /* Disable interrupts */
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ return INTERRUPT_DONE;
}
/*
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 915eec3cc279..be2e62b87948 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -116,6 +116,10 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+enum fsl_edma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
struct fsl_edma_hw_tcd {
__le32 saddr;
@@ -147,6 +151,9 @@ struct fsl_edma_slave_config {
struct fsl_edma_chan {
struct virt_dma_chan vchan;
enum dma_status status;
+ enum fsl_edma_pm_state pm_state;
+ bool idle;
+ u32 slave_id;
struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc;
struct fsl_edma_slave_config fsc;
@@ -298,6 +305,7 @@ static int fsl_edma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
fsl_edma_disable_request(fsl_chan);
fsl_chan->edesc = NULL;
+ fsl_chan->idle = true;
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
@@ -313,6 +321,7 @@ static int fsl_edma_pause(struct dma_chan *chan)
if (fsl_chan->edesc) {
fsl_edma_disable_request(fsl_chan);
fsl_chan->status = DMA_PAUSED;
+ fsl_chan->idle = true;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
@@ -327,6 +336,7 @@ static int fsl_edma_resume(struct dma_chan *chan)
if (fsl_chan->edesc) {
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
}
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
@@ -648,6 +658,7 @@ static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
fsl_edma_enable_request(fsl_chan);
fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
}
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
@@ -676,6 +687,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
fsl_chan->edesc = NULL;
fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
} else {
vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
}
@@ -704,6 +716,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
fsl_edma->membase + EDMA_CERR);
fsl_edma->chans[ch].status = DMA_ERROR;
+ fsl_edma->chans[ch].idle = true;
}
}
return IRQ_HANDLED;
@@ -724,6 +737,12 @@ static void fsl_edma_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (unlikely(fsl_chan->pm_state != RUNNING)) {
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ /* cannot submit due to suspend */
+ return;
+ }
+
if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
fsl_edma_xfer_desc(fsl_chan);
@@ -735,6 +754,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
{
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
+ struct fsl_edma_chan *fsl_chan;
unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
if (dma_spec->args_count != 2)
@@ -748,8 +768,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
chan = dma_get_slave_channel(chan);
if (chan) {
chan->device->privatecnt++;
- fsl_edma_chan_mux(to_fsl_edma_chan(chan),
- dma_spec->args[1], true);
+ fsl_chan = to_fsl_edma_chan(chan);
+ fsl_chan->slave_id = dma_spec->args[1];
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
+ true);
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan;
}
@@ -888,7 +910,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
fsl_chan->edma = fsl_edma;
-
+ fsl_chan->pm_state = RUNNING;
+ fsl_chan->slave_id = 0;
+ fsl_chan->idle = true;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
@@ -959,6 +983,60 @@ static int fsl_edma_remove(struct platform_device *pdev)
return 0;
}
+static int fsl_edma_suspend_late(struct device *dev)
+{
+ struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
+ struct fsl_edma_chan *fsl_chan;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ /* Make sure the channel is idle, otherwise force-disable it. */
+ if (unlikely(!fsl_chan->idle)) {
+ dev_warn(dev, "WARN: There is non-idle channel.");
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ }
+
+ fsl_chan->pm_state = SUSPENDED;
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ }
+
+ return 0;
+}
+
+static int fsl_edma_resume_early(struct device *dev)
+{
+ struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ fsl_chan = &fsl_edma->chans[i];
+ fsl_chan->pm_state = RUNNING;
+ edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ if (fsl_chan->slave_id != 0)
+ fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
+ }
+
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
+ fsl_edma->membase + EDMA_CR);
+
+ return 0;
+}
+
+/*
+ * eDMA provides services to other devices, so it should suspend late
+ * and resume early. By the time the eDMA suspends, all of its clients
+ * should have stopped their DMA transfers and left the channels idle.
+ */
+static const struct dev_pm_ops fsl_edma_pm_ops = {
+ .suspend_late = fsl_edma_suspend_late,
+ .resume_early = fsl_edma_resume_early,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", },
{ /* sentinel */ }
@@ -969,6 +1047,7 @@ static struct platform_driver fsl_edma_driver = {
.driver = {
.name = "fsl-edma",
.of_match_table = fsl_edma_dt_ids,
+ .pm = &fsl_edma_pm_ops,
},
.probe = fsl_edma_probe,
.remove = fsl_edma_remove,
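The hand-filled dev_pm_ops above could also be expressed with the SET_LATE_SYSTEM_SLEEP_PM_OPS helper; note the macro additionally wires the hibernation late/early phases, so this is a near-equivalent sketch rather than a drop-in identical form:

    static const struct dev_pm_ops fsl_edma_pm_ops = {
            SET_LATE_SYSTEM_SLEEP_PM_OPS(fsl_edma_suspend_late,
                                         fsl_edma_resume_early)
    };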
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 2209f75fdf05..aac85c30c2cf 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -522,6 +522,8 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
chan_dbg(chan, "LD %p callback\n", desc);
txd->callback(txd->callback_param);
}
+
+ dma_descriptor_unmap(txd);
}
/* Run any dependencies */
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad728aecf..eef145edb936 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
desc->sg[i].addr = sg_dma_address(sg);
desc->sg[i].len = sg_dma_len(sg);
+
+ desc->length += sg_dma_len(sg);
}
desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}
-static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
-{
- size_t bytes = 0;
- unsigned int i;
-
- for (i = desc->active; i < desc->nents; i++)
- bytes += desc->sg[i].len;
-
- return bytes;
-}
-
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = hsu_dma_desc_size(desc);
+ size_t bytes = desc->length;
int i;
i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
dma_set_residue(state, bytes);
status = hsuc->desc->status;
} else if (vdesc) {
- bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+ bytes = to_hsu_dma_desc(vdesc)->length;
dma_set_residue(state, bytes);
}
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c6d548..578a8ee8cd05 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -65,6 +65,7 @@ struct hsu_dma_desc {
enum dma_transfer_direction direction;
struct hsu_dma_sg *sg;
unsigned int nents;
+ size_t length;
unsigned int active;
enum dma_status status;
};
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 7d56b47e4fcf..1953e57505f4 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -178,20 +178,12 @@ static irqreturn_t idma64_irq(int irq, void *dev)
if (!status)
return IRQ_NONE;
- /* Disable interrupts */
- channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
- channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
-
status_xfer = dma_readl(idma64, RAW(XFER));
status_err = dma_readl(idma64, RAW(ERROR));
for (i = 0; i < idma64->dma.chancnt; i++)
idma64_chan_irq(idma64, i, status_err, status_xfer);
- /* Re-enable interrupts */
- channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
- channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
-
return IRQ_HANDLED;
}
@@ -239,7 +231,7 @@ static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}
-static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
+static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
struct dma_slave_config *config,
enum dma_transfer_direction direction, u64 llp)
{
@@ -276,27 +268,30 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
IDMA64C_CTLL_SRC_WIDTH(src_width);
lli->llp = llp;
- return hw->llp;
}
static void idma64_desc_fill(struct idma64_chan *idma64c,
struct idma64_desc *desc)
{
struct dma_slave_config *config = &idma64c->config;
- struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
+ unsigned int i = desc->ndesc;
+ struct idma64_hw_desc *hw = &desc->hw[i - 1];
struct idma64_lli *lli = hw->lli;
u64 llp = 0;
- unsigned int i = desc->ndesc;
/* Fill the hardware descriptors and link them to a list */
do {
hw = &desc->hw[--i];
- llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
+ idma64_hw_desc_fill(hw, config, desc->direction, llp);
+ llp = hw->llp;
desc->length += hw->len;
} while (i);
- /* Trigger interrupt after last block */
+ /* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
+
+ /* Disable LLP transfer in the last block */
+ lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}
static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
@@ -596,6 +591,8 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->dev;
+ dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+
ret = dma_async_device_register(&idma64->dma);
if (ret)
return ret;
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index f6aeff0af8a5..dc6874424188 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -54,7 +54,8 @@
#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */
/* Bitfields in CTL_HI */
-#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_BLOCK_TS_MASK ((1 << 17) - 1)
+#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & IDMA64C_CTLH_BLOCK_TS_MASK)
#define IDMA64C_CTLH_DONE (1 << 17)
/* Bitfields in CFG_LO */
@@ -70,7 +71,7 @@
#define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */
#define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */
#define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
-#define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
+#define IDMA64C_CFGH_WR_ISSUE_THD(x) ((x) << 18)
/* Interrupt registers */
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 9ca56830cc63..a4c53be482cf 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -651,6 +651,48 @@ static enum dma_status mdc_tx_status(struct dma_chan *chan,
return ret;
}
+static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
+{
+ u32 val, processed, done1, done2;
+ unsigned int ret;
+
+ val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+ processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
+ MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
+ /*
+ * CMDS_DONE may have incremented between reading CMDS_PROCESSED
+ * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
+ * didn't miss a command completion.
+ */
+ do {
+ val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+
+ done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+ MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+
+ val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
+ MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
+ MDC_CMDS_PROCESSED_INT_ACTIVE);
+
+ val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
+
+ mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
+
+ val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
+
+ done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
+ MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
+ } while (done1 != done2);
+
+ if (done1 >= processed)
+ ret = done1 - processed;
+ else
+ ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
+ processed) + done1;
+
+ return ret;
+}
+
static int mdc_terminate_all(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
@@ -667,6 +709,8 @@ static int mdc_terminate_all(struct dma_chan *chan)
mchan->desc = NULL;
vchan_get_all_descriptors(&mchan->vc, &head);
+ mdc_get_new_events(mchan);
+
spin_unlock_irqrestore(&mchan->vc.lock, flags);
if (mdesc)
@@ -703,35 +747,17 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
struct mdc_tx_desc *mdesc;
- u32 val, processed, done1, done2;
- unsigned int i;
+ unsigned int i, new_events;
spin_lock(&mchan->vc.lock);
- val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
- processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
- MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
- /*
- * CMDS_DONE may have incremented between reading CMDS_PROCESSED
- * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
- * didn't miss a command completion.
- */
- do {
- val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
- done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
- MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
- val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
- MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
- MDC_CMDS_PROCESSED_INT_ACTIVE);
- val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
- mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
- val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
- done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
- MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
- } while (done1 != done2);
-
dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
+ new_events = mdc_get_new_events(mchan);
+
+ if (!new_events)
+ goto out;
+
mdesc = mchan->desc;
if (!mdesc) {
dev_warn(mdma2dev(mchan->mdma),
@@ -740,8 +766,7 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
goto out;
}
- for (i = processed; i != done1;
- i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
+ for (i = 0; i < new_events; i++) {
/*
* The first interrupt in a transfer indicates that the
* command list has been loaded, not that a command has
@@ -979,7 +1004,6 @@ static int mdc_dma_remove(struct platform_device *pdev)
vc.chan.device_node) {
list_del(&mchan->vc.chan.device_node);
- synchronize_irq(mchan->irq);
devm_free_irq(&pdev->dev, mchan->irq, mchan);
tasklet_kill(&mchan->vc.task);
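The event count in mdc_get_new_events() is a modular difference on the hardware's wrapping CMDS counter. A worked example, assuming a 6-bit counter (mask 0x3f, so values wrap at 64) purely for illustration:

    /* processed = 60, done = 3: the counter wrapped once, so  */
    /* new events = ((0x3f + 1) - 60) + 3 = (64 - 60) + 3 = 7  */
    if (done >= processed)
            ret = done - processed;
    else
            ret = ((mask + 1) - processed) + done;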
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 2cb7c308d5c7..0b9b6b07db9e 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -224,7 +224,7 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca,
return tag;
}
-static struct dca_ops ioat_dca_ops = {
+static const struct dca_ops ioat_dca_ops = {
.add_requester = ioat_dca_add_requester,
.remove_requester = ioat_dca_remove_requester,
.get_tag = ioat_dca_get_tag,
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..bd09961443b1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
+#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -290,24 +291,30 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
}
static struct ioat_ring_ent *
-ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
struct ioatdma_device *ioat_dma;
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+ int chunk;
dma_addr_t phys;
+ u8 *pos;
+ off_t offs;
ioat_dma = to_ioatdma_device(chan->device);
- hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
- if (!hw)
- return NULL;
+
+ chunk = idx / IOAT_DESCS_PER_2M;
+ idx &= (IOAT_DESCS_PER_2M - 1);
+ offs = idx * IOAT_DESC_SZ;
+ pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
+ phys = ioat_chan->descs[chunk].hw + offs;
+ hw = (struct ioat_dma_descriptor *)pos;
memset(hw, 0, sizeof(*hw));
desc = kmem_cache_zalloc(ioat_cache, flags);
- if (!desc) {
- pci_pool_free(ioat_dma->dma_pool, hw, phys);
+ if (!desc)
return NULL;
- }
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = ioat_tx_submit_unlock;
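Descriptors now come from 2 MB coherent chunks instead of a pci_pool, and the chunk/offset math above locates one by its ring index. A worked example, assuming IOAT_DESC_SZ = 64 so IOAT_DESCS_PER_2M = 32768 (an assumption stated for illustration):

    chunk = 40000 / 32768;          /* = 1: second 2 MB chunk        */
    idx   = 40000 & (32768 - 1);    /* = 7232: index within chunk    */
    offs  = 7232 * 64;              /* = 462848: byte offset of desc */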
@@ -318,32 +325,63 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
- struct ioatdma_device *ioat_dma;
-
- ioat_dma = to_ioatdma_device(chan->device);
- pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
kmem_cache_free(ioat_cache, desc);
}
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
struct ioat_ring_ent **ring;
- int descs = 1 << order;
- int i;
-
- if (order > ioat_get_max_alloc_order())
- return NULL;
+ int total_descs = 1 << order;
+ int i, chunks;
/* allocate the array to hold the software ring */
- ring = kcalloc(descs, sizeof(*ring), flags);
+ ring = kcalloc(total_descs, sizeof(*ring), flags);
if (!ring)
return NULL;
- for (i = 0; i < descs; i++) {
- ring[i] = ioat_alloc_ring_ent(c, flags);
+
+ ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+
+ for (i = 0; i < chunks; i++) {
+ struct ioat_descs *descs = &ioat_chan->descs[i];
+
+ descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+ SZ_2M, &descs->hw, flags);
+ if (!descs->virt) {
+ int idx;
+
+ for (idx = 0; idx < i; idx++) {
+ descs = &ioat_chan->descs[idx];
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ descs->virt, descs->hw);
+ descs->virt = NULL;
+ descs->hw = 0;
+ }
+
+ ioat_chan->desc_chunks = 0;
+ kfree(ring);
+ return NULL;
+ }
+ }
+
+ for (i = 0; i < total_descs; i++) {
+ ring[i] = ioat_alloc_ring_ent(c, i, flags);
if (!ring[i]) {
+ int idx;
+
while (i--)
ioat_free_ring_ent(ring[i], c);
+
+ for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
+ dma_free_coherent(to_dev(ioat_chan),
+ SZ_2M,
+ ioat_chan->descs[idx].virt,
+ ioat_chan->descs[idx].hw);
+ ioat_chan->descs[idx].virt = NULL;
+ ioat_chan->descs[idx].hw = 0;
+ }
+
+ ioat_chan->desc_chunks = 0;
kfree(ring);
return NULL;
}
@@ -351,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
}
/* link descs */
- for (i = 0; i < descs-1; i++) {
+ for (i = 0; i < total_descs-1; i++) {
struct ioat_ring_ent *next = ring[i+1];
struct ioat_dma_descriptor *hw = ring[i]->hw;
@@ -362,114 +400,6 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
return ring;
}
-static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
-{
- /* reshape differs from normal ring allocation in that we want
- * to allocate a new software ring while only
- * extending/truncating the hardware ring
- */
- struct dma_chan *c = &ioat_chan->dma_chan;
- const u32 curr_size = ioat_ring_size(ioat_chan);
- const u16 active = ioat_ring_active(ioat_chan);
- const u32 new_size = 1 << order;
- struct ioat_ring_ent **ring;
- u32 i;
-
- if (order > ioat_get_max_alloc_order())
- return false;
-
- /* double check that we have at least 1 free descriptor */
- if (active == curr_size)
- return false;
-
- /* when shrinking, verify that we can hold the current active
- * set in the new ring
- */
- if (active >= new_size)
- return false;
-
- /* allocate the array to hold the software ring */
- ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
- if (!ring)
- return false;
-
- /* allocate/trim descriptors as needed */
- if (new_size > curr_size) {
- /* copy current descriptors to the new ring */
- for (i = 0; i < curr_size; i++) {
- u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_chan->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* add new descriptors to the ring */
- for (i = curr_size; i < new_size; i++) {
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
- if (!ring[new_idx]) {
- while (i--) {
- u16 new_idx = (ioat_chan->tail+i) &
- (new_size-1);
-
- ioat_free_ring_ent(ring[new_idx], c);
- }
- kfree(ring);
- return false;
- }
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* hw link new descriptors */
- for (i = curr_size-1; i < new_size; i++) {
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
- struct ioat_ring_ent *next =
- ring[(new_idx+1) & (new_size-1)];
- struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
- hw->next = next->txd.phys;
- }
- } else {
- struct ioat_dma_descriptor *hw;
- struct ioat_ring_ent *next;
-
- /* copy current descriptors to the new ring, dropping the
- * removed descriptors
- */
- for (i = 0; i < new_size; i++) {
- u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_chan->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* free deleted descriptors */
- for (i = new_size; i < curr_size; i++) {
- struct ioat_ring_ent *ent;
-
- ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
- ioat_free_ring_ent(ent, c);
- }
-
- /* fix up hardware ring */
- hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
- next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
- hw->next = next->txd.phys;
- }
-
- dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
- __func__, new_size);
-
- kfree(ioat_chan->ring);
- ioat_chan->ring = ring;
- ioat_chan->alloc_order = order;
-
- return true;
-}
-
/**
* ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
@@ -478,9 +408,6 @@ static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
__acquires(&ioat_chan->prep_lock)
{
- bool retry;
-
- retry:
spin_lock_bh(&ioat_chan->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
@@ -493,23 +420,7 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
ioat_chan->produce = num_descs;
return 0; /* with ioat->prep_lock held */
}
- retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
-
- /* is another cpu already trying to expand the ring? */
- if (retry)
- goto retry;
-
- spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
- clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-
- /* if we were able to expand the ring retry the allocation */
- if (retry)
- goto retry;
dev_dbg_ratelimited(to_dev(ioat_chan),
"%s: ring full! num_descs: %d (%x:%x:%x)\n",
@@ -823,19 +734,6 @@ static void check_active(struct ioatdma_chan *ioat_chan)
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
- else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
- /* if the ring is idle, empty, and oversized try to step
- * down the size
- */
- reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
-
- /* keep shrinking until we get back to our minimum
- * default size
- */
- if (ioat_chan->alloc_order > ioat_get_alloc_order())
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
- }
-
}
void ioat_timer_event(unsigned long data)
@@ -861,32 +759,42 @@ void ioat_timer_event(unsigned long data)
return;
}
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+
+ /* handle the no-actives case */
+ if (!ioat_ring_active(ioat_chan)) {
+ spin_lock_bh(&ioat_chan->prep_lock);
+ check_active(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ return;
+ }
+
/* if we haven't made progress and we have already
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- spin_lock_bh(&ioat_chan->cleanup_lock);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
__cleanup(ioat_chan, phys_complete);
else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+ u32 chanerr;
+
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+ dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+ status, chanerr);
+ dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+ ioat_ring_active(ioat_chan));
+
spin_lock_bh(&ioat_chan->prep_lock);
ioat_restart_channel(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
return;
- } else {
+ } else
set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
- mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
-
- if (ioat_ring_active(ioat_chan))
- mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- else {
- spin_lock_bh(&ioat_chan->prep_lock);
- check_active(ioat_chan);
- spin_unlock_bh(&ioat_chan->prep_lock);
- }
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
@@ -906,40 +814,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
return dma_cookie_status(c, cookie, txstate);
}
-static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
-{
- struct pci_dev *pdev = ioat_dma->pdev;
- int irq = pdev->irq, i;
-
- if (!is_bwd_ioat(pdev))
- return 0;
-
- switch (ioat_dma->irq_mode) {
- case IOAT_MSIX:
- for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
- struct msix_entry *msix = &ioat_dma->msix_entries[i];
- struct ioatdma_chan *ioat_chan;
-
- ioat_chan = ioat_chan_by_index(ioat_dma, i);
- devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
- }
-
- pci_disable_msix(pdev);
- break;
- case IOAT_MSI:
- pci_disable_msi(pdev);
- /* fall through */
- case IOAT_INTX:
- devm_free_irq(&pdev->dev, irq, ioat_dma);
- break;
- default:
- return 0;
- }
- ioat_dma->irq_mode = IOAT_NOIRQ;
-
- return ioat_dma_setup_interrupts(ioat_dma);
-}
-
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
/* throw away whatever the channel was doing and get it
@@ -979,9 +853,21 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
}
}
+ if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+ ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
+ ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
+ ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
+ }
+
err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
- if (!err)
- err = ioat_irq_reinit(ioat_dma);
+ if (!err) {
+ if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+ writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
+ writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
+ writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
+ }
+ }
if (err)
dev_err(&pdev->dev, "Failed to reset: %d\n", err);
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 8f4e607d5817..a9bc1a15b0d1 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -62,7 +62,6 @@ enum ioat_irq_mode {
* struct ioatdma_device - internal representation of a IOAT device
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
* @completion_pool: DMA buffers for completion ops
* @sed_hw_pool: DMA super descriptor pools
* @dma_dev: embedded struct dma_device
@@ -76,8 +75,7 @@ enum ioat_irq_mode {
struct ioatdma_device {
struct pci_dev *pdev;
void __iomem *reg_base;
- struct pci_pool *dma_pool;
- struct pci_pool *completion_pool;
+ struct dma_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
struct dma_device dma_dev;
@@ -88,6 +86,16 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
+
+ /* shadow version for CB3.3 chan reset errata workaround */
+ u64 msixtba0;
+ u64 msixdata0;
+ u32 msixpba;
+};
+
+struct ioat_descs {
+ void *virt;
+ dma_addr_t hw;
};
struct ioatdma_chan {
@@ -100,7 +108,6 @@ struct ioatdma_chan {
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
- #define IOAT_RESHAPE_PENDING 4
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
@@ -133,6 +140,8 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
+ struct ioat_descs descs[2];
+ int desc_chunks;
};
struct ioat_sysfs_entry {
@@ -235,43 +244,11 @@ ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
return ioat_dma->idx[index];
}
-static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
-{
- u8 ver = ioat_chan->ioat_dma->version;
- u64 status;
- u32 status_lo;
-
- /* We need to read the low address first as this causes the
- * chipset to latch the upper bits for the subsequent read
- */
- status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
- status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
- status <<= 32;
- status |= status_lo;
-
- return status;
-}
-
-#if BITS_PER_LONG == 64
-
static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
- u8 ver = ioat_chan->ioat_dma->version;
- u64 status;
-
- /* With IOAT v3.3 the status register is 64bit. */
- if (ver >= IOAT_VER_3_3)
- status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
- else
- status = ioat_chansts_32(ioat_chan);
-
- return status;
+ return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}
-#else
-#define ioat_chansts ioat_chansts_32
-#endif
-
static inline u64 ioat_chansts_to_addr(u64 status)
{
return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
@@ -334,10 +311,8 @@ static inline bool is_ioat_bug(unsigned long err)
}
#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
- (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
- (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+#define IOAT_MAX_DESCS 65536
+#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
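With ring reshaping gone, descriptors are carved out of fixed 2 MB coherent chunks up front. A minimal standalone sketch of the arithmetic behind the two new constants (the 64-byte descriptor size is IOAT_DESC_SZ from hw.h below; the 2 MB chunk size is assumed from linux/sizes.h):

    #include <assert.h>

    #define SZ_2M             (2 * 1024 * 1024)
    #define IOAT_DESC_SZ      64      /* hardware descriptor size */
    #define IOAT_DESCS_PER_2M 32768   /* SZ_2M / IOAT_DESC_SZ */
    #define IOAT_MAX_DESCS    65536   /* 1 << IOAT_MAX_ORDER */

    int main(void)
    {
        /* each 2 MB coherent chunk holds exactly 32768 descriptors */
        assert(SZ_2M / IOAT_DESC_SZ == IOAT_DESCS_PER_2M);
        /* a maximum-order ring therefore needs the two descs[] chunks */
        assert(IOAT_MAX_DESCS / IOAT_DESCS_PER_2M == 2);
        return 0;
    }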
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 690e3b4f8202..8e67895bcca3 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -73,6 +73,8 @@
int system_has_dca_enabled(struct pci_dev *pdev);
+#define IOAT_DESC_SZ 64
+
struct ioat_dma_descriptor {
uint32_t size;
union {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..efdee1a69fc4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -28,6 +28,7 @@
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
+#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -136,14 +137,6 @@ int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
"high-water mark for pushing ioat descriptors (default: 4)");
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
- "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
-int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
- "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
@@ -504,23 +497,14 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
struct pci_dev *pdev = ioat_dma->pdev;
struct device *dev = &pdev->dev;
- /* DMA coherent memory pool for DMA descriptor allocations */
- ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
- sizeof(struct ioat_dma_descriptor),
- 64, 0);
- if (!ioat_dma->dma_pool) {
- err = -ENOMEM;
- goto err_dma_pool;
- }
-
- ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+ ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
sizeof(u64),
SMP_CACHE_BYTES,
SMP_CACHE_BYTES);
if (!ioat_dma->completion_pool) {
err = -ENOMEM;
- goto err_completion_pool;
+ goto err_out;
}
ioat_enumerate_channels(ioat_dma);
@@ -546,10 +530,8 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
err_self_test:
ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
- pci_pool_destroy(ioat_dma->completion_pool);
-err_completion_pool:
- pci_pool_destroy(ioat_dma->dma_pool);
-err_dma_pool:
+ dma_pool_destroy(ioat_dma->completion_pool);
+err_out:
return err;
}
@@ -559,8 +541,7 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
if (err) {
ioat_disable_interrupts(ioat_dma);
- pci_pool_destroy(ioat_dma->completion_pool);
- pci_pool_destroy(ioat_dma->dma_pool);
+ dma_pool_destroy(ioat_dma->completion_pool);
}
return err;
@@ -576,8 +557,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
dma_async_device_unregister(dma);
- pci_pool_destroy(ioat_dma->dma_pool);
- pci_pool_destroy(ioat_dma->completion_pool);
+ dma_pool_destroy(ioat_dma->completion_pool);
INIT_LIST_HEAD(&dma->channels);
}
@@ -666,10 +646,19 @@ static void ioat_free_chan_resources(struct dma_chan *c)
ioat_free_ring_ent(desc, c);
}
+ for (i = 0; i < ioat_chan->desc_chunks; i++) {
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ ioat_chan->descs[i].virt,
+ ioat_chan->descs[i].hw);
+ ioat_chan->descs[i].virt = NULL;
+ ioat_chan->descs[i].hw = 0;
+ }
+ ioat_chan->desc_chunks = 0;
+
kfree(ioat_chan->ring);
ioat_chan->ring = NULL;
ioat_chan->alloc_order = 0;
- pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+ dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
ioat_chan->completion_dma);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +690,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
- pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+ dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
GFP_KERNEL, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
@@ -712,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
writel(((u64)ioat_chan->completion_dma) >> 32,
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- order = ioat_get_alloc_order();
+ order = IOAT_MAX_ORDER;
ring = ioat_alloc_ring(c, order, GFP_KERNEL);
if (!ring)
return -ENOMEM;
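pci_pool was only a thin macro wrapper around dma_pool, so the conversion amounts to passing the underlying struct device instead of the pci_dev. A minimal sketch of the dma_pool lifecycle as used here (sizes mirror the completion_pool above; everything else is generic boilerplate):

    #include <linux/dmapool.h>

    static int example_completion_pool(struct device *dev)
    {
        struct dma_pool *pool;
        dma_addr_t handle;
        u64 *completion;

        pool = dma_pool_create("completion_pool", dev, sizeof(u64),
                               SMP_CACHE_BYTES, SMP_CACHE_BYTES);
        if (!pool)
            return -ENOMEM;

        completion = dma_pool_alloc(pool, GFP_KERNEL, &handle);
        if (!completion) {
            dma_pool_destroy(pool);
            return -ENOMEM;
        }

        /* ... hardware writes the completion status at 'handle' ... */

        dma_pool_free(pool, completion, handle);
        dma_pool_destroy(pool);
        return 0;
    }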
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
index 6bb4a13a8fbd..243421af888f 100644
--- a/drivers/dma/ioat/prep.c
+++ b/drivers/dma/ioat/prep.c
@@ -26,7 +26,7 @@
#include "hw.h"
#include "dma.h"
-#define MAX_SCF 1024
+#define MAX_SCF 256
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 909352f74c89..4994a3623aee 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -99,19 +99,9 @@
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
-
-#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
-#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
-#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
- ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
-#define IOAT1_CHANSTS_OFFSET_LOW 0x04
-#define IOAT2_CHANSTS_OFFSET_LOW 0x08
-#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
- ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
-#define IOAT1_CHANSTS_OFFSET_HIGH 0x08
-#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
-#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
- ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
+/* IOAT1 define kept so the i7300_idle driver continues to compile */
+#define IOAT1_CHANSTS_OFFSET 0x04
+#define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL
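With the version-dependent offsets gone, the channel status is always a single 64-bit register. A hedged sketch (not part of this patch) of how a caller recovers the last completed descriptor address from it, matching ioat_chansts()/ioat_chansts_to_addr() in dma.h above:

    static inline u64 chansts_completed_addr(void __iomem *reg_base)
    {
        u64 status = readq(reg_base + IOAT_CHANSTS_OFFSET);

        /* the low six bits carry error/state flags, not address bits */
        return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
    }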
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e4f43125e0fb..f039cfadf17b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1300,10 +1300,10 @@ static int iop_adma_probe(struct platform_device *pdev)
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
+ adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL);
if (!adev->dma_desc_pool_virt) {
ret = -ENOMEM;
goto err_free_adev;
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..1502b24b7c7d 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -483,7 +483,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
if (IS_ERR(ch->cookie))
- return IS_ERR(ch->cookie);
+ return PTR_ERR(ch->cookie);
return 0;
}
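The one-character fix above matters because IS_ERR() is a boolean test: the old code returned 1 to a caller expecting a negative errno. An illustrative (hypothetical) helper showing the idiom:

    #include <linux/err.h>

    static int example_check(void *cookie)
    {
        if (IS_ERR(cookie))
            return PTR_ERR(cookie); /* the encoded errno, e.g. -EINVAL;
                                     * IS_ERR(cookie) would be just 1 */
        return 0;
    }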
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 1c2de9a834a9..3922a5d56806 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -139,46 +139,10 @@ static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
}
static void mv_chan_set_mode(struct mv_xor_chan *chan,
- enum dma_transaction_type type)
+ u32 op_mode)
{
- u32 op_mode;
u32 config = readl_relaxed(XOR_CONFIG(chan));
- switch (type) {
- case DMA_XOR:
- op_mode = XOR_OPERATION_MODE_XOR;
- break;
- case DMA_MEMCPY:
- op_mode = XOR_OPERATION_MODE_MEMCPY;
- break;
- default:
- dev_err(mv_chan_to_devp(chan),
- "error: unsupported operation %d\n",
- type);
- BUG();
- return;
- }
-
- config &= ~0x7;
- config |= op_mode;
-
-#if defined(__BIG_ENDIAN)
- config |= XOR_DESCRIPTOR_SWAP;
-#else
- config &= ~XOR_DESCRIPTOR_SWAP;
-#endif
-
- writel_relaxed(config, XOR_CONFIG(chan));
- chan->current_type = type;
-}
-
-static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan)
-{
- u32 op_mode;
- u32 config = readl_relaxed(XOR_CONFIG(chan));
-
- op_mode = XOR_OPERATION_MODE_IN_DESC;
-
config &= ~0x7;
config |= op_mode;
@@ -1000,8 +964,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
* requires that we explicitly flush the writes
*/
mv_chan->dma_desc_pool_virt =
- dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
- &mv_chan->dma_desc_pool, GFP_KERNEL);
+ dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
+ GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
@@ -1043,9 +1007,9 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan_unmask_interrupts(mv_chan);
if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
- mv_chan_set_mode_to_desc(mv_chan);
+ mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
else
- mv_chan_set_mode(mv_chan, DMA_XOR);
+ mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);
spin_lock_init(&mv_chan->lock);
INIT_LIST_HEAD(&mv_chan->chain);
@@ -1121,6 +1085,57 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
+/*
+ * Since this XOR driver is basically used only for RAID5, we don't
+ * need to care about synchronizing ->suspend with DMA activity,
+ * because the DMA engine will naturally be quiet due to the block
+ * devices being suspended.
+ */
+static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ struct mv_xor_chan *mv_chan = xordev->channels[i];
+
+ if (!mv_chan)
+ continue;
+
+ mv_chan->saved_config_reg =
+ readl_relaxed(XOR_CONFIG(mv_chan));
+ mv_chan->saved_int_mask_reg =
+ readl_relaxed(XOR_INTR_MASK(mv_chan));
+ }
+
+ return 0;
+}
+
+static int mv_xor_resume(struct platform_device *dev)
+{
+ struct mv_xor_device *xordev = platform_get_drvdata(dev);
+ const struct mbus_dram_target_info *dram;
+ int i;
+
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ struct mv_xor_chan *mv_chan = xordev->channels[i];
+
+ if (!mv_chan)
+ continue;
+
+ writel_relaxed(mv_chan->saved_config_reg,
+ XOR_CONFIG(mv_chan));
+ writel_relaxed(mv_chan->saved_int_mask_reg,
+ XOR_INTR_MASK(mv_chan));
+ }
+
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_xor_conf_mbus_windows(xordev, dram);
+
+ return 0;
+}
+
static const struct of_device_id mv_xor_dt_ids[] = {
{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
@@ -1282,6 +1297,8 @@ err_channel_add:
static struct platform_driver mv_xor_driver = {
.probe = mv_xor_probe,
+ .suspend = mv_xor_suspend,
+ .resume = mv_xor_resume,
.driver = {
.name = MV_XOR_NAME,
.of_match_table = of_match_ptr(mv_xor_dt_ids),
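The patch wires the callbacks through the legacy platform_driver .suspend/.resume hooks. For comparison only, a hypothetical dev_pm_ops spelling of the same pair (not what the patch does) would look roughly like:

    static int mv_xor_suspend_dev(struct device *dev)
    {
        return mv_xor_suspend(to_platform_device(dev), PMSG_SUSPEND);
    }

    static int mv_xor_resume_dev(struct device *dev)
    {
        return mv_xor_resume(to_platform_device(dev));
    }

    static SIMPLE_DEV_PM_OPS(mv_xor_pm_ops,
                             mv_xor_suspend_dev, mv_xor_resume_dev);
    /* and in mv_xor_driver: .driver = { .pm = &mv_xor_pm_ops, ... } */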
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index b7455b42137b..c19fe30e5ae9 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -110,7 +110,6 @@ struct mv_xor_chan {
void __iomem *mmr_high_base;
unsigned int idx;
int irq;
- enum dma_transaction_type current_type;
struct list_head chain;
struct list_head free_slots;
struct list_head allocated_slots;
@@ -126,6 +125,7 @@ struct mv_xor_chan {
char dummy_src[MV_XOR_MIN_BYTE_COUNT];
char dummy_dst[MV_XOR_MIN_BYTE_COUNT];
dma_addr_t dummy_src_addr, dummy_dst_addr;
+ u32 saved_config_reg, saved_int_mask_reg;
};
/**
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1dfc71c90123..43bd5aee7ffe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -28,8 +28,6 @@
struct omap_dmadev {
struct dma_device ddev;
spinlock_t lock;
- struct tasklet_struct task;
- struct list_head pending;
void __iomem *base;
const struct omap_dma_reg *reg_map;
struct omap_system_dma_plat_info *plat;
@@ -42,7 +40,6 @@ struct omap_dmadev {
struct omap_chan {
struct virt_dma_chan vc;
- struct list_head node;
void __iomem *channel_base;
const struct omap_dma_reg *reg_map;
uint32_t ccr;
@@ -454,33 +451,6 @@ static void omap_dma_callback(int ch, u16 status, void *data)
spin_unlock_irqrestore(&c->vc.lock, flags);
}
-/*
- * This callback schedules all pending channels. We could be more
- * clever here by postponing allocation of the real DMA channels to
- * this point, and freeing them when our virtual channel becomes idle.
- *
- * We would then need to deal with 'all channels in-use'
- */
-static void omap_dma_sched(unsigned long data)
-{
- struct omap_dmadev *d = (struct omap_dmadev *)data;
- LIST_HEAD(head);
-
- spin_lock_irq(&d->lock);
- list_splice_tail_init(&d->pending, &head);
- spin_unlock_irq(&d->lock);
-
- while (!list_empty(&head)) {
- struct omap_chan *c = list_first_entry(&head,
- struct omap_chan, node);
-
- spin_lock_irq(&c->vc.lock);
- list_del_init(&c->node);
- omap_dma_start_desc(c);
- spin_unlock_irq(&c->vc.lock);
- }
-}
-
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
struct omap_dmadev *od = devid;
@@ -703,8 +673,14 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
+ uint32_t ccr;
unsigned long flags;
+ ccr = omap_dma_chan_read(c, CCR);
+ /* The channel is no longer active, handle the completion right away */
+ if (!(ccr & CCR_ENABLE))
+ omap_dma_callback(c->dma_ch, 0, c);
+
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -719,7 +695,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
if (d->dir == DMA_MEM_TO_DEV)
pos = omap_dma_get_src_pos(c);
- else if (d->dir == DMA_DEV_TO_MEM)
+ else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
pos = omap_dma_get_dst_pos(c);
else
pos = 0;
@@ -739,22 +715,8 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
unsigned long flags;
spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc) && !c->desc) {
- /*
- * c->cyclic is used only by audio and in this case the DMA need
- * to be started without delay.
- */
- if (!c->cyclic) {
- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
- spin_lock(&d->lock);
- if (list_empty(&c->node))
- list_add_tail(&c->node, &d->pending);
- spin_unlock(&d->lock);
- tasklet_schedule(&d->task);
- } else {
- omap_dma_start_desc(c);
- }
- }
+ if (vchan_issue_pending(&c->vc) && !c->desc)
+ omap_dma_start_desc(c);
spin_unlock_irqrestore(&c->vc.lock, flags);
}
@@ -768,7 +730,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
struct scatterlist *sgent;
struct omap_desc *d;
dma_addr_t dev_addr;
- unsigned i, j = 0, es, en, frame_bytes;
+ unsigned i, es, en, frame_bytes;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
@@ -845,13 +807,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
en = burst;
frame_bytes = es_bytes[es] * en;
for_each_sg(sgl, sgent, sglen, i) {
- d->sg[j].addr = sg_dma_address(sgent);
- d->sg[j].en = en;
- d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
- j++;
+ d->sg[i].addr = sg_dma_address(sgent);
+ d->sg[i].en = en;
+ d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
}
- d->sglen = j;
+ d->sglen = sglen;
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
@@ -1018,17 +979,11 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config
static int omap_dma_terminate_all(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
- struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
- /* Prevent this channel being scheduled */
- spin_lock(&d->lock);
- list_del_init(&c->node);
- spin_unlock(&d->lock);
-
/*
* Stop DMA activity: we assume the callback will not be called
* after omap_dma_stop() returns (even if it does, it will see
@@ -1054,6 +1009,13 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void omap_dma_synchronize(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1102,14 +1064,12 @@ static int omap_dma_chan_init(struct omap_dmadev *od)
c->reg_map = od->reg_map;
c->vc.desc_free = omap_dma_desc_free;
vchan_init(&c->vc, &od->ddev);
- INIT_LIST_HEAD(&c->node);
return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
- tasklet_kill(&od->task);
while (!list_empty(&od->ddev.channels)) {
struct omap_chan *c = list_first_entry(&od->ddev.channels,
struct omap_chan, vc.chan.device_node);
@@ -1159,18 +1119,16 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_pause = omap_dma_pause;
od->ddev.device_resume = omap_dma_resume;
od->ddev.device_terminate_all = omap_dma_terminate_all;
+ od->ddev.device_synchronize = omap_dma_synchronize;
od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
- INIT_LIST_HEAD(&od->pending);
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
-
od->dma_requests = OMAP_SDMA_REQUESTS;
if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
"dma-requests",
@@ -1203,6 +1161,10 @@ static int omap_dma_probe(struct platform_device *pdev)
return rc;
}
+ od->ddev.filter.map = od->plat->slave_map;
+ od->ddev.filter.mapcnt = od->plat->slavecnt;
+ od->ddev.filter.fn = omap_dma_filter_fn;
+
rc = dma_async_device_register(&od->ddev);
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
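The three filter.* assignments let dma_request_chan() resolve channels on non-DT boards through a platform-provided table. A hypothetical slave_map entry (device and signal names are illustrative, not from this patch):

    /* board file: consumed via od->plat->slave_map / slavecnt */
    static const struct dma_slave_map omap_example_map[] = {
        { "omap-uart.0", "tx", (void *)27 },  /* DMA request line 27 */
        { "omap-uart.0", "rx", (void *)26 },
    };
    /* dma_request_chan(dev, "tx") matches an entry by devname/slave
     * and hands .param to omap_dma_filter_fn() */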
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 17ee758b419f..372b4359da97 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -33,6 +33,9 @@
#define PL330_MAX_CHAN 8
#define PL330_MAX_IRQS 32
#define PL330_MAX_PERI 32
+#define PL330_MAX_BURST 16
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
enum pl330_cachectrl {
CCTRL0, /* Noncacheable and nonbufferable */
@@ -488,6 +491,17 @@ struct pl330_dmac {
/* Peripheral channels connected to this DMAC */
unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
+ int quirks;
+};
+
+static struct pl330_of_quirks {
+ char *quirk;
+ int id;
+} of_quirks[] = {
+ {
+ .quirk = "arm,pl330-broken-no-flushp",
+ .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+ }
};
struct dma_pl330_desc {
@@ -1137,47 +1151,67 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
+ u8 buf[], const struct _xfer_spec *pxs,
+ int cyc)
{
int off = 0;
+ enum pl330_cond cond;
+
+ if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ cond = BURST;
+ else
+ cond = SINGLE;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
- off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
+ off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off],
+ pxs->desc->peri);
}
return off;
}
-static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
+ enum pl330_cond cond;
+
+ if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ cond = BURST;
+ else
+ cond = SINGLE;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+ off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
+
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off],
+ pxs->desc->peri);
}
return off;
}
-static int _bursts(unsigned dry_run, u8 buf[],
+static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+ off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
break;
case DMA_DEV_TO_MEM:
- off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+ off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
break;
case DMA_MEM_TO_MEM:
off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
@@ -1191,7 +1225,7 @@ static int _bursts(unsigned dry_run, u8 buf[],
}
/* Returns bytes consumed and updates bursts */
-static inline int _loop(unsigned dry_run, u8 buf[],
+static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
unsigned long *bursts, const struct _xfer_spec *pxs)
{
int cyc, cycmax, szlp, szlpend, szbrst, off;
@@ -1199,7 +1233,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
struct _arg_LPEND lpend;
if (*bursts == 1)
- return _bursts(dry_run, buf, pxs, 1);
+ return _bursts(pl330, dry_run, buf, pxs, 1);
/* Max iterations possible in DMALP is 256 */
if (*bursts >= 256*256) {
@@ -1217,7 +1251,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
}
szlp = _emit_LP(1, buf, 0, 0);
- szbrst = _bursts(1, buf, pxs, 1);
+ szbrst = _bursts(pl330, 1, buf, pxs, 1);
lpend.cond = ALWAYS;
lpend.forever = false;
@@ -1249,7 +1283,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
ljmp1 = off;
- off += _bursts(dry_run, &buf[off], pxs, cyc);
+ off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);
lpend.cond = ALWAYS;
lpend.forever = false;
@@ -1272,8 +1306,9 @@ static inline int _loop(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _setup_loops(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
+static inline int _setup_loops(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
{
struct pl330_xfer *x = &pxs->desc->px;
u32 ccr = pxs->ccr;
@@ -1282,15 +1317,16 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[],
while (bursts) {
c = bursts;
- off += _loop(dry_run, &buf[off], &c, pxs);
+ off += _loop(pl330, dry_run, &buf[off], &c, pxs);
bursts -= c;
}
return off;
}
-static inline int _setup_xfer(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
+static inline int _setup_xfer(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
{
struct pl330_xfer *x = &pxs->desc->px;
int off = 0;
@@ -1301,7 +1337,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
/* Setup Loop(s) */
- off += _setup_loops(dry_run, &buf[off], pxs);
+ off += _setup_loops(pl330, dry_run, &buf[off], pxs);
return off;
}
@@ -1310,8 +1346,9 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
* A req is a sequence of one or more xfer units.
* Returns the number of bytes taken to setup the MC for the req.
*/
-static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
- unsigned index, struct _xfer_spec *pxs)
+static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
+ struct pl330_thread *thrd, unsigned index,
+ struct _xfer_spec *pxs)
{
struct _pl330_req *req = &thrd->req[index];
struct pl330_xfer *x;
@@ -1328,7 +1365,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
return -EINVAL;
- off += _setup_xfer(dry_run, &buf[off], pxs);
+ off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
@@ -1422,7 +1459,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
xs.desc = desc;
/* First dry run to check if req is acceptable */
- ret = _setup_req(1, thrd, idx, &xs);
+ ret = _setup_req(pl330, 1, thrd, idx, &xs);
if (ret < 0)
goto xfer_exit;
@@ -1436,7 +1473,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
/* Hook the request */
thrd->lstenq = idx;
thrd->req[idx].desc = desc;
- _setup_req(0, thrd, idx, &xs);
+ _setup_req(pl330, 0, thrd, idx, &xs);
ret = 0;
@@ -2781,6 +2818,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct resource *res;
int i, ret, irq;
int num_chan;
+ struct device_node *np = adev->dev.of_node;
pdat = dev_get_platdata(&adev->dev);
@@ -2800,6 +2838,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+ /* get quirk */
+ for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
+ if (of_property_read_bool(np, of_quirks[i].quirk))
+ pl330->quirks |= of_quirks[i].id;
+
res = &adev->res;
pl330->base = devm_ioremap_resource(&adev->dev, res);
if (IS_ERR(pl330->base))
@@ -2895,6 +2938,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
+ 1 : PL330_MAX_BURST);
ret = dma_async_device_register(pd);
if (ret) {
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index fc4156afa070..debca824bed6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN;
+ if (sw_desc->cyclic)
+ sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ if (to_pxad_sw_desc(vd)->cyclic) {
+ vchan_cyclic_callback(vd);
+ break;
+ }
if (is_desc_completed(vd)) {
list_del(&vd->node);
vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
- dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
+ dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags);
@@ -1414,6 +1420,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.dst_addr_widths = widths;
pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ pdev->slave.descriptor_reuse = true;
pdev->slave.dev = &op->dev;
ret = pxad_init_dmadev(op, pdev, dma_channels);
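The cyclic-prep fix above replaces a bitwise OR with the intended AND mask. A worked example with assumed register values (treating PXA_DCMD_LENGTH as a 13-bit length field) shows why:

    #define PXA_DCMD_ENDIRQEN (1u << 21)  /* assumed for illustration */
    #define PXA_DCMD_LENGTH   0x1fffu

    static unsigned int cyclic_dcmd(unsigned int period_len /* e.g. 0x1000 */)
    {
        /* buggy: ... | (PXA_DCMD_LENGTH | period_len) set every length
         * bit, so the field always read back as 0x1fff */
        return PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
        /* fixed: masks the period into the field, length == 0x1000 */
    }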
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 000000000000..a7761c4025f4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_HIDMA_MGMT
+ tristate "Qualcomm Technologies HIDMA Management support"
+ select DMA_ENGINE
+ help
+	  Enable support for the Qualcomm Technologies HIDMA management
+	  interface. Each DMA device requires one management interface
+	  driver for basic initialization before the QCOM_HIDMA channel
+	  driver can start managing the channels. In a virtualized
+	  environment, the guest OS would run the QCOM_HIDMA channel
+	  driver and the host would run the QCOM_HIDMA_MGMT management
+	  driver.
+
+config QCOM_HIDMA
+ tristate "Qualcomm Technologies HIDMA Channel support"
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA controller.
+ The HIDMA controller supports optimized buffer copies
+	  (user to kernel, kernel to kernel, etc.). It only supports
+	  the memcpy interface. The core is not intended for general
+ purpose slave DMA.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 000000000000..bfea6990229f
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
+hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5a250cdc8376..d5e0a9c3ad5d 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,13 +49,13 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
-#include "dmaengine.h"
-#include "virt-dma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
struct bam_desc_hw {
- u32 addr; /* Buffer physical address */
- u16 size; /* Buffer size in bytes */
- u16 flags;
+ __le32 addr; /* Buffer physical address */
+ __le16 size; /* Buffer size in bytes */
+ __le16 flags;
};
#define DESC_FLAG_INT BIT(15)
@@ -502,8 +502,8 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0;
/* allocate FIFO descriptor space, but only if necessary */
- bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- &bchan->fifo_phys, GFP_KERNEL);
+ bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ &bchan->fifo_phys, GFP_KERNEL);
if (!bchan->fifo_virt) {
dev_err(bdev->dev, "Failed to allocate desc fifo\n");
@@ -538,8 +538,8 @@ static void bam_free_chan(struct dma_chan *chan)
bam_reset_channel(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
- bchan->fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+ bchan->fifo_phys);
bchan->fifo_virt = NULL;
/* mask irq for pipe/channel */
@@ -632,14 +632,15 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
unsigned int curr_offset = 0;
do {
- desc->addr = sg_dma_address(sg) + curr_offset;
+ desc->addr = cpu_to_le32(sg_dma_address(sg) +
+ curr_offset);
if (remainder > BAM_MAX_DATA_SIZE) {
- desc->size = BAM_MAX_DATA_SIZE;
+ desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
remainder -= BAM_MAX_DATA_SIZE;
curr_offset += BAM_MAX_DATA_SIZE;
} else {
- desc->size = remainder;
+ desc->size = cpu_to_le16(remainder);
remainder = 0;
}
@@ -915,9 +916,11 @@ static void bam_start_dma(struct bam_chan *bchan)
/* set any special flags on the last descriptor */
if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags = async_desc->flags;
+ desc[async_desc->xfer_len - 1].flags =
+ cpu_to_le16(async_desc->flags);
else
- desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(DESC_FLAG_INT);
if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
u32 partial = MAX_DESCRIPTORS - bchan->tail;
@@ -1231,9 +1234,9 @@ static int bam_dma_remove(struct platform_device *pdev)
bam_dma_terminate_all(&bdev->channels[i].vc.chan);
tasklet_kill(&bdev->channels[i].vc.task);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- bdev->channels[i].fifo_virt,
- bdev->channels[i].fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ bdev->channels[i].fifo_virt,
+ bdev->channels[i].fifo_phys);
}
tasklet_kill(&bdev->task);
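Alongside the dma_alloc_wc/dma_free_wc renames, the descriptor fields became __le32/__le16 so sparse can flag missing conversions; the BAM always reads its FIFO as little-endian. A minimal sketch of the pattern:

    struct example_desc_hw {
        __le32 addr;
        __le16 size;
        __le16 flags;
    };

    static void example_fill(struct example_desc_hw *d, u32 addr, u16 size)
    {
        /* convert once at the hardware boundary; CPU-order values
         * stay in ordinary u32/u16 variables */
        d->addr  = cpu_to_le32(addr);
        d->size  = cpu_to_le16(size);
        d->flags = cpu_to_le16(0);
    }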
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..cccc78efbca9
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,706 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/* Linux Foundation elects GPLv2 license only. */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/property.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+
+#include "../dmaengine.h"
+#include "hidma.h"
+
+/*
+ * Default idle time is 2 seconds. This parameter can
+ * be overridden at run time by writing to
+ * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
+ */
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_ERR_INFO_SW 0xFF
+#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
+#define HIDMA_NR_DEFAULT_DESC 10
+
+static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
+{
+ return container_of(dmadev, struct hidma_dev, ddev);
+}
+
+static inline
+struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
+{
+ return container_of(_lldevp, struct hidma_dev, lldev);
+}
+
+static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
+{
+ return container_of(dmach, struct hidma_chan, chan);
+}
+
+static inline
+struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct hidma_desc, desc);
+}
+
+static void hidma_free(struct hidma_dev *dmadev)
+{
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+}
+
+static unsigned int nr_desc_prm;
+module_param(nr_desc_prm, uint, 0644);
+MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+
+/* process completed descriptors */
+static void hidma_process_completed(struct hidma_chan *mchan)
+{
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *mdma = to_hidma_dev(ddev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t last_cookie;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ struct list_head list;
+
+ INIT_LIST_HEAD(&list);
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ enum dma_status llstat;
+
+ desc = &mdesc->desc;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ dma_cookie_complete(desc);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+ if (desc->callback && (llstat == DMA_COMPLETE))
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&list, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+/*
+ * Called once for each submitted descriptor.
+ * PM is locked once for each descriptor that is currently
+ * in execution.
+ */
+static void hidma_callback(void *data)
+{
+ struct hidma_desc *mdesc = data;
+ struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *dmadev = to_hidma_dev(ddev);
+ unsigned long irqflags;
+ bool queued = false;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (mdesc->node.next) {
+ /* Delete from the active list, add to completed list */
+ list_move_tail(&mdesc->node, &mchan->completed);
+ queued = true;
+
+ /* calculate the next running descriptor */
+ mchan->running = list_first_entry(&mchan->active,
+ struct hidma_desc, node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ hidma_process_completed(mchan);
+
+ if (queued) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+}
+
+static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
+{
+ struct hidma_chan *mchan;
+ struct dma_device *ddev;
+
+ mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return -ENOMEM;
+
+ ddev = &dmadev->ddev;
+ mchan->dma_sig = dma_sig;
+ mchan->dmadev = dmadev;
+ mchan->chan.device = ddev;
+ dma_cookie_init(&mchan->chan);
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &ddev->channels);
+ dmadev->ddev.chancnt++;
+ return 0;
+}
+
+static void hidma_issue_task(unsigned long arg)
+{
+ struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ hidma_ll_start(dmadev->lldev);
+}
+
+static void hidma_issue_pending(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!mchan->running) {
+ struct hidma_desc *desc = list_first_entry(&mchan->active,
+ struct hidma_desc,
+ node);
+ mchan->running = desc;
+ }
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+	/* The PM reference will be released in hidma_callback(). */
+ status = pm_runtime_get(dmadev->ddev.dev);
+ if (status < 0)
+ tasklet_schedule(&dmadev->task);
+ else
+ hidma_ll_start(dmadev->lldev);
+}
+
+static enum dma_status hidma_tx_status(struct dma_chan *dmach,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dmach, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
+ unsigned long flags;
+ dma_cookie_t runcookie;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (mchan->running)
+ runcookie = mchan->running->desc.cookie;
+ else
+ runcookie = -EINVAL;
+
+ if (runcookie == cookie)
+ ret = DMA_PAUSED;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+
+ return ret;
+}
+
+/*
+ * Submit descriptor to hardware.
+ * Lock the PM for each descriptor we are sending.
+ */
+static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct hidma_chan *mchan = to_hidma_chan(txd->chan);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ dma_cookie_t cookie;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (!hidma_ll_isenabled(dmadev->lldev)) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return -ENODEV;
+ }
+
+ mdesc = container_of(txd, struct hidma_desc, desc);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move descriptor to active */
+ list_move_tail(&mdesc->node, &mchan->active);
+
+ /* Update cookie */
+ cookie = dma_cookie_assign(txd);
+
+ hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return cookie;
+}
+
+static int hidma_alloc_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+ unsigned int i;
+ int rc = 0;
+
+ if (mchan->allocated)
+ return 0;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < dmadev->nr_descriptors; i++) {
+ mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
+ if (!mdesc) {
+ rc = -ENOMEM;
+ break;
+ }
+ dma_async_tx_descriptor_init(&mdesc->desc, dmach);
+ mdesc->desc.tx_submit = hidma_tx_submit;
+
+ rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
+ "DMA engine", hidma_callback, mdesc,
+ &mdesc->tre_ch);
+ if (rc) {
+ dev_err(dmach->device->dev,
+ "channel alloc failed at %u\n", i);
+ kfree(mdesc);
+ break;
+ }
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ if (rc) {
+ /* return the allocated descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
+ kfree(mdesc);
+ }
+ return rc;
+ }
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&descs, &mchan->free);
+ mchan->allocated = true;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ return 1;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_desc *mdesc = NULL;
+ struct hidma_dev *mdma = mchan->dmadev;
+ unsigned long irqflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ if (!mdesc)
+ return NULL;
+
+ hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+ src, dest, len, flags);
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return &mdesc->desc;
+}
+
+static int hidma_terminate_channel(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ struct hidma_desc *tmp, *mdesc;
+ unsigned long irqflags;
+ LIST_HEAD(list);
+ int rc;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ /* give completed requests a chance to finish */
+ hidma_process_completed(mchan);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_init(&mchan->active, &list);
+ list_splice_init(&mchan->prepared, &list);
+ list_splice_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* this suspends the existing transfer */
+ rc = hidma_ll_pause(dmadev->lldev);
+ if (rc) {
+ dev_err(dmadev->ddev.dev, "channel did not pause\n");
+ goto out;
+ }
+
+ /* return all user requests */
+ list_for_each_entry_safe(mdesc, tmp, &list, node) {
+ struct dma_async_tx_descriptor *txd = &mdesc->desc;
+ dma_async_tx_callback callback = mdesc->desc.callback;
+ void *param = mdesc->desc.callback_param;
+
+ dma_descriptor_unmap(txd);
+
+ if (callback)
+ callback(param);
+
+ dma_run_dependencies(txd);
+
+ /* move myself to free_list */
+ list_move(&mdesc->node, &mchan->free);
+ }
+
+ rc = hidma_ll_resume(dmadev->lldev);
+out:
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static int hidma_terminate_all(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ int rc;
+
+ rc = hidma_terminate_channel(chan);
+ if (rc)
+ return rc;
+
+ /* reinitialize the hardware */
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_setup(dmadev->lldev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static void hidma_free_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *mdma = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+
+ /* terminate running transactions and free descriptors */
+ hidma_terminate_channel(dmach);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move data */
+ list_splice_tail_init(&mchan->free, &descs);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(mdma->lldev, mdesc->tre_ch);
+ list_del(&mdesc->node);
+ kfree(mdesc);
+ }
+
+	mchan->allocated = false;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+static int hidma_pause(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (!mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (hidma_ll_pause(dmadev->lldev))
+ dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+ mchan->paused = true;
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return 0;
+}
+
+static int hidma_resume(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+ int rc = 0;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_resume(dmadev->lldev);
+ if (!rc)
+ mchan->paused = false;
+ else
+ dev_err(dmadev->ddev.dev,
+ "failed to resume the channel");
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return rc;
+}
+
+static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+
+ /*
+ * All interrupts are request driven.
+ * HW doesn't send an interrupt by itself.
+ */
+ return hidma_ll_inthandler(chirq, lldev);
+}
+
+static int hidma_probe(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev;
+ struct resource *trca_resource;
+ struct resource *evca_resource;
+ int chirq;
+ void __iomem *evca;
+ void __iomem *trca;
+ int rc;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ if (IS_ERR(trca)) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ if (IS_ERR(evca)) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ /*
+ * This driver only handles the channel IRQs.
+ * Common IRQ is handled by the management driver.
+ */
+ chirq = platform_get_irq(pdev, 0);
+ if (chirq < 0) {
+ rc = -ENODEV;
+ goto bailout;
+ }
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+ spin_lock_init(&dmadev->lock);
+ dmadev->ddev.dev = &pdev->dev;
+ pm_runtime_get_sync(dmadev->ddev.dev);
+
+ dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+ if (WARN_ON(!pdev->dev.dma_mask)) {
+ rc = -ENXIO;
+ goto dmafree;
+ }
+
+ dmadev->dev_evca = evca;
+ dmadev->evca_resource = evca_resource;
+ dmadev->dev_trca = trca;
+ dmadev->trca_resource = trca_resource;
+ dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+ dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
+ dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
+ dmadev->ddev.device_tx_status = hidma_tx_status;
+ dmadev->ddev.device_issue_pending = hidma_issue_pending;
+ dmadev->ddev.device_pause = hidma_pause;
+ dmadev->ddev.device_resume = hidma_resume;
+ dmadev->ddev.device_terminate_all = hidma_terminate_all;
+ dmadev->ddev.copy_align = 8;
+
+ device_property_read_u32(&pdev->dev, "desc-count",
+ &dmadev->nr_descriptors);
+
+ if (!dmadev->nr_descriptors && nr_desc_prm)
+ dmadev->nr_descriptors = nr_desc_prm;
+
+ if (!dmadev->nr_descriptors)
+ dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
+
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+
+ /* Set DMA mask to 64 bits. */
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+		dev_warn(&pdev->dev, "unable to set coherent mask to 64 bits\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ goto dmafree;
+ }
+
+ dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
+ dmadev->nr_descriptors, dmadev->dev_trca,
+ dmadev->dev_evca, dmadev->chidx);
+ if (!dmadev->lldev) {
+ rc = -EPROBE_DEFER;
+ goto dmafree;
+ }
+
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
+ "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+
+ rc = hidma_chan_init(dmadev, 0);
+ if (rc)
+ goto uninit;
+
+ rc = dma_async_device_register(&dmadev->ddev);
+ if (rc)
+ goto uninit;
+
+ dmadev->irq = chirq;
+ tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+ dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
+ platform_set_drvdata(pdev, dmadev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return 0;
+
+uninit:
+ hidma_ll_uninit(dmadev->lldev);
+dmafree:
+ if (dmadev)
+ hidma_free(dmadev);
+bailout:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+static int hidma_remove(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ dma_async_device_unregister(&dmadev->ddev);
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ hidma_ll_uninit(dmadev->lldev);
+ hidma_free(dmadev);
+
+ dev_info(&pdev->dev, "HI-DMA engine removed\n");
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_acpi_ids[] = {
+ {"QCOM8061"},
+ {},
+};
+#endif
+
+static const struct of_device_id hidma_match[] = {
+ {.compatible = "qcom,hidma-1.0",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hidma_match);
+
+static struct platform_driver hidma_driver = {
+ .probe = hidma_probe,
+ .remove = hidma_remove,
+ .driver = {
+ .name = "hidma",
+ .of_match_table = hidma_match,
+ .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_driver);
+MODULE_LICENSE("GPL v2");
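
Note: ACPI_PTR() is defined in include/linux/acpi.h; with CONFIG_ACPI disabled it evaluates to NULL, which is why hidma_acpi_ids above is compiled out under #if IS_ENABLED(CONFIG_ACPI) rather than left to trigger an unused-variable warning:

#ifdef CONFIG_ACPI
#define ACPI_PTR(_ptr)	(_ptr)
#else
#define ACPI_PTR(_ptr)	(NULL)
#endif
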
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 000000000000..231e306f6d87
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,160 @@
+/*
+ * Qualcomm Technologies HIDMA data structures
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+#define TRE_SIZE 32 /* each TRE is 32 bytes */
+#define TRE_CFG_IDX 0
+#define TRE_LEN_IDX 1
+#define TRE_SRC_LOW_IDX 2
+#define TRE_SRC_HI_IDX 3
+#define TRE_DEST_LOW_IDX 4
+#define TRE_DEST_HI_IDX 5
+
+struct hidma_tx_status {
+ u8 err_info; /* error record in this transfer */
+ u8 err_code; /* completion code */
+};
+
+struct hidma_tre {
+ atomic_t allocated; /* if this channel is allocated */
+ bool queued; /* flag whether this is pending */
+ u16 status; /* status */
+ u32 chidx; /* index of the tre */
+ u32 dma_sig; /* signature of the tre */
+ const char *dev_name; /* name of the device */
+ void (*callback)(void *data); /* requester callback */
+	void *data;			/* Data associated with this channel */
+ struct hidma_lldev *lldev; /* lldma device pointer */
+ u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+	u32 tre_index;			/* the offset where this was written */
+ u32 int_flags; /* interrupt flags */
+};
+
+struct hidma_lldev {
+ bool initialized; /* initialized flag */
+ u8 trch_state; /* trch_state of the device */
+ u8 evch_state; /* evch_state of the device */
+ u8 chidx; /* channel index in the core */
+ u32 nr_tres; /* max number of configs */
+ spinlock_t lock; /* reentrancy */
+ struct hidma_tre *trepool; /* trepool of user configs */
+ struct device *dev; /* device */
+ void __iomem *trca; /* Transfer Channel address */
+ void __iomem *evca; /* Event Channel address */
+	struct hidma_tre **pending_tre_list;	/* Pointers to pending TREs */
+	struct hidma_tx_status *tx_status_list;	/* status of pending TREs */
+ s32 pending_tre_count; /* Number of TREs pending */
+
+ void *tre_ring; /* TRE ring */
+ dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */
+ u32 tre_ring_size; /* Byte size of the ring */
+ u32 tre_processed_off; /* last processed TRE */
+
+ void *evre_ring; /* EVRE ring */
+ dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */
+ u32 evre_ring_size; /* Byte size of the ring */
+ u32 evre_processed_off; /* last processed EVRE */
+
+ u32 tre_write_offset; /* TRE write location */
+ struct tasklet_struct task; /* task delivering notifications */
+ DECLARE_KFIFO_PTR(handoff_fifo,
+ struct hidma_tre *); /* pending TREs FIFO */
+};
+
+struct hidma_desc {
+ struct dma_async_tx_descriptor desc;
+	/* linked-list node for this channel */
+ struct list_head node;
+ u32 tre_ch;
+};
+
+struct hidma_chan {
+ bool paused;
+ bool allocated;
+ char dbg_name[16];
+ u32 dma_sig;
+
+ /*
+ * active descriptor on this channel
+ * It is used by the DMA complete notification to
+ * locate the descriptor that initiated the transfer.
+ */
+ struct dentry *debugfs;
+ struct dentry *stats;
+ struct hidma_dev *dmadev;
+ struct hidma_desc *running;
+
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head active;
+ struct list_head completed;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
+
+struct hidma_dev {
+ int irq;
+ int chidx;
+ u32 nr_descriptors;
+
+ struct hidma_lldev *lldev;
+ void __iomem *dev_trca;
+ struct resource *trca_resource;
+ void __iomem *dev_evca;
+ struct resource *evca_resource;
+
+	/* used to protect the pending channel list */
+ spinlock_t lock;
+ struct dma_device ddev;
+
+ struct dentry *debugfs;
+ struct dentry *stats;
+
+ /* Task delivering issue_pending */
+ struct tasklet_struct task;
+};
+
+int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
+ const char *dev_name,
+ void (*callback)(void *data), void *data, u32 *tre_ch);
+
+void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
+enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
+bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
+void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
+void hidma_ll_start(struct hidma_lldev *llhndl);
+int hidma_ll_pause(struct hidma_lldev *llhndl);
+int hidma_ll_resume(struct hidma_lldev *llhndl);
+void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
+ dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+int hidma_ll_setup(struct hidma_lldev *lldev);
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
+ void __iomem *trca, void __iomem *evca,
+ u8 chidx);
+int hidma_ll_uninit(struct hidma_lldev *llhndl);
+irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
+ u8 err_code);
+#endif
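
Note: the prototypes above are the whole contract between hidma.c and the low-level hidma_ll layer. A hypothetical client showing the intended call order (the sequencing is an assumption from the names; the implementation lives in hidma_ll.c, not shown in this diff):

static void example_done(void *data)
{
	/* transfer for this TRE has completed */
}

static int example_submit_copy(struct hidma_lldev *lldev, dma_addr_t src,
			       dma_addr_t dest, u32 len)
{
	u32 tre_ch;
	int rc;

	rc = hidma_ll_request(lldev, 0, "example", example_done, NULL,
			      &tre_ch);
	if (rc)
		return rc;

	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, 0);
	hidma_ll_queue_request(lldev, tre_ch);	/* mark the TRE pending */
	hidma_ll_start(lldev);			/* kick the hardware */
	return 0;
}
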
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
new file mode 100644
index 000000000000..ef491b893f40
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -0,0 +1,302 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine Management interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
+
+#include "hidma_mgmt.h"
+
+#define HIDMA_QOS_N_OFFSET 0x300
+#define HIDMA_CFG_OFFSET 0x400
+#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
+#define HIDMA_MAX_XACTIONS_OFFSET 0x420
+#define HIDMA_HW_VERSION_OFFSET 0x424
+#define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418
+
+#define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_WEIGHT_MASK GENMASK(6, 0)
+#define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0)
+#define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0)
+
+#define HIDMA_MAX_WR_XACTIONS_BIT_POS 16
+#define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16
+#define HIDMA_WRR_BIT_POS 8
+#define HIDMA_PRIORITY_BIT_POS 15
+
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_MAX_CHANNEL_WEIGHT 15
+
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
+{
+ unsigned int i;
+ u32 val;
+
+ if (!is_power_of_2(mgmtdev->max_write_request) ||
+ (mgmtdev->max_write_request < 128) ||
+ (mgmtdev->max_write_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
+ mgmtdev->max_write_request);
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(mgmtdev->max_read_request) ||
+ (mgmtdev->max_read_request < 128) ||
+ (mgmtdev->max_read_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
+ mgmtdev->max_read_request);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+			"max_wr_xactions cannot be bigger than %lu\n",
+ HIDMA_MAX_WR_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+			"max_rd_xactions cannot be bigger than %lu\n",
+ HIDMA_MAX_RD_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ if (mgmtdev->priority[i] > 1) {
+ dev_err(&mgmtdev->pdev->dev,
+ "priority can be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max value of weight can be %d.\n",
+ HIDMA_MAX_CHANNEL_WEIGHT);
+ return -EINVAL;
+ }
+
+ /* weight needs to be at least one */
+ if (mgmtdev->weight[i] == 0)
+ mgmtdev->weight[i] = 1;
+ }
+
+ pm_runtime_get_sync(&mgmtdev->pdev->dev);
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+ val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
+ val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
+ val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
+ val |= mgmtdev->max_read_request;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+ val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
+ val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
+ val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
+ val |= mgmtdev->max_rd_xactions;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+
+ mgmtdev->hw_version =
+ readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
+ mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
+ mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ u32 weight = mgmtdev->weight[i];
+ u32 priority = mgmtdev->priority[i];
+
+ val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
+ val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
+ val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
+ val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
+ writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ }
+
+ val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+ val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
+ val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
+ writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+
+ pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
+ pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
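
Note: hidma_mgmt_setup() programs every parameter with the same read-modify-write sequence: read the register, clear the field, OR in the new value, write it back. The idiom in isolation (rmw_field() is a hypothetical helper, not part of the driver):

static void rmw_field(void __iomem *addr, u32 mask, u32 shift, u32 val)
{
	u32 tmp = readl(addr);

	tmp &= ~(mask << shift);	/* clear the target field */
	tmp |= (val & mask) << shift;	/* insert the new value */
	writel(tmp, addr);
}
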
+
+static int hidma_mgmt_probe(struct platform_device *pdev)
+{
+ struct hidma_mgmt_dev *mgmtdev;
+ struct resource *res;
+ void __iomem *virtaddr;
+ int irq;
+ int rc;
+ u32 val;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(virtaddr)) {
+		rc = PTR_ERR(virtaddr);
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq resources not found\n");
+ rc = irq;
+ goto out;
+ }
+
+ mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
+ if (!mgmtdev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->pdev = pdev;
+ mgmtdev->addrsize = resource_size(res);
+ mgmtdev->virtaddr = virtaddr;
+
+ rc = device_property_read_u32(&pdev->dev, "dma-channels",
+ &mgmtdev->dma_channels);
+ if (rc) {
+ dev_err(&pdev->dev, "number of channels missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev,
+ "channel-reset-timeout-cycles",
+ &mgmtdev->chreset_timeout_cycles);
+ if (rc) {
+ dev_err(&pdev->dev, "channel reset timeout missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
+ &mgmtdev->max_write_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
+ &mgmtdev->max_read_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
+ &mgmtdev->max_wr_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-transactions missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
+ &mgmtdev->max_rd_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-transactions missing\n");
+ goto out;
+ }
+
+ mgmtdev->priority = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->priority),
+ GFP_KERNEL);
+ if (!mgmtdev->priority) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->weight = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->weight), GFP_KERNEL);
+ if (!mgmtdev->weight) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = hidma_mgmt_setup(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "setup failed\n");
+ goto out;
+ }
+
+ /* start the HW */
+ val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+ val |= 1;
+ writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+
+ rc = hidma_mgmt_init_sys(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "sysfs setup failed\n");
+ goto out;
+ }
+
+ dev_info(&pdev->dev,
+ "HW rev: %d.%d @ %pa with %d physical channels\n",
+ mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
+ &res->start, mgmtdev->dma_channels);
+
+ platform_set_drvdata(pdev, mgmtdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+out:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
+ {"QCOM8060"},
+ {},
+};
+#endif
+
+static const struct of_device_id hidma_mgmt_match[] = {
+ {.compatible = "qcom,hidma-mgmt-1.0",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
+
+static struct platform_driver hidma_mgmt_driver = {
+ .probe = hidma_mgmt_probe,
+ .driver = {
+ .name = "hidma-mgmt",
+ .of_match_table = hidma_mgmt_match,
+ .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_mgmt_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 000000000000..f7daf33769f4
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@
+/*
+ * Qualcomm Technologies HIDMA Management common header
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+struct hidma_mgmt_dev {
+ u8 hw_version_major;
+ u8 hw_version_minor;
+
+ u32 max_wr_xactions;
+ u32 max_rd_xactions;
+ u32 max_write_request;
+ u32 max_read_request;
+ u32 dma_channels;
+ u32 chreset_timeout_cycles;
+ u32 hw_version;
+ u32 *priority;
+ u32 *weight;
+
+ /* Hardware device constants */
+ void __iomem *virtaddr;
+ resource_size_t addrsize;
+
+ struct kobject **chroots;
+ struct platform_device *pdev;
+};
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev);
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev);
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 000000000000..d61f1068a34b
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,295 @@
+/*
+ * Qualcomm Technologies HIDMA Management SYS interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "hidma_mgmt.h"
+
+struct hidma_chan_attr {
+ struct hidma_mgmt_dev *mdev;
+ int index;
+ struct kobj_attribute attr;
+};
+
+struct hidma_mgmt_fileinfo {
+ char *name;
+ int mode;
+ int (*get)(struct hidma_mgmt_dev *mdev);
+ int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
+};
+
+#define IMPLEMENT_GETSET(name) \
+static int get_##name(struct hidma_mgmt_dev *mdev) \
+{ \
+ return mdev->name; \
+} \
+static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \
+{ \
+ u64 tmp; \
+ int rc; \
+ \
+ tmp = mdev->name; \
+ mdev->name = val; \
+ rc = hidma_mgmt_setup(mdev); \
+ if (rc) \
+ mdev->name = tmp; \
+ return rc; \
+}
+
+#define DECLARE_ATTRIBUTE(name, mode) \
+ {#name, mode, get_##name, set_##name}
+
+IMPLEMENT_GETSET(hw_version_major)
+IMPLEMENT_GETSET(hw_version_minor)
+IMPLEMENT_GETSET(max_wr_xactions)
+IMPLEMENT_GETSET(max_rd_xactions)
+IMPLEMENT_GETSET(max_write_request)
+IMPLEMENT_GETSET(max_read_request)
+IMPLEMENT_GETSET(dma_channels)
+IMPLEMENT_GETSET(chreset_timeout_cycles)
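
Note: each IMPLEMENT_GETSET() invocation above expands to a getter plus a set-then-rollback setter. IMPLEMENT_GETSET(max_wr_xactions), for example, becomes roughly:

static int get_max_wr_xactions(struct hidma_mgmt_dev *mdev)
{
	return mdev->max_wr_xactions;
}

static int set_max_wr_xactions(struct hidma_mgmt_dev *mdev, u64 val)
{
	u64 tmp;
	int rc;

	tmp = mdev->max_wr_xactions;
	mdev->max_wr_xactions = val;
	rc = hidma_mgmt_setup(mdev);	/* re-validate and reprogram HW */
	if (rc)
		mdev->max_wr_xactions = tmp;	/* roll back on failure */
	return rc;
}
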
+
+static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->priority[i];
+ mdev->priority[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->priority[i] = tmp;
+ return rc;
+}
+
+static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->weight[i];
+ mdev->weight[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->weight[i] = tmp;
+ return rc;
+}
+
+static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
+ DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO),
+ DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO),
+ DECLARE_ATTRIBUTE(dma_channels, S_IRUGO),
+ DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_write_request, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_read_request, S_IRUGO),
+};
+
+static ssize_t show_values(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ buf[0] = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
+ break;
+ }
+ }
+ return strlen(buf);
+}
+
+static ssize_t set_values(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned long tmp;
+ unsigned int i;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ rc = hidma_mgmt_files[i].set(mdev, tmp);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ }
+ return count;
+}
+
+static ssize_t show_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+
+ buf[0] = 0;
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+ if (strcmp(attr->attr.name, "priority") == 0)
+ sprintf(buf, "%d\n", mdev->priority[chattr->index]);
+ else if (strcmp(attr->attr.name, "weight") == 0)
+ sprintf(buf, "%d\n", mdev->weight[chattr->index]);
+
+ return strlen(buf);
+}
+
+static ssize_t set_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+ unsigned long tmp;
+ int rc;
+
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ if (strcmp(attr->attr.name, "priority") == 0) {
+ rc = set_priority(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ } else if (strcmp(attr->attr.name, "weight") == 0) {
+ rc = set_weight(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ }
+ return count;
+}
+
+static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+ attrs = devm_kmalloc(&dev->pdev->dev,
+ sizeof(struct device_attribute), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->show = show_values;
+ attrs->store = set_values;
+ sysfs_attr_init(&attrs->attr);
+
+ return device_create_file(&dev->pdev->dev, attrs);
+}
+
+static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
+ int mode, int index,
+ struct kobject *parent)
+{
+ struct hidma_chan_attr *chattr;
+ char *name_copy;
+
+ chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
+ if (!chattr)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ chattr->mdev = mdev;
+ chattr->index = index;
+ chattr->attr.attr.name = name_copy;
+ chattr->attr.attr.mode = mode;
+ chattr->attr.show = show_values_channel;
+ chattr->attr.store = set_values_channel;
+ sysfs_attr_init(&chattr->attr.attr);
+
+ return sysfs_create_file(parent, &chattr->attr.attr);
+}
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
+{
+ unsigned int i;
+ int rc;
+ int required;
+ struct kobject *chanops;
+
+ required = sizeof(*mdev->chroots) * mdev->dma_channels;
+ mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL);
+ if (!mdev->chroots)
+ return -ENOMEM;
+
+ chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
+ if (!chanops)
+ return -ENOMEM;
+
+ /* create each channel directory here */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ char name[20];
+
+ snprintf(name, sizeof(name), "chan%d", i);
+ mdev->chroots[i] = kobject_create_and_add(name, chanops);
+ if (!mdev->chroots[i])
+ return -ENOMEM;
+ }
+
+ /* populate common parameters */
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
+ hidma_mgmt_files[i].mode);
+ if (rc)
+ return rc;
+ }
+
+ /* populate parameters that are per channel */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ rc = create_sysfs_entry_channel(mdev, "priority",
+						(S_IRUGO | S_IWUSR), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+
+ rc = create_sysfs_entry_channel(mdev, "weight",
+						(S_IRUGO | S_IWUSR), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys);
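
Note: the code above yields one file per entry of hidma_mgmt_files[] on the management device, plus per-channel chanops/chan<n>/priority and chanops/chan<n>/weight files. A hypothetical userspace reader (only the chanops/chan%d directory and attribute names come from the code; the platform-device path is an assumption):

#include <stdio.h>

int main(void)
{
	/* hypothetical sysfs path; substitute the real device name */
	FILE *f = fopen("/sys/devices/platform/hidma-mgmt/chanops/chan0/weight",
			"r");
	unsigned int weight;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &weight) == 1)
		printf("chan0 weight: %u\n", weight);
	fclose(f);
	return 0;
}
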
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 9fda65af841e..6e0685f1a838 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -12,7 +12,7 @@ config RENESAS_DMA
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
default y
@@ -41,21 +41,15 @@ endif
config RCAR_DMAC
tristate "Renesas R-Car Gen2 DMA Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
help
This driver supports the general purpose DMA controller found in the
Renesas R-Car second generation SoCs.
-config RCAR_HPB_DMAE
- tristate "Renesas R-Car HPB DMAC support"
- depends on SH_DMAE_BASE
- help
- Enable support for the Renesas R-Car series DMA controllers.
-
config RENESAS_USB_DMAC
tristate "Renesas USB-DMA Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0133e4658196..f1e2fd64f279 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -14,6 +14,5 @@ shdma-objs := $(shdma-y)
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 7820d07e7bee..dfb17926297b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -413,7 +413,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
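
Note: the rcar-dmac hunk above replaces the hard-coded 0x7fff with GENMASK(dmac->n_channels - 1, 0), so only channels that actually exist are cleared. A standalone check that the two forms agree for the 15-channel case (GENMASK is re-defined locally so this compiles in userspace; the kernel supplies its own):

#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	printf("0x%x\n", GENMASK(14, 0));	/* prints 0x7fff */
	return 0;
}
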
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
deleted file mode 100644
index 749f26ecd3b3..000000000000
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ /dev/null
@@ -1,669 +0,0 @@
-/*
- * Copyright (C) 2011-2013 Renesas Electronics Corporation
- * Copyright (C) 2013 Cogent Embedded, Inc.
- *
- * This file is based on the drivers/dma/sh/shdma.c
- *
- * Renesas SuperH DMA Engine support
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * - DMA of SuperH does not have Hardware DMA chain mode.
- * - max DMA size is 16MB.
- *
- */
-
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_data/dma-rcar-hpbdma.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/shdma-base.h>
-#include <linux/slab.h>
-
-/* DMA channel registers */
-#define HPB_DMAE_DSAR0 0x00
-#define HPB_DMAE_DDAR0 0x04
-#define HPB_DMAE_DTCR0 0x08
-#define HPB_DMAE_DSAR1 0x0C
-#define HPB_DMAE_DDAR1 0x10
-#define HPB_DMAE_DTCR1 0x14
-#define HPB_DMAE_DSASR 0x18
-#define HPB_DMAE_DDASR 0x1C
-#define HPB_DMAE_DTCSR 0x20
-#define HPB_DMAE_DPTR 0x24
-#define HPB_DMAE_DCR 0x28
-#define HPB_DMAE_DCMDR 0x2C
-#define HPB_DMAE_DSTPR 0x30
-#define HPB_DMAE_DSTSR 0x34
-#define HPB_DMAE_DDBGR 0x38
-#define HPB_DMAE_DDBGR2 0x3C
-#define HPB_DMAE_CHAN(n) (0x40 * (n))
-
-/* DMA command register (DCMDR) bits */
-#define HPB_DMAE_DCMDR_BDOUT BIT(7)
-#define HPB_DMAE_DCMDR_DQSPD BIT(6)
-#define HPB_DMAE_DCMDR_DQSPC BIT(5)
-#define HPB_DMAE_DCMDR_DMSPD BIT(4)
-#define HPB_DMAE_DCMDR_DMSPC BIT(3)
-#define HPB_DMAE_DCMDR_DQEND BIT(2)
-#define HPB_DMAE_DCMDR_DNXT BIT(1)
-#define HPB_DMAE_DCMDR_DMEN BIT(0)
-
-/* DMA forced stop register (DSTPR) bits */
-#define HPB_DMAE_DSTPR_DMSTP BIT(0)
-
-/* DMA status register (DSTSR) bits */
-#define HPB_DMAE_DSTSR_DQSTS BIT(2)
-#define HPB_DMAE_DSTSR_DMSTS BIT(0)
-
-/* DMA common registers */
-#define HPB_DMAE_DTIMR 0x00
-#define HPB_DMAE_DINTSR0 0x0C
-#define HPB_DMAE_DINTSR1 0x10
-#define HPB_DMAE_DINTCR0 0x14
-#define HPB_DMAE_DINTCR1 0x18
-#define HPB_DMAE_DINTMR0 0x1C
-#define HPB_DMAE_DINTMR1 0x20
-#define HPB_DMAE_DACTSR0 0x24
-#define HPB_DMAE_DACTSR1 0x28
-#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
-#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
-#define HPB_DMAE_HPB_DMLVLR0 0x160
-#define HPB_DMAE_HPB_DMLVLR1 0x164
-#define HPB_DMAE_HPB_DMSHPT0 0x168
-#define HPB_DMAE_HPB_DMSHPT1 0x16C
-
-#define HPB_DMA_SLAVE_NUMBER 256
-#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
-
-struct hpb_dmae_chan {
- struct shdma_chan shdma_chan;
- int xfer_mode; /* DMA transfer mode */
-#define XFER_SINGLE 1
-#define XFER_DOUBLE 2
- unsigned plane_idx; /* current DMA information set */
- bool first_desc; /* first/next transfer */
- int xmit_shift; /* log_2(bytes_per_xfer) */
- void __iomem *base;
- const struct hpb_dmae_slave_config *cfg;
- char dev_id[16]; /* unique name per DMAC of channel */
- dma_addr_t slave_addr;
-};
-
-struct hpb_dmae_device {
- struct shdma_dev shdma_dev;
- spinlock_t reg_lock; /* comm_reg operation lock */
- struct hpb_dmae_pdata *pdata;
- void __iomem *chan_reg;
- void __iomem *comm_reg;
- void __iomem *reset_reg;
- void __iomem *mode_reg;
-};
-
-struct hpb_dmae_regs {
- u32 sar; /* SAR / source address */
- u32 dar; /* DAR / destination address */
- u32 tcr; /* TCR / transfer count */
-};
-
-struct hpb_desc {
- struct shdma_desc shdma_desc;
- struct hpb_dmae_regs hw;
- unsigned plane_idx;
-};
-
-#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
-#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
-#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
- struct hpb_dmae_device, shdma_dev.dma_dev)
-
-static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
-{
- iowrite32(data, hpb_dc->base + reg);
-}
-
-static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
-{
- return ioread32(hpb_dc->base + reg);
-}
-
-static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
-{
- iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
-}
-
-static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
-}
-
-static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- u32 v;
-
- if (ch < 32)
- v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
- else
- v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
- return v & 0x1;
-}
-
-static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- if (ch < 32)
- iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
- else
- iowrite32((0x1 << (ch - 32)),
- hpbdev->comm_reg + HPB_DMAE_DINTCR1);
-}
-
-static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
-{
- iowrite32(data, hpbdev->mode_reg);
-}
-
-static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
-{
- return ioread32(hpbdev->mode_reg);
-}
-
-static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
-{
- u32 intreg;
-
- spin_lock_irq(&hpbdev->reg_lock);
- if (ch < 32) {
- intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
- iowrite32(BIT(ch) | intreg,
- hpbdev->comm_reg + HPB_DMAE_DINTMR0);
- } else {
- intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
- iowrite32(BIT(ch - 32) | intreg,
- hpbdev->comm_reg + HPB_DMAE_DINTMR1);
- }
- spin_unlock_irq(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
-{
- u32 rstr;
- int timeout = 10000; /* 100 ms */
-
- spin_lock(&hpbdev->reg_lock);
- rstr = ioread32(hpbdev->reset_reg);
- rstr |= data;
- iowrite32(rstr, hpbdev->reset_reg);
- do {
- rstr = ioread32(hpbdev->reset_reg);
- if ((rstr & data) == data)
- break;
- udelay(10);
- } while (timeout--);
-
- if (timeout < 0)
- dev_err(hpbdev->shdma_dev.dma_dev.dev,
- "%s timeout\n", __func__);
-
- rstr &= ~data;
- iowrite32(rstr, hpbdev->reset_reg);
- spin_unlock(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
- u32 mask, u32 data)
-{
- u32 mode;
-
- spin_lock_irq(&hpbdev->reg_lock);
- mode = asyncmdr_read(hpbdev);
- mode &= ~mask;
- mode |= data;
- asyncmdr_write(hpbdev, mode);
- spin_unlock_irq(&hpbdev->reg_lock);
-}
-
-static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
-{
- dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
-}
-
-static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
-{
- u32 ch;
-
- for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
- hsrstr_write(hpbdev, ch);
-}
-
-static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
-{
- struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
- struct hpb_dmae_pdata *pdata = hpbdev->pdata;
- int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
- int i;
-
- switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
- case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
- default:
- i = XMIT_SZ_8BIT;
- break;
- case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
- i = XMIT_SZ_16BIT;
- break;
- case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
- i = XMIT_SZ_32BIT;
- break;
- }
- return pdata->ts_shift[i];
-}
-
-static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
- struct hpb_dmae_regs *hw, unsigned plane)
-{
- ch_reg_write(hpb_chan, hw->sar,
- plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
- ch_reg_write(hpb_chan, hw->dar,
- plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
- ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
- plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
-}
-
-static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
-{
- ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
- HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
-}
-
-static void hpb_dmae_halt(struct shdma_chan *schan)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
-
- ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
- ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
-
- chan->plane_idx = 0;
- chan->first_desc = true;
-}
-
-static const struct hpb_dmae_slave_config *
-hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
-{
- struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
- struct hpb_dmae_pdata *pdata = hpbdev->pdata;
- int i;
-
- if (slave_id >= HPB_DMA_SLAVE_NUMBER)
- return NULL;
-
- for (i = 0; i < pdata->num_slaves; i++)
- if (pdata->slaves[i].id == slave_id)
- return pdata->slaves + i;
-
- return NULL;
-}
-
-static void hpb_dmae_start_xfer(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- struct hpb_dmae_device *hpbdev = to_dev(chan);
- struct hpb_desc *desc = to_desc(sdesc);
-
- if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
- hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
-
- desc->plane_idx = chan->plane_idx;
- hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
- hpb_dmae_start(chan, !chan->first_desc);
-
- if (chan->xfer_mode == XFER_DOUBLE) {
- chan->plane_idx ^= 1;
- chan->first_desc = false;
- }
-}
-
-static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- /*
- * This is correct since we always have at most single
- * outstanding DMA transfer per channel, and by the time
- * we get completion interrupt the transfer is completed.
- * This will change if we ever use alternating DMA
- * information sets and submit two descriptors at once.
- */
- return true;
-}
-
-static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- struct hpb_dmae_device *hpbdev = to_dev(chan);
- int ch = chan->cfg->dma_ch;
-
- /* Check Complete DMA Transfer */
- if (dintsr_read(hpbdev, ch)) {
- /* Clear Interrupt status */
- dintcr_write(hpbdev, ch);
- return true;
- }
- return false;
-}
-
-static int hpb_dmae_desc_setup(struct shdma_chan *schan,
- struct shdma_desc *sdesc,
- dma_addr_t src, dma_addr_t dst, size_t *len)
-{
- struct hpb_desc *desc = to_desc(sdesc);
-
- if (*len > (size_t)HPB_DMA_TCR_MAX)
- *len = (size_t)HPB_DMA_TCR_MAX;
-
- desc->hw.sar = src;
- desc->hw.dar = dst;
- desc->hw.tcr = *len;
-
- return 0;
-}
-
-static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- struct hpb_desc *desc = to_desc(sdesc);
- struct hpb_dmae_chan *chan = to_chan(schan);
- u32 tcr = ch_reg_read(chan, desc->plane_idx ?
- HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
-
- return (desc->hw.tcr - tcr) << chan->xmit_shift;
-}
-
-static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
-
- if (chan->xfer_mode == XFER_DOUBLE)
- return dstsr & HPB_DMAE_DSTSR_DQSTS;
- else
- return dstsr & HPB_DMAE_DSTSR_DMSTS;
-}
-
-static int
-hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
- const struct hpb_dmae_slave_config *cfg)
-{
- struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
- struct hpb_dmae_pdata *pdata = hpbdev->pdata;
- const struct hpb_dmae_channel *channel = pdata->channels;
- int slave_id = cfg->id;
- int i, err;
-
- for (i = 0; i < pdata->num_channels; i++, channel++) {
- if (channel->s_id == slave_id) {
- struct device *dev = hpb_chan->shdma_chan.dev;
-
- hpb_chan->base = hpbdev->chan_reg +
- HPB_DMAE_CHAN(cfg->dma_ch);
-
- dev_dbg(dev, "Detected Slave device\n");
- dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
- dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
- dev_dbg(dev, " -- channel->ch_irq: %d\n",
- channel->ch_irq);
- break;
- }
- }
-
- err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
- IRQF_SHARED, hpb_chan->dev_id);
- if (err) {
- dev_err(hpb_chan->shdma_chan.dev,
- "DMA channel request_irq %d failed with error %d\n",
- channel->ch_irq, err);
- return err;
- }
-
- hpb_chan->plane_idx = 0;
- hpb_chan->first_desc = true;
-
- if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
- hpb_chan->xfer_mode = XFER_SINGLE;
- } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
- (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
- hpb_chan->xfer_mode = XFER_DOUBLE;
- } else {
- dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
- return -EINVAL;
- }
-
- if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
- hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
- ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
- ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
- hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
- hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
-
- return 0;
-}
-
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
- dma_addr_t slave_addr, bool try)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
- const struct hpb_dmae_slave_config *sc =
- hpb_dmae_find_slave(chan, slave_id);
-
- if (!sc)
- return -ENODEV;
- if (try)
- return 0;
- chan->cfg = sc;
- chan->slave_addr = slave_addr ? : sc->addr;
- return hpb_dmae_alloc_chan_resources(chan, sc);
-}
-
-static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
-{
-}
-
-static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
-{
- struct hpb_dmae_chan *chan = to_chan(schan);
-
- return chan->slave_addr;
-}
-
-static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
-{
- return &((struct hpb_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops hpb_dmae_ops = {
- .desc_completed = hpb_dmae_desc_completed,
- .halt_channel = hpb_dmae_halt,
- .channel_busy = hpb_dmae_channel_busy,
- .slave_addr = hpb_dmae_slave_addr,
- .desc_setup = hpb_dmae_desc_setup,
- .set_slave = hpb_dmae_set_slave,
- .setup_xfer = hpb_dmae_setup_xfer,
- .start_xfer = hpb_dmae_start_xfer,
- .embedded_desc = hpb_dmae_embedded_desc,
- .chan_irq = hpb_dmae_chan_irq,
- .get_partial = hpb_dmae_get_partial,
-};
-
-static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
-{
- struct shdma_dev *sdev = &hpbdev->shdma_dev;
- struct platform_device *pdev =
- to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
- struct hpb_dmae_chan *new_hpb_chan;
- struct shdma_chan *schan;
-
- /* Alloc channel */
- new_hpb_chan = devm_kzalloc(&pdev->dev,
- sizeof(struct hpb_dmae_chan), GFP_KERNEL);
- if (!new_hpb_chan) {
- dev_err(hpbdev->shdma_dev.dma_dev.dev,
- "No free memory for allocating DMA channels!\n");
- return -ENOMEM;
- }
-
- schan = &new_hpb_chan->shdma_chan;
- schan->max_xfer_len = HPB_DMA_TCR_MAX;
-
- shdma_chan_probe(sdev, schan, id);
-
- if (pdev->id >= 0)
- snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
- "hpb-dmae%d.%d", pdev->id, id);
- else
- snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
- "hpb-dma.%d", id);
-
- return 0;
-}
-
-static int hpb_dmae_probe(struct platform_device *pdev)
-{
- const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
- DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
- struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
- struct hpb_dmae_device *hpbdev;
- struct dma_device *dma_dev;
- struct resource *chan, *comm, *rest, *mode, *irq_res;
- int err, i;
-
- /* Get platform data */
- if (!pdata || !pdata->num_channels)
- return -ENODEV;
-
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res)
- return -ENODEV;
-
- hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
- GFP_KERNEL);
- if (!hpbdev) {
- dev_err(&pdev->dev, "Not enough memory\n");
- return -ENOMEM;
- }
-
- hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
- if (IS_ERR(hpbdev->chan_reg))
- return PTR_ERR(hpbdev->chan_reg);
-
- hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
- if (IS_ERR(hpbdev->comm_reg))
- return PTR_ERR(hpbdev->comm_reg);
-
- hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
- if (IS_ERR(hpbdev->reset_reg))
- return PTR_ERR(hpbdev->reset_reg);
-
- hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
- if (IS_ERR(hpbdev->mode_reg))
- return PTR_ERR(hpbdev->mode_reg);
-
- dma_dev = &hpbdev->shdma_dev.dma_dev;
-
- spin_lock_init(&hpbdev->reg_lock);
-
- /* Platform data */
- hpbdev->pdata = pdata;
-
- pm_runtime_enable(&pdev->dev);
- err = pm_runtime_get_sync(&pdev->dev);
- if (err < 0)
- dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
-
- /* Reset DMA controller */
- hpb_dmae_reset(hpbdev);
-
- pm_runtime_put(&pdev->dev);
-
- dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
- dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
- dma_dev->src_addr_widths = widths;
- dma_dev->dst_addr_widths = widths;
- dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
- dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-
- hpbdev->shdma_dev.ops = &hpb_dmae_ops;
- hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
- err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
- if (err < 0)
- goto error;
-
- /* Create DMA channels */
- for (i = 0; i < pdata->num_channels; i++)
- hpb_dmae_chan_probe(hpbdev, i);
-
- platform_set_drvdata(pdev, hpbdev);
- err = dma_async_device_register(dma_dev);
- if (!err)
- return 0;
-
- shdma_cleanup(&hpbdev->shdma_dev);
-error:
- pm_runtime_disable(&pdev->dev);
- return err;
-}
-
-static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
-{
- struct shdma_chan *schan;
- int i;
-
- shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
- BUG_ON(!schan);
-
- shdma_chan_remove(schan);
- }
-}
-
-static int hpb_dmae_remove(struct platform_device *pdev)
-{
- struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
-
- dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
-
- pm_runtime_disable(&pdev->dev);
-
- hpb_dmae_chan_remove(hpbdev);
-
- return 0;
-}
-
-static void hpb_dmae_shutdown(struct platform_device *pdev)
-{
- struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
- hpb_dmae_ctl_stop(hpbdev);
-}
-
-static struct platform_driver hpb_dmae_driver = {
- .probe = hpb_dmae_probe,
- .remove = hpb_dmae_remove,
- .shutdown = hpb_dmae_shutdown,
- .driver = {
- .name = "hpb-dma-engine",
- },
-};
-module_platform_driver(hpb_dmae_driver);
-
-MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
-MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 11707df1a689..80d86402490e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -699,7 +699,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
if (pdev->dev.of_node)
- pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ pdata = of_device_get_match_data(&pdev->dev);
else
pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f1bcc2a163b3..749f1bd5d65d 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -448,7 +448,7 @@ usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
{
struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
- struct usb_dmac_desc *desc;
+ struct usb_dmac_desc *desc, *_desc;
unsigned long flags;
LIST_HEAD(head);
LIST_HEAD(list);
@@ -459,7 +459,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
if (uchan->desc)
uchan->desc = NULL;
list_splice_init(&uchan->desc_got, &list);
- list_for_each_entry(desc, &list, node)
+ list_for_each_entry_safe(desc, _desc, &list, node)
list_move_tail(&desc->node, &uchan->desc_freed);
spin_unlock_irqrestore(&uchan->vc.lock, flags);
vchan_dma_desc_free_list(&uchan->vc, &head);
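
Note: the usb-dmac fix above is needed because list_move_tail() relinks the node the iterator is standing on; list_for_each_entry_safe() caches the next entry before the body runs. The pattern in isolation (a kernel-side sketch with a made-up item type):

#include <linux/list.h>

struct item {
	struct list_head node;
};

static void drain(struct list_head *src, struct list_head *dst)
{
	struct item *it, *tmp;

	/*
	 * A plain list_for_each_entry() would follow it->node.next into
	 * dst after the move; the _safe variant reads the next entry
	 * before each iteration body runs.
	 */
	list_for_each_entry_safe(it, tmp, src, node)
		list_move_tail(&it->node, dst);
}
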
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 22ea2419ee56..e48350e65089 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -989,7 +989,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
return 0;
}
-static int sirfsoc_dma_runtime_suspend(struct device *dev)
+static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -997,7 +997,7 @@ static int sirfsoc_dma_runtime_suspend(struct device *dev)
return 0;
}
-static int sirfsoc_dma_runtime_resume(struct device *dev)
+static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
int ret;
@@ -1010,8 +1010,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_dma_pm_suspend(struct device *dev)
+static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
@@ -1062,7 +1061,7 @@ static int sirfsoc_dma_pm_suspend(struct device *dev)
return 0;
}
-static int sirfsoc_dma_pm_resume(struct device *dev)
+static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
@@ -1121,7 +1120,6 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index dd3e7ba273ad..6fb8307468ab 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3543,8 +3543,8 @@ static int __init d40_probe(struct platform_device *pdev)
struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int ret = -ENOENT;
- struct d40_base *base = NULL;
- struct resource *res = NULL;
+ struct d40_base *base;
+ struct resource *res;
int num_reserved_chans;
u32 val;
@@ -3552,17 +3552,17 @@ static int __init d40_probe(struct platform_device *pdev)
if (np) {
if (d40_of_probe(pdev, np)) {
ret = -ENOMEM;
- goto failure;
+ goto report_failure;
}
} else {
d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
- goto failure;
+ goto report_failure;
}
}
base = d40_hw_detect_init(pdev);
if (!base)
- goto failure;
+ goto report_failure;
num_reserved_chans = d40_phy_res_init(base);
@@ -3693,51 +3693,48 @@ static int __init d40_probe(struct platform_device *pdev)
return 0;
failure:
- if (base) {
- if (base->desc_slab)
- kmem_cache_destroy(base->desc_slab);
- if (base->virtbase)
- iounmap(base->virtbase);
-
- if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
- iounmap(base->lcla_pool.base);
- base->lcla_pool.base = NULL;
- }
+ kmem_cache_destroy(base->desc_slab);
+ if (base->virtbase)
+ iounmap(base->virtbase);
- if (base->lcla_pool.dma_addr)
- dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
- SZ_1K * base->num_phy_chans,
- DMA_TO_DEVICE);
-
- if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
- free_pages((unsigned long)base->lcla_pool.base,
- base->lcla_pool.pages);
-
- kfree(base->lcla_pool.base_unaligned);
-
- if (base->phy_lcpa)
- release_mem_region(base->phy_lcpa,
- base->lcpa_size);
- if (base->phy_start)
- release_mem_region(base->phy_start,
- base->phy_size);
- if (base->clk) {
- clk_disable_unprepare(base->clk);
- clk_put(base->clk);
- }
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
- if (base->lcpa_regulator) {
- regulator_disable(base->lcpa_regulator);
- regulator_put(base->lcpa_regulator);
- }
+ if (base->lcla_pool.dma_addr)
+ dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
+ SZ_1K * base->num_phy_chans,
+ DMA_TO_DEVICE);
- kfree(base->lcla_pool.alloc_map);
- kfree(base->lookup_log_chans);
- kfree(base->lookup_phy_chans);
- kfree(base->phy_res);
- kfree(base);
+ if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
+ free_pages((unsigned long)base->lcla_pool.base,
+ base->lcla_pool.pages);
+
+ kfree(base->lcla_pool.base_unaligned);
+
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+ if (base->phy_start)
+ release_mem_region(base->phy_start,
+ base->phy_size);
+ if (base->clk) {
+ clk_disable_unprepare(base->clk);
+ clk_put(base->clk);
+ }
+
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
}
+ kfree(base->lcla_pool.alloc_map);
+ kfree(base->lookup_log_chans);
+ kfree(base->lookup_phy_chans);
+ kfree(base->phy_res);
+ kfree(base);
+report_failure:
d40_err(&pdev->dev, "probe failed\n");
return ret;
}
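
Note: the ste_dma40 rework above removes NULL checks that can no longer trigger and introduces report_failure for errors that occur before base is allocated. The resulting shape, sketched (later_setup() is a hypothetical stand-in for everything between detection and success):

static int __init probe_sketch(struct platform_device *pdev)
{
	struct d40_base *base;
	int ret = -ENOENT;

	base = d40_hw_detect_init(pdev);
	if (!base)
		goto report_failure;	/* nothing to unwind yet */

	ret = later_setup(base);
	if (ret)
		goto failure;		/* base is known valid here */

	return 0;

failure:
	/* unwind everything hanging off base; no NULL checks needed */
report_failure:
	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}
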
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
new file mode 100644
index 000000000000..047476a1383d
--- /dev/null
+++ b/drivers/dma/stm32-dma.c
@@ -0,0 +1,1141 @@
+/*
+ * Driver for STM32 DMA controller
+ *
+ * Inspired by dma-jz4740.c and tegra20-apb-dma.c
+ *
+ * Copyright (C) M'boumba Cedric Madianga 2015
+ * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+#define STM32_DMA_LISR 0x0000 /* DMA Low Int Status Reg */
+#define STM32_DMA_HISR 0x0004 /* DMA High Int Status Reg */
+#define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */
+#define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */
+#define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */
+#define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */
+#define STM32_DMA_DMEI BIT(2) /* Direct Mode Error Interrupt */
+#define STM32_DMA_FEI BIT(0) /* FIFO Error Interrupt */
+
+/* DMA Stream x Configuration Register */
+#define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */
+#define STM32_DMA_SCR_REQ(n) ((n & 0x7) << 25)
+#define STM32_DMA_SCR_MBURST_MASK GENMASK(24, 23)
+#define STM32_DMA_SCR_MBURST(n) ((n & 0x3) << 23)
+#define STM32_DMA_SCR_PBURST_MASK GENMASK(22, 21)
+#define STM32_DMA_SCR_PBURST(n) ((n & 0x3) << 21)
+#define STM32_DMA_SCR_PL_MASK GENMASK(17, 16)
+#define STM32_DMA_SCR_PL(n) ((n & 0x3) << 16)
+#define STM32_DMA_SCR_MSIZE_MASK GENMASK(14, 13)
+#define STM32_DMA_SCR_MSIZE(n) ((n & 0x3) << 13)
+#define STM32_DMA_SCR_PSIZE_MASK GENMASK(12, 11)
+#define STM32_DMA_SCR_PSIZE(n) ((n & 0x3) << 11)
+#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
+#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
+#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
+#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
+#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
+#define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */
+#define STM32_DMA_SCR_MINC BIT(10) /* Memory increment mode */
+#define STM32_DMA_SCR_PINC BIT(9) /* Peripheral increment mode */
+#define STM32_DMA_SCR_CIRC BIT(8) /* Circular mode */
+#define STM32_DMA_SCR_PFCTRL BIT(5) /* Peripheral Flow Controller */
+#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
+#define STM32_DMA_SCR_TEIE BIT(2) /* Transfer Error Int Enable */
+#define STM32_DMA_SCR_DMEIE BIT(1) /* Direct Mode Err Int Enable */
+#define STM32_DMA_SCR_EN BIT(0) /* Stream Enable */
+#define STM32_DMA_SCR_CFG_MASK (STM32_DMA_SCR_PINC \
+ | STM32_DMA_SCR_MINC \
+ | STM32_DMA_SCR_PINCOS \
+ | STM32_DMA_SCR_PL_MASK)
+#define STM32_DMA_SCR_IRQ_MASK (STM32_DMA_SCR_TCIE \
+ | STM32_DMA_SCR_TEIE \
+ | STM32_DMA_SCR_DMEIE)
+
+/* DMA Stream x number of data register */
+#define STM32_DMA_SNDTR(x) (0x0014 + 0x18 * (x))
+
+/* DMA stream peripheral address register */
+#define STM32_DMA_SPAR(x) (0x0018 + 0x18 * (x))
+
+/* DMA stream x memory 0 address register */
+#define STM32_DMA_SM0AR(x) (0x001c + 0x18 * (x))
+
+/* DMA stream x memory 1 address register */
+#define STM32_DMA_SM1AR(x) (0x0020 + 0x18 * (x))
+
+/* DMA stream x FIFO control register */
+#define STM32_DMA_SFCR(x) (0x0024 + 0x18 * (x))
+#define STM32_DMA_SFCR_FTH_MASK GENMASK(1, 0)
+#define STM32_DMA_SFCR_FTH(n) (n & STM32_DMA_SFCR_FTH_MASK)
+#define STM32_DMA_SFCR_FEIE BIT(7) /* FIFO error interrupt enable */
+#define STM32_DMA_SFCR_DMDIS BIT(2) /* Direct mode disable */
+#define STM32_DMA_SFCR_MASK (STM32_DMA_SFCR_FEIE \
+ | STM32_DMA_SFCR_DMDIS)
+
+/* DMA direction */
+#define STM32_DMA_DEV_TO_MEM 0x00
+#define STM32_DMA_MEM_TO_DEV 0x01
+#define STM32_DMA_MEM_TO_MEM 0x02
+
+/* DMA priority level */
+#define STM32_DMA_PRIORITY_LOW 0x00
+#define STM32_DMA_PRIORITY_MEDIUM 0x01
+#define STM32_DMA_PRIORITY_HIGH 0x02
+#define STM32_DMA_PRIORITY_VERY_HIGH 0x03
+
+/* DMA FIFO threshold selection */
+#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL 0x00
+#define STM32_DMA_FIFO_THRESHOLD_HALFFULL 0x01
+#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL 0x02
+#define STM32_DMA_FIFO_THRESHOLD_FULL 0x03
+
+#define STM32_DMA_MAX_DATA_ITEMS 0xffff
+#define STM32_DMA_MAX_CHANNELS 0x08
+#define STM32_DMA_MAX_REQUEST_ID 0x08
+#define STM32_DMA_MAX_DATA_PARAM 0x03
+
+enum stm32_dma_width {
+ STM32_DMA_BYTE,
+ STM32_DMA_HALF_WORD,
+ STM32_DMA_WORD,
+};
+
+enum stm32_dma_burst_size {
+ STM32_DMA_BURST_SINGLE,
+ STM32_DMA_BURST_INCR4,
+ STM32_DMA_BURST_INCR8,
+ STM32_DMA_BURST_INCR16,
+};
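+
+/*
+ * The enum values above match the hardware field encodings programmed via
+ * STM32_DMA_SCR_{P,M}SIZE() and STM32_DMA_SCR_{P,M}BURST(), so they can be
+ * written into the SCR register as-is.
+ */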
+
+struct stm32_dma_cfg {
+ u32 channel_id;
+ u32 request_line;
+ u32 stream_config;
+ u32 threshold;
+};
+
+struct stm32_dma_chan_reg {
+ u32 dma_lisr;
+ u32 dma_hisr;
+ u32 dma_lifcr;
+ u32 dma_hifcr;
+ u32 dma_scr;
+ u32 dma_sndtr;
+ u32 dma_spar;
+ u32 dma_sm0ar;
+ u32 dma_sm1ar;
+ u32 dma_sfcr;
+};
+
+struct stm32_dma_sg_req {
+ u32 len;
+ struct stm32_dma_chan_reg chan_reg;
+};
+
+struct stm32_dma_desc {
+ struct virt_dma_desc vdesc;
+ bool cyclic;
+ u32 num_sgs;
+ struct stm32_dma_sg_req sg_req[];
+};
+
+struct stm32_dma_chan {
+ struct virt_dma_chan vchan;
+ bool config_init;
+ bool busy;
+ u32 id;
+ u32 irq;
+ struct stm32_dma_desc *desc;
+ u32 next_sg;
+ struct dma_slave_config dma_sconfig;
+ struct stm32_dma_chan_reg chan_reg;
+};
+
+struct stm32_dma_device {
+ struct dma_device ddev;
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ bool mem2mem;
+ struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
+};
+
+static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct stm32_dma_device,
+ ddev);
+}
+
+static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct stm32_dma_chan, vchan.chan);
+}
+
+static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct stm32_dma_desc, vdesc);
+}
+
+static struct device *chan2dev(struct stm32_dma_chan *chan)
+{
+ return &chan->vchan.chan.dev->device;
+}
+
+static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
+{
+ return readl_relaxed(dmadev->base + reg);
+}
+
+static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
+{
+ writel_relaxed(val, dmadev->base + reg);
+}
+
+static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
+{
+ return kzalloc(sizeof(struct stm32_dma_desc) +
+ sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
+}
+
+static int stm32_dma_get_width(struct stm32_dma_chan *chan,
+ enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return STM32_DMA_BYTE;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return STM32_DMA_HALF_WORD;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return STM32_DMA_WORD;
+ default:
+		dev_err(chan2dev(chan), "DMA bus width not supported\n");
+ return -EINVAL;
+ }
+}
+
+static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
+{
+ switch (maxburst) {
+ case 0:
+ case 1:
+ return STM32_DMA_BURST_SINGLE;
+ case 4:
+ return STM32_DMA_BURST_INCR4;
+ case 8:
+ return STM32_DMA_BURST_INCR8;
+ case 16:
+ return STM32_DMA_BURST_INCR16;
+ default:
+		dev_err(chan2dev(chan), "DMA burst size not supported\n");
+ return -EINVAL;
+ }
+}
+
+static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
+ u32 src_maxburst, u32 dst_maxburst)
+{
+ chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;
+
+ if ((!src_maxburst) && (!dst_maxburst)) {
+ /* Using direct mode */
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
+ } else {
+ /* Using FIFO mode */
+ chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
+ }
+}
+
+static int stm32_dma_slave_config(struct dma_chan *c,
+ struct dma_slave_config *config)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+
+ memcpy(&chan->dma_sconfig, config, sizeof(*config));
+
+ chan->config_init = true;
+
+ return 0;
+}
+
+static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 flags, dma_isr;
+
+	/*
+	 * Read "flags" from the DMA_xISR register corresponding to the
+	 * selected DMA channel at the correct bit offset inside that
+	 * register.
+	 *
+	 * If (ch % 4) is 2 or 3, the flags sit 16 bits higher in the
+	 * register; if (ch % 4) is 1 or 3, they sit an additional 6 bits
+	 * higher, so the status is shifted down accordingly. E.g. for
+	 * stream 5 the flags are read from HISR at bit offset 6.
+	 */
+
+ if (chan->id & 4)
+ dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
+ else
+ dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);
+
+ flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+
+ return flags;
+}
+
+static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 dma_ifcr;
+
+ /*
+ * Write "flags" to the DMA_xIFCR register corresponding to the selected
+ * DMA channel at the correct bit offset inside that register.
+ *
+ * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
+ * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
+ */
+ dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+
+ if (chan->id & 4)
+ stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
+ else
+ stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
+}
+
+static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+ u32 dma_scr, id;
+
+ id = chan->id;
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+ if (dma_scr & STM32_DMA_SCR_EN) {
+ dma_scr &= ~STM32_DMA_SCR_EN;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
+
+ do {
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+ dma_scr &= STM32_DMA_SCR_EN;
+ if (!dma_scr)
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ dev_err(chan2dev(chan), "%s: timeout!\n",
+ __func__);
+ return -EBUSY;
+ }
+ cond_resched();
+ } while (1);
+ }
+
+ return 0;
+}
+
+static void stm32_dma_stop(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 dma_scr, dma_sfcr, status;
+ int ret;
+
+ /* Disable interrupts */
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+ dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+ dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
+ stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);
+
+ /* Disable DMA */
+ ret = stm32_dma_disable_chan(chan);
+ if (ret < 0)
+ return;
+
+ /* Clear interrupt status if it is there */
+ status = stm32_dma_irq_status(chan);
+ if (status) {
+ dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
+ __func__, status);
+ stm32_dma_irq_clear(chan, status);
+ }
+
+ chan->busy = false;
+}
+
+static int stm32_dma_terminate_all(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (chan->busy) {
+ stm32_dma_stop(chan);
+ chan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ return 0;
+}
+
+static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+ u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
+ u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
+ u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
+ u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+
+ dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
+ dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
+ dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
+ dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
+ dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
+ dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
+}
+
+static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct virt_dma_desc *vdesc;
+ struct stm32_dma_sg_req *sg_req;
+ struct stm32_dma_chan_reg *reg;
+ u32 status;
+ int ret;
+
+ ret = stm32_dma_disable_chan(chan);
+ if (ret < 0)
+ return ret;
+
+ if (!chan->desc) {
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc)
+ return -EPERM;
+
+ chan->desc = to_stm32_dma_desc(vdesc);
+ chan->next_sg = 0;
+ }
+
+ if (chan->next_sg == chan->desc->num_sgs)
+ chan->next_sg = 0;
+
+ sg_req = &chan->desc->sg_req[chan->next_sg];
+ reg = &sg_req->chan_reg;
+
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
+ stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
+ stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
+ stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
+
+ chan->next_sg++;
+
+ /* Clear interrupt status if it is there */
+ status = stm32_dma_irq_status(chan);
+ if (status)
+ stm32_dma_irq_clear(chan, status);
+
+ stm32_dma_dump_reg(chan);
+
+ /* Start DMA */
+ reg->dma_scr |= STM32_DMA_SCR_EN;
+ stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
+
+ chan->busy = true;
+
+ return 0;
+}
+
+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_sg_req *sg_req;
+ u32 dma_scr, dma_sm0ar, dma_sm1ar, id;
+
+ id = chan->id;
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+ if (dma_scr & STM32_DMA_SCR_DBM) {
+ if (chan->next_sg == chan->desc->num_sgs)
+ chan->next_sg = 0;
+
+ sg_req = &chan->desc->sg_req[chan->next_sg];
+
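+		/*
+		 * CT indicates which memory target the hardware is currently
+		 * using, so reprogram the address of the *other* target.
+		 */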
+ if (dma_scr & STM32_DMA_SCR_CT) {
+ dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
+ stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
+ dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
+ stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
+ } else {
+ dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
+ stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
+ dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
+ stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
+ }
+
+ chan->next_sg++;
+ }
+}
+
+static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+{
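+	/* Called from the IRQ handler with the channel lock held. */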
+ if (chan->desc) {
+ if (chan->desc->cyclic) {
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ stm32_dma_configure_next_sg(chan);
+ } else {
+ chan->busy = false;
+ if (chan->next_sg == chan->desc->num_sgs) {
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ }
+ stm32_dma_start_transfer(chan);
+ }
+ }
+}
+
+static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
+{
+ struct stm32_dma_chan *chan = devid;
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 status, scr, sfcr;
+
+ spin_lock(&chan->vchan.lock);
+
+ status = stm32_dma_irq_status(chan);
+ scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+
+ if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
+ stm32_dma_irq_clear(chan, STM32_DMA_TCI);
+ stm32_dma_handle_chan_done(chan);
+
+ } else {
+ stm32_dma_irq_clear(chan, status);
+ dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+ }
+
+ spin_unlock(&chan->vchan.lock);
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_dma_issue_pending(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (!chan->busy) {
+ if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+ ret = stm32_dma_start_transfer(chan);
+ if ((!ret) && (chan->desc->cyclic))
+ stm32_dma_configure_next_sg(chan);
+ }
+ }
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
+ enum dma_transfer_direction direction,
+ enum dma_slave_buswidth *buswidth)
+{
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
+ int src_bus_width, dst_bus_width;
+ int src_burst_size, dst_burst_size;
+ u32 src_maxburst, dst_maxburst;
+ dma_addr_t src_addr, dst_addr;
+ u32 dma_scr = 0;
+
+ src_addr_width = chan->dma_sconfig.src_addr_width;
+ dst_addr_width = chan->dma_sconfig.dst_addr_width;
+ src_maxburst = chan->dma_sconfig.src_maxburst;
+ dst_maxburst = chan->dma_sconfig.dst_maxburst;
+ src_addr = chan->dma_sconfig.src_addr;
+ dst_addr = chan->dma_sconfig.dst_addr;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
+ if (dst_bus_width < 0)
+ return dst_bus_width;
+
+ dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+ if (dst_burst_size < 0)
+ return dst_burst_size;
+
+ if (!src_addr_width)
+ src_addr_width = dst_addr_width;
+
+ src_bus_width = stm32_dma_get_width(chan, src_addr_width);
+ if (src_bus_width < 0)
+ return src_bus_width;
+
+ src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+ if (src_burst_size < 0)
+ return src_burst_size;
+
+ dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
+ STM32_DMA_SCR_PSIZE(dst_bus_width) |
+ STM32_DMA_SCR_MSIZE(src_bus_width) |
+ STM32_DMA_SCR_PBURST(dst_burst_size) |
+ STM32_DMA_SCR_MBURST(src_burst_size);
+
+ chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
+ *buswidth = dst_addr_width;
+ break;
+
+ case DMA_DEV_TO_MEM:
+ src_bus_width = stm32_dma_get_width(chan, src_addr_width);
+ if (src_bus_width < 0)
+ return src_bus_width;
+
+ src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+ if (src_burst_size < 0)
+ return src_burst_size;
+
+ if (!dst_addr_width)
+ dst_addr_width = src_addr_width;
+
+ dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
+ if (dst_bus_width < 0)
+ return dst_bus_width;
+
+ dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+ if (dst_burst_size < 0)
+ return dst_burst_size;
+
+ dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
+ STM32_DMA_SCR_PSIZE(src_bus_width) |
+ STM32_DMA_SCR_MSIZE(dst_bus_width) |
+ STM32_DMA_SCR_PBURST(src_burst_size) |
+ STM32_DMA_SCR_MBURST(dst_burst_size);
+
+ chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
+ *buswidth = chan->dma_sconfig.src_addr_width;
+ break;
+
+ default:
+		dev_err(chan2dev(chan), "DMA direction is not supported\n");
+ return -EINVAL;
+ }
+
+ stm32_dma_set_fifo_config(chan, src_maxburst, dst_maxburst);
+
+ chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
+ STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
+ STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
+ chan->chan_reg.dma_scr |= dma_scr;
+
+ return 0;
+}
+
+static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
+{
+ memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
+ struct dma_chan *c, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct stm32_dma_desc *desc;
+ struct scatterlist *sg;
+ enum dma_slave_buswidth buswidth;
+ u32 nb_data_items;
+ int i, ret;
+
+ if (!chan->config_init) {
+ dev_err(chan2dev(chan), "dma channel is not configured\n");
+ return NULL;
+ }
+
+ if (sg_len < 1) {
+ dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
+ return NULL;
+ }
+
+ desc = stm32_dma_alloc_desc(sg_len);
+ if (!desc)
+ return NULL;
+
+ ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
+ if (ret < 0)
+ goto err;
+
+ /* Set peripheral flow controller */
+ if (chan->dma_sconfig.device_fc)
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
+ else
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg_req[i].len = sg_dma_len(sg);
+
+ nb_data_items = desc->sg_req[i].len / buswidth;
+ if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+			dev_err(chan2dev(chan), "number of items not supported\n");
+ goto err;
+ }
+
+ stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+ desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
+ desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
+ desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
+ desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
+ desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
+ desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
+ }
+
+ desc->num_sgs = sg_len;
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+err:
+ kfree(desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
+ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct stm32_dma_desc *desc;
+ enum dma_slave_buswidth buswidth;
+ u32 num_periods, nb_data_items;
+ int i, ret;
+
+ if (!buf_len || !period_len) {
+ dev_err(chan2dev(chan), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (!chan->config_init) {
+ dev_err(chan2dev(chan), "dma channel is not configured\n");
+ return NULL;
+ }
+
+ if (buf_len % period_len) {
+ dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
+ return NULL;
+ }
+
+	/*
+	 * We accept additional requests as long as the DMA has not been
+	 * started; the driver then loops over all queued requests. Once
+	 * the DMA is started, new requests can be queued only after
+	 * terminating the transfer.
+	 */
+ if (chan->busy) {
+ dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
+ return NULL;
+ }
+
+ ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
+ if (ret < 0)
+ return NULL;
+
+ nb_data_items = period_len / buswidth;
+ if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+ dev_err(chan2dev(chan), "number of items not supported\n");
+ return NULL;
+ }
+
+ /* Enable Circular mode or double buffer mode */
+ if (buf_len == period_len)
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
+ else
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+
+ /* Clear periph ctrl if client set it */
+ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+
+ num_periods = buf_len / period_len;
+
+ desc = stm32_dma_alloc_desc(num_periods);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < num_periods; i++) {
+ desc->sg_req[i].len = period_len;
+
+ stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+ desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
+ desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
+ desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
+ desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
+ desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
+ desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
+ buf_addr += period_len;
+ }
+
+ desc->num_sgs = num_periods;
+ desc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+ struct dma_chan *c, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ u32 num_sgs;
+ struct stm32_dma_desc *desc;
+ size_t xfer_count, offset;
+ int i;
+
+ num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS);
+ desc = stm32_dma_alloc_desc(num_sgs);
+ if (!desc)
+ return NULL;
+
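+	/*
+	 * In mem-to-mem mode the peripheral port carries the source address
+	 * (SPAR = src) while memory 0 holds the destination (SM0AR = dest),
+	 * with both sides auto-incrementing (PINC | MINC).
+	 */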
+ for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
+ xfer_count = min_t(size_t, len - offset,
+ STM32_DMA_MAX_DATA_ITEMS);
+
+ desc->sg_req[i].len = xfer_count;
+
+ stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+ desc->sg_req[i].chan_reg.dma_scr =
+ STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
+ STM32_DMA_SCR_MINC |
+ STM32_DMA_SCR_PINC |
+ STM32_DMA_SCR_TCIE |
+ STM32_DMA_SCR_TEIE;
+ desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS |
+ STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) |
+ STM32_DMA_SFCR_FEIE;
+ desc->sg_req[i].chan_reg.dma_spar = src + offset;
+ desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
+ desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
+ }
+
+ desc->num_sgs = num_sgs;
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+ struct stm32_dma_desc *desc,
+ u32 next_sg)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ u32 dma_scr, width, residue, count;
+ int i;
+
+ residue = 0;
+
+ for (i = next_sg; i < desc->num_sgs; i++)
+ residue += desc->sg_req[i].len;
+
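+	/*
+	 * For the segment currently in flight (next_sg != 0), SNDTR holds
+	 * the number of data items still to be transferred, in units of
+	 * the peripheral bus width, hence the shift by PSIZE.
+	 */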
+ if (next_sg != 0) {
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+ width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+ count = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+ residue += count << width;
+ }
+
+ return residue;
+}
+
+static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue;
+
+ status = dma_cookie_status(c, cookie, state);
+ if ((status == DMA_COMPLETE) || (!state))
+ return status;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+ residue = stm32_dma_desc_residue(chan, chan->desc,
+ chan->next_sg);
+ } else if (vdesc) {
+ residue = stm32_dma_desc_residue(chan,
+ to_stm32_dma_desc(vdesc), 0);
+ } else {
+ residue = 0;
+ }
+
+ dma_set_residue(state, residue);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return status;
+}
+
+static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ int ret;
+
+ chan->config_init = false;
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_dma_disable_chan(chan);
+ if (ret < 0)
+ clk_disable_unprepare(dmadev->clk);
+
+ return ret;
+}
+
+static void stm32_dma_free_chan_resources(struct dma_chan *c)
+{
+ struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
+
+ if (chan->busy) {
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ stm32_dma_stop(chan);
+ chan->desc = NULL;
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ }
+
+ clk_disable_unprepare(dmadev->clk);
+
+ vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
+}
+
+static void stm32_dma_set_config(struct stm32_dma_chan *chan,
+				 struct stm32_dma_cfg *cfg)
+{
+ stm32_dma_clear_reg(&chan->chan_reg);
+
+ chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);
+
+ /* Enable Interrupts */
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
+
+ chan->chan_reg.dma_sfcr = cfg->threshold & STM32_DMA_SFCR_FTH_MASK;
+}
+
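+/*
+ * Illustrative client binding (not taken from a real dts): the phandle
+ * cells map to channel_id, request_line, stream_config and an optional
+ * FIFO threshold, e.g.:
+ *
+ *	dmas = <&dma2 5 4 0x10000 1>;
+ */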
+static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct stm32_dma_device *dmadev = ofdma->of_dma_data;
+ struct stm32_dma_cfg cfg;
+ struct stm32_dma_chan *chan;
+ struct dma_chan *c;
+
+ if (dma_spec->args_count < 3)
+ return NULL;
+
+ cfg.channel_id = dma_spec->args[0];
+ cfg.request_line = dma_spec->args[1];
+ cfg.stream_config = dma_spec->args[2];
+ cfg.threshold = 0;
+
+ if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
+ STM32_DMA_MAX_REQUEST_ID))
+ return NULL;
+
+ if (dma_spec->args_count > 3)
+ cfg.threshold = dma_spec->args[3];
+
+ chan = &dmadev->chan[cfg.channel_id];
+
+ c = dma_get_slave_channel(&chan->vchan.chan);
+ if (c)
+ stm32_dma_set_config(chan, &cfg);
+
+ return c;
+}
+
+static const struct of_device_id stm32_dma_of_match[] = {
+ { .compatible = "st,stm32-dma", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dma_of_match);
+
+static int stm32_dma_probe(struct platform_device *pdev)
+{
+ struct stm32_dma_chan *chan;
+ struct stm32_dma_device *dmadev;
+ struct dma_device *dd;
+ const struct of_device_id *match;
+ struct resource *res;
+ int i, ret;
+
+ match = of_match_device(stm32_dma_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev)
+ return -ENOMEM;
+
+ dd = &dmadev->ddev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmadev->base))
+ return PTR_ERR(dmadev->base);
+
+ dmadev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dmadev->clk)) {
+ dev_err(&pdev->dev, "Error: Missing controller clock\n");
+ return PTR_ERR(dmadev->clk);
+ }
+
+ dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
+ "st,mem2mem");
+
+ dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(dmadev->rst)) {
+ reset_control_assert(dmadev->rst);
+ udelay(2);
+ reset_control_deassert(dmadev->rst);
+ }
+
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+ dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
+ dd->device_free_chan_resources = stm32_dma_free_chan_resources;
+ dd->device_tx_status = stm32_dma_tx_status;
+ dd->device_issue_pending = stm32_dma_issue_pending;
+ dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
+ dd->device_config = stm32_dma_slave_config;
+ dd->device_terminate_all = stm32_dma_terminate_all;
+ dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dd->dev = &pdev->dev;
+ INIT_LIST_HEAD(&dd->channels);
+
+ if (dmadev->mem2mem) {
+ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+ dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
+ dd->directions |= BIT(DMA_MEM_TO_MEM);
+ }
+
+ for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
+ chan = &dmadev->chan[i];
+ chan->id = i;
+ chan->vchan.desc_free = stm32_dma_desc_free;
+ vchan_init(&chan->vchan, dd);
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
+ chan = &dmadev->chan[i];
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!res) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+ goto err_unregister;
+ }
+ chan->irq = res->start;
+ ret = devm_request_irq(&pdev->dev, chan->irq,
+ stm32_dma_chan_irq, 0,
+ dev_name(chan2dev(chan)), chan);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "request_irq failed with err %d channel %d\n",
+ ret, i);
+ goto err_unregister;
+ }
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ stm32_dma_of_xlate, dmadev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+			"STM32 DMA OF registration failed %d\n", ret);
+ goto err_unregister;
+ }
+
+ platform_set_drvdata(pdev, dmadev);
+
+ dev_info(&pdev->dev, "STM32 DMA driver registered\n");
+
+ return 0;
+
+err_unregister:
+ dma_async_device_unregister(dd);
+
+ return ret;
+}
+
+static struct platform_driver stm32_dma_driver = {
+ .driver = {
+ .name = "stm32-dma",
+ .of_match_table = stm32_dma_of_match,
+ },
+};
+
+static int __init stm32_dma_init(void)
+{
+ return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
+}
+subsys_initcall(stm32_dma_init);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 1661d518224a..e0df233dde92 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1271,6 +1271,7 @@ static const struct of_device_id sun4i_dma_match[] = {
{ .compatible = "allwinner,sun4i-a10-dma" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, sun4i_dma_match);
static struct platform_driver sun4i_dma_driver = {
.probe = sun4i_dma_probe,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index c8f79dcaaee8..3871f29e523d 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -296,7 +296,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
spin_unlock_irqrestore(&tdc->lock, flags);
/* Allocate DMA desc */
- dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
if (!dma_desc) {
dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
return NULL;
@@ -336,7 +336,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
}
spin_unlock_irqrestore(&tdc->lock, flags);
- sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
+ sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
if (!sg_req)
dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
return sg_req;
@@ -1186,10 +1186,12 @@ static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
dma_cookie_init(&tdc->dma_chan);
tdc->config_init = false;
- ret = clk_prepare_enable(tdma->dma_clk);
+
+ ret = pm_runtime_get_sync(tdma->dev);
if (ret < 0)
- dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
- return ret;
+ return ret;
+
+ return 0;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
@@ -1232,7 +1234,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_del(&sg_req->node);
kfree(sg_req);
}
- clk_disable_unprepare(tdma->dma_clk);
+ pm_runtime_put(tdma->dev);
tdc->slave_id = 0;
}
@@ -1290,40 +1292,19 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
.support_separate_wcount_reg = true,
};
-
-static const struct of_device_id tegra_dma_of_match[] = {
- {
- .compatible = "nvidia,tegra148-apbdma",
- .data = &tegra148_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra114-apbdma",
- .data = &tegra114_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra30-apbdma",
- .data = &tegra30_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra20-apbdma",
- .data = &tegra20_dma_chip_data,
- }, {
- },
-};
-MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
-
static int tegra_dma_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_dma *tdma;
int ret;
int i;
- const struct tegra_dma_chip_data *cdata = NULL;
- const struct of_device_id *match;
+ const struct tegra_dma_chip_data *cdata;
- match = of_match_device(tegra_dma_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
+ cdata = of_device_get_match_data(&pdev->dev);
+ if (!cdata) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
}
- cdata = match->data;
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
@@ -1356,20 +1337,14 @@ static int tegra_dma_probe(struct platform_device *pdev)
spin_lock_init(&tdma->global_lock);
pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
+ if (!pm_runtime_enabled(&pdev->dev))
ret = tegra_dma_runtime_resume(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
- ret);
- goto err_pm_disable;
- }
- }
+ else
+ ret = pm_runtime_get_sync(&pdev->dev);
- /* Enable clock before accessing registers */
- ret = clk_prepare_enable(tdma->dma_clk);
if (ret < 0) {
- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
- goto err_pm_disable;
+ pm_runtime_disable(&pdev->dev);
+ return ret;
}
/* Reset DMA controller */
@@ -1382,7 +1357,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
- clk_disable_unprepare(tdma->dma_clk);
+ pm_runtime_put(&pdev->dev);
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
@@ -1400,8 +1375,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
}
tdc->irq = res->start;
snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
- ret = devm_request_irq(&pdev->dev, tdc->irq,
- tegra_dma_isr, 0, tdc->name, tdc);
+ ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
if (ret) {
dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n",
@@ -1482,10 +1456,11 @@ err_unregister_dma_dev:
err_irq:
while (--i >= 0) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
+
+ free_irq(tdc->irq, tdc);
tasklet_kill(&tdc->tasklet);
}
-err_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_dma_runtime_suspend(&pdev->dev);
@@ -1502,6 +1477,7 @@ static int tegra_dma_remove(struct platform_device *pdev)
for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
tdc = &tdma->channels[i];
+ free_irq(tdc->irq, tdc);
tasklet_kill(&tdc->tasklet);
}
@@ -1514,8 +1490,7 @@ static int tegra_dma_remove(struct platform_device *pdev)
static int tegra_dma_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct tegra_dma *tdma = platform_get_drvdata(pdev);
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
clk_disable_unprepare(tdma->dma_clk);
return 0;
@@ -1523,8 +1498,7 @@ static int tegra_dma_runtime_suspend(struct device *dev)
static int tegra_dma_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct tegra_dma *tdma = platform_get_drvdata(pdev);
+ struct tegra_dma *tdma = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(tdma->dma_clk);
@@ -1543,7 +1517,7 @@ static int tegra_dma_pm_suspend(struct device *dev)
int ret;
/* Enable clock before accessing register */
- ret = tegra_dma_runtime_resume(dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
@@ -1552,15 +1526,22 @@ static int tegra_dma_pm_suspend(struct device *dev)
struct tegra_dma_channel *tdc = &tdma->channels[i];
struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+ /* Only save the state of DMA channels that are in use */
+ if (!tdc->config_init)
+ continue;
+
ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
+ if (tdma->chip_data->support_separate_wcount_reg)
+ ch_reg->wcount = tdc_read(tdc,
+ TEGRA_APBDMA_CHAN_WCOUNT);
}
/* Disable clock */
- tegra_dma_runtime_suspend(dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -1571,7 +1552,7 @@ static int tegra_dma_pm_resume(struct device *dev)
int ret;
/* Enable clock before accessing register */
- ret = tegra_dma_runtime_resume(dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
return ret;
@@ -1583,6 +1564,13 @@ static int tegra_dma_pm_resume(struct device *dev)
struct tegra_dma_channel *tdc = &tdma->channels[i];
struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;
+ /* Only restore the state of DMA channels that are in use */
+ if (!tdc->config_init)
+ continue;
+
+ if (tdma->chip_data->support_separate_wcount_reg)
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
+ ch_reg->wcount);
tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
@@ -1592,19 +1580,35 @@ static int tegra_dma_pm_resume(struct device *dev)
}
/* Disable clock */
- tegra_dma_runtime_suspend(dev);
+ pm_runtime_put(dev);
return 0;
}
#endif
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
-#ifdef CONFIG_PM
- .runtime_suspend = tegra_dma_runtime_suspend,
- .runtime_resume = tegra_dma_runtime_resume,
-#endif
+ SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
+ NULL)
SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};
+static const struct of_device_id tegra_dma_of_match[] = {
+ {
+ .compatible = "nvidia,tegra148-apbdma",
+ .data = &tegra148_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra114-apbdma",
+ .data = &tegra114_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra30-apbdma",
+ .data = &tegra30_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra20-apbdma",
+ .data = &tegra20_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+
static struct platform_driver tegra_dmac_driver = {
.driver = {
.name = "tegra-apbdma",
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index a415edbe61b1..e107779b1a2e 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-#include <linux/idr.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
@@ -198,7 +197,8 @@ struct ti_dra7_xbar_data {
void __iomem *iomem;
struct dma_router dmarouter;
- struct idr map_idr;
+ struct mutex mutex;
+ unsigned long *dma_inuse;
	u16 safe_val; /* Value to reset the crossbar lines */
u32 xbar_requests; /* number of DMA requests connected to XBAR */
@@ -225,7 +225,9 @@ static void ti_dra7_xbar_free(struct device *dev, void *route_data)
map->xbar_in, map->xbar_out);
ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
- idr_remove(&xbar->map_idr, map->xbar_out);
+ mutex_lock(&xbar->mutex);
+ clear_bit(map->xbar_out, xbar->dma_inuse);
+ mutex_unlock(&xbar->mutex);
kfree(map);
}
@@ -255,8 +257,17 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
return ERR_PTR(-ENOMEM);
}
- map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests,
- GFP_KERNEL);
+ mutex_lock(&xbar->mutex);
+ map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
+ xbar->dma_requests);
+ mutex_unlock(&xbar->mutex);
+ if (map->xbar_out == xbar->dma_requests) {
+		dev_err(&pdev->dev, "Ran out of free DMA requests\n");
+ kfree(map);
+ return ERR_PTR(-ENOMEM);
+ }
+ set_bit(map->xbar_out, xbar->dma_inuse);
+
map->xbar_in = (u16)dma_spec->args[0];
dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
@@ -278,17 +289,29 @@ static const struct of_device_id ti_dra7_master_match[] = {
.compatible = "ti,edma3",
.data = (void *)TI_XBAR_EDMA_OFFSET,
},
+ {
+ .compatible = "ti,edma3-tpcc",
+ .data = (void *)TI_XBAR_EDMA_OFFSET,
+ },
{},
};
+static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
+{
+ for (; len > 0; len--)
+		set_bit(offset + (len - 1), p);
+}
+
static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *dma_node;
struct ti_dra7_xbar_data *xbar;
+ struct property *prop;
struct resource *res;
u32 safe_val;
+ size_t sz;
void __iomem *iomem;
int i, ret;
@@ -299,8 +322,6 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
if (!xbar)
return -ENOMEM;
- idr_init(&xbar->map_idr);
-
dma_node = of_parse_phandle(node, "dma-masters", 0);
if (!dma_node) {
dev_err(&pdev->dev, "Can't get DMA master node\n");
@@ -322,6 +343,12 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
}
of_node_put(dma_node);
+ xbar->dma_inuse = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(xbar->dma_requests),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!xbar->dma_inuse)
+ return -ENOMEM;
+
if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
dev_info(&pdev->dev,
"Missing XBAR input information, using %u.\n",
@@ -332,6 +359,33 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
xbar->safe_val = (u16)safe_val;
+
+ prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
+ if (prop) {
+ const char pname[] = "ti,reserved-dma-request-ranges";
+ u32 (*rsv_events)[2];
+ size_t nelm = sz / sizeof(*rsv_events);
+ int i;
+
+ if (!nelm)
+ return -EINVAL;
+
+ rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
+ if (!rsv_events)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
+ nelm * 2);
+		if (ret) {
+			kfree(rsv_events);
+			return ret;
+		}
+
+ for (i = 0; i < nelm; i++) {
+ ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
+ xbar->dma_inuse);
+ }
+ kfree(rsv_events);
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iomem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iomem))
@@ -343,18 +397,23 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
xbar->dmarouter.route_free = ti_dra7_xbar_free;
xbar->dma_offset = (u32)match->data;
+ mutex_init(&xbar->mutex);
platform_set_drvdata(pdev, xbar);
/* Reset the crossbar */
- for (i = 0; i < xbar->dma_requests; i++)
- ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
+ for (i = 0; i < xbar->dma_requests; i++) {
+ if (!test_bit(i, xbar->dma_inuse))
+ ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
+ }
ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
&xbar->dmarouter);
if (ret) {
/* Restore the defaults for the crossbar */
- for (i = 0; i < xbar->dma_requests; i++)
- ti_dra7_xbar_write(xbar->iomem, i, i);
+ for (i = 0; i < xbar->dma_requests; i++) {
+ if (!test_bit(i, xbar->dma_inuse))
+ ti_dra7_xbar_write(xbar->iomem, i, i);
+ }
}
return ret;
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 6f80432a3f0a..a35c211857dd 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&vc->lock, flags);
cookie = dma_cookie_assign(tx);
- list_add_tail(&vd->node, &vc->desc_submitted);
+ list_move_tail(&vd->node, &vc->desc_submitted);
spin_unlock_irqrestore(&vc->lock, flags);
dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way to free it is to clear the DMA_CTRL_REUSE flag and submit the
+ * transfer one last time.
+ *
+ * Returns 0 upon success
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+ vc, vd, vd->tx.cookie);
+ vc->desc_free(vd);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
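+
+/*
+ * Illustrative caller flow for a reusable descriptor (assumes a client
+ * that set DMA_CTRL_REUSE in the prep flags):
+ *
+ *	struct dma_async_tx_descriptor *desc;
+ *
+ *	desc = dmaengine_prep_slave_single(chan, buf, len, dir,
+ *					   DMA_CTRL_REUSE);
+ *	... submit and reuse the descriptor as often as needed ...
+ *	dmaengine_desc_free(desc);	(ends up here via tx->desc_free)
+ */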
+
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
dma_cookie_t cookie)
{
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
cb_data = vd->tx.callback_param;
list_del(&vd->node);
-
- vc->desc_free(vd);
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
if (cb)
cb(cb_data);
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
while (!list_empty(head)) {
struct virt_dma_desc *vd = list_first_entry(head,
struct virt_dma_desc, node);
- list_del(&vd->node);
- dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
- vc->desc_free(vd);
+ if (dmaengine_desc_test_reuse(&vd->tx)) {
+ list_move_tail(&vd->node, &vc->desc_allocated);
+ } else {
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ list_del(&vd->node);
+ vc->desc_free(vd);
+ }
}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
dma_cookie_init(&vc->chan);
spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_allocated);
INIT_LIST_HEAD(&vc->desc_submitted);
INIT_LIST_HEAD(&vc->desc_issued);
INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 2fa47745a41f..d9731ca5e262 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
spinlock_t lock;
/* protected by vc.lock */
+ struct list_head desc_allocated;
struct list_head desc_submitted;
struct list_head desc_issued;
struct list_head desc_completed;
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
struct virt_dma_desc *vd, unsigned long tx_flags)
{
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+ extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+ unsigned long flags;
dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
vd->tx.flags = tx_flags;
vd->tx.tx_submit = vchan_tx_submit;
+ vd->tx.desc_free = vchan_tx_desc_free;
+
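+	/*
+	 * Track the descriptor from allocation on, so that terminate_all
+	 * and free_chan_resources can also reclaim descriptors that were
+	 * prepared but never submitted.
+	 */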
+ spin_lock_irqsave(&vc->lock, flags);
+ list_add_tail(&vd->node, &vc->desc_allocated);
+ spin_unlock_irqrestore(&vc->lock, flags);
return &vd->tx;
}
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head)
{
+ list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head);
list_splice_tail_init(&vc->desc_completed, head);
@@ -141,14 +150,30 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
+ struct virt_dma_desc *vd;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&vc->lock, flags);
vchan_get_all_descriptors(vc, &head);
+ list_for_each_entry(vd, &head, node)
+ dmaengine_desc_clear_reuse(&vd->tx);
spin_unlock_irqrestore(&vc->lock, flags);
vchan_dma_desc_free_list(vc, &head);
}
+/**
+ * vchan_synchronize() - synchronize callback execution to the current context
+ * @vc: virtual channel to synchronize
+ *
+ * Makes sure that all scheduled or active callbacks have finished running. For
+ * proper operation the caller has to ensure that no new callbacks are scheduled
+ * after the invocation of this function started.
+ */
+static inline void vchan_synchronize(struct virt_dma_chan *vc)
+{
+ tasklet_kill(&vc->task);
+}
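+
+/*
+ * Drivers will typically wire this into their device_synchronize callback
+ * (illustrative; foo_dma_synchronize is a made-up name):
+ *
+ *	dd->device_synchronize = foo_dma_synchronize;
+ *
+ * where foo_dma_synchronize() resolves the channel and calls
+ * vchan_synchronize() on it.
+ */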
+
#endif
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b5017ca3b..0ee0321868d3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
@@ -190,8 +191,7 @@ struct xilinx_vdma_tx_descriptor {
* @desc_offset: TX descriptor registers offset
* @lock: Descriptor operation lock
* @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
@@ -206,6 +206,7 @@ struct xilinx_vdma_tx_descriptor {
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
*/
struct xilinx_vdma_chan {
struct xilinx_vdma_device *xdev;
@@ -213,8 +214,7 @@ struct xilinx_vdma_chan {
u32 desc_offset;
spinlock_t lock;
struct list_head pending_list;
- struct xilinx_vdma_tx_descriptor *active_desc;
- struct xilinx_vdma_tx_descriptor *allocated_desc;
+ struct list_head active_list;
struct list_head done_list;
struct dma_chan common;
struct dma_pool *desc_pool;
@@ -229,6 +229,7 @@ struct xilinx_vdma_chan {
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
+ u32 desc_pendingcount;
};
/**
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
container_of(chan, struct xilinx_vdma_chan, common)
#define to_vdma_tx_descriptor(tx) \
container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+ cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
@@ -342,19 +346,11 @@ static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
-
- if (chan->allocated_desc)
- return chan->allocated_desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
- spin_lock_irqsave(&chan->lock, flags);
- chan->allocated_desc = desc;
- spin_unlock_irqrestore(&chan->lock, flags);
-
INIT_LIST_HEAD(&desc->segments);
return desc;
@@ -412,9 +408,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
xilinx_vdma_free_desc_list(chan, &chan->pending_list);
xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
- xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
- chan->active_desc = NULL;
+ xilinx_vdma_free_desc_list(chan, &chan->active_list);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -560,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
*/
static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
+ u32 val;
vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
/* Wait for the hardware to halt */
- do {
- if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED)
- break;
- } while (loop--);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+ (val & XILINX_VDMA_DMASR_HALTED), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "Cannot stop channel %p: %x\n",
chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
chan->err = true;
@@ -586,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
*/
static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
+ u32 val;
vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
/* Wait for the hardware to start */
- do {
- if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED))
- break;
- } while (loop--);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+ !(val & XILINX_VDMA_DMASR_HALTED), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "Cannot start channel %p: %x\n",
chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -614,45 +606,39 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
+ struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
u32 reg;
- struct xilinx_vdma_tx_segment *head, *tail = NULL;
+ struct xilinx_vdma_tx_segment *tail_segment;
+ /* This function was invoked with lock held */
if (chan->err)
return;
- spin_lock_irqsave(&chan->lock, flags);
-
- /* There's already an active descriptor, bail out. */
- if (chan->active_desc)
- goto out_unlock;
-
if (list_empty(&chan->pending_list))
- goto out_unlock;
+ return;
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
/* If it is SG mode and hardware is busy, cannot submit */
if (chan->has_sg && xilinx_vdma_is_running(chan) &&
!xilinx_vdma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n");
- goto out_unlock;
+ return;
}
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
*/
- if (chan->has_sg) {
- head = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- tail = list_entry(desc->segments.prev,
- struct xilinx_vdma_tx_segment, node);
-
- vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
- }
+ if (chan->has_sg)
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+ desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -662,6 +648,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
else
reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+	/* Configure the channel to allow the given number of frame buffers */
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+ chan->desc_pendingcount);
+
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -690,16 +680,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
xilinx_vdma_start(chan);
if (chan->err)
- goto out_unlock;
+ return;
/* Start the transfer */
if (chan->has_sg) {
- vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+ tail_segment->phys);
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
- list_for_each_entry(segment, &desc->segments, node) {
+ list_for_each_entry(desc, &chan->pending_list, node) {
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
@@ -707,7 +700,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
}
if (!last)
- goto out_unlock;
+ return;
/* HW expects these parameters to be same for one transaction */
vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -716,11 +709,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
}
- list_del(&desc->node);
- chan->active_desc = desc;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
}
/**
@@ -730,8 +720,11 @@ out_unlock:
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+ spin_lock_irqsave(&chan->lock, flags);
xilinx_vdma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -742,24 +735,17 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
*/
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
+ struct xilinx_vdma_tx_descriptor *desc, *next;
- spin_lock_irqsave(&chan->lock, flags);
+ /* This function was invoked with lock held */
+ if (list_empty(&chan->active_list))
+ return;
- desc = chan->active_desc;
- if (!desc) {
- dev_dbg(chan->dev, "no running descriptors\n");
- goto out_unlock;
+ list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
}
-
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
-
- chan->active_desc = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -770,21 +756,17 @@ out_unlock:
*/
static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
u32 tmp;
vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
- tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RESET;
-
/* Wait for the hardware to finish reset */
- do {
- tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RESET;
- } while (loop-- && tmp);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+ !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -793,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
chan->err = false;
- return 0;
+ return err;
}
/**
@@ -870,8 +852,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ spin_lock(&chan->lock);
xilinx_vdma_complete_descriptor(chan);
xilinx_vdma_start_transfer(chan);
+ spin_unlock(&chan->lock);
}
tasklet_schedule(&chan->tasklet);
@@ -879,6 +863,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
/**
+ * append_desc_queue - Queuing descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_vdma_tx_descriptor *tail_desc;
+
+ if (list_empty(&chan->pending_list))
+ goto append;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ */
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+append:
+ list_add_tail(&desc->node, &chan->pending_list);
+ chan->desc_pendingcount++;
+
+ if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ dev_dbg(chan->dev, "desc pendingcount is too high\n");
+ chan->desc_pendingcount = chan->num_frms;
+ }
+}
+
+/**
* xilinx_vdma_tx_submit - Submit DMA transaction
* @tx: Async transaction descriptor
*
@@ -906,11 +928,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
- /* Append the transaction to the pending transactions queue. */
- list_add_tail(&desc->node, &chan->pending_list);
-
- /* Free the allocated desc */
- chan->allocated_desc = NULL;
+ /* Put this transaction onto the tail of the pending queue */
+ append_desc_queue(chan, desc);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -973,13 +992,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
else
hw->buf_addr = xt->src_start;
- /* Link the previous next descriptor to current */
- if (!list_empty(&desc->segments)) {
- prev = list_last_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
- }
-
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
@@ -988,7 +1000,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
+ desc->async_tx.phys = segment->phys;
return &desc->async_tx;
@@ -1127,10 +1139,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
chan->has_sg = xdev->has_sg;
+ chan->desc_pendingcount = 0x0;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->active_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");