-rw-r--r--  Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt  29
-rw-r--r--  crypto/async_tx/async_pq.c  5
-rw-r--r--  drivers/dma/Kconfig  15
-rw-r--r--  drivers/dma/Makefile  1
-rw-r--r--  drivers/dma/bcm-sba-raid.c  1785
-rw-r--r--  drivers/dma/dw/Kconfig  7
-rw-r--r--  drivers/dma/dw/core.c  332
-rw-r--r--  drivers/dma/dw/platform.c  6
-rw-r--r--  drivers/dma/dw/regs.h  50
-rw-r--r--  drivers/dma/fsl_raid.c  2
-rw-r--r--  drivers/dma/fsldma.c  5
-rw-r--r--  drivers/dma/fsldma.h  4
-rw-r--r--  drivers/dma/imx-dma.c  7
-rw-r--r--  drivers/dma/imx-sdma.c  27
-rw-r--r--  drivers/dma/ioat/dca.c  8
-rw-r--r--  drivers/dma/qcom/hidma.c  22
-rw-r--r--  drivers/dma/qcom/hidma.h  1
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c  47
-rw-r--r--  drivers/dma/ste_dma40.c  5
-rw-r--r--  drivers/dma/tegra20-apb-dma.c  50
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c  3
-rw-r--r--  include/linux/dma/dw.h  21
-rw-r--r--  include/linux/raid/pq.h  1
-rw-r--r--  lib/raid6/mktables.c  20
-rw-r--r--  sound/atmel/Kconfig  13
-rw-r--r--  sound/atmel/Makefile  2
-rw-r--r--  sound/atmel/abdac.c  610
-rw-r--r--  sound/atmel/ac97c.c  447
28 files changed, 2056 insertions, 1469 deletions
diff --git a/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
new file mode 100644
index 000000000000..092913a28457
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
@@ -0,0 +1,29 @@
+* Broadcom SBA RAID engine
+
+Required properties:
+- compatible: Should be one of the following
+ "brcm,iproc-sba"
+ "brcm,iproc-sba-v2"
+ The "brcm,iproc-sba" has support for only 6 PQ coefficients
+ The "brcm,iproc-sba-v2" has support for only 30 PQ coefficients
+- mboxes: List of phandle and mailbox channel specifiers
+
+Example:
+
+raid_mbox: mbox@67400000 {
+ ...
+ #mbox-cells = <3>;
+ ...
+};
+
+raid0 {
+ compatible = "brcm,iproc-sba-v2";
+ mboxes = <&raid_mbox 0 0x1 0xffff>,
+ <&raid_mbox 1 0x1 0xffff>,
+ <&raid_mbox 2 0x1 0xffff>,
+ <&raid_mbox 3 0x1 0xffff>,
+ <&raid_mbox 4 0x1 0xffff>,
+ <&raid_mbox 5 0x1 0xffff>,
+ <&raid_mbox 6 0x1 0xffff>,
+ <&raid_mbox 7 0x1 0xffff>;
+};
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f83de99d7d71..56bd612927ab 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
dma_addr_t dma_dest[2];
int src_off = 0;
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
-
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
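+ /*
+ * Derive DMA_PREP_FENCE from submit->flags for each descriptor
+ * prepared in this loop, since submit->flags may change between
+ * iterations.
+ */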
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide
* a descriptor
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 24e8597b2c3e..686ae67d7e2a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -99,6 +99,21 @@ config AXI_DMAC
controller is often used in Analog Device's reference designs for FPGA
platforms.
+config BCM_SBA_RAID
+ tristate "Broadcom SBA RAID engine support"
+ depends on ARM64 || COMPILE_TEST
+ depends on MAILBOX && RAID6_PQ
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_DISABLE_XOR_VAL_DMA
+ select ASYNC_TX_DISABLE_PQ_VAL_DMA
+ default ARCH_BCM_IPROC
+ help
+ Enable support for Broadcom SBA RAID Engine. The SBA RAID
+ engine is available on most of the Broadcom iProc SoCs. It
+ has the capability to offload memcpy, xor and pq computation
+ for raid5/6.
+
config COH901318
bool "ST-Ericsson COH901318 DMA support"
select DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0b723e94d9e6..d12ab2985ed1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
+obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
new file mode 100644
index 000000000000..e41bbc7cb094
--- /dev/null
+++ b/drivers/dma/bcm-sba-raid.c
@@ -0,0 +1,1785 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Broadcom SBA RAID Driver
+ *
+ * The Broadcom stream buffer accelerator (SBA) provides offloading
+ * capabilities for RAID operations. The SBA offload engine is accessible
+ * via the Broadcom SoC specific ring manager. Two or more offload engines
+ * can share the same ring manager, which is why the ring manager driver
+ * is implemented as a mailbox controller driver and the offload engine
+ * drivers are implemented as mailbox clients.
+ *
+ * Typically, the Broadcom SoC specific ring manager exposes a large
+ * number of hardware rings spread over one or more SBA hardware devices.
+ * By design, the internal buffer size of an SBA hardware device is
+ * limited, but every offload operation supported by SBA can be broken
+ * down into multiple small requests and executed in parallel on multiple
+ * SBA hardware devices to achieve high throughput.
+ *
+ * The Broadcom SBA RAID driver does not require any register programming
+ * except for submitting requests to the SBA hardware device via mailbox
+ * channels. This driver implements a DMA device with one DMA channel
+ * using a set of mailbox channels provided by the Broadcom SoC specific
+ * ring manager driver. To exploit parallelism (as described above), all
+ * DMA requests arriving on the SBA RAID DMA channel are broken down into
+ * smaller requests and submitted to multiple mailbox channels in a
+ * round-robin fashion. To get more SBA DMA channels, more SBA device
+ * nodes can be created in the Broadcom SoC specific DTS, based on the
+ * number of hardware rings supported by the Broadcom SoC ring manager.
+ */
+
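+/*
+ * Illustrative usage (not part of this driver): a consumer such as the
+ * async_tx layer obtains a PQ-capable channel and drives it through the
+ * standard dmaengine API, roughly as follows:
+ *
+ *	struct dma_chan *chan = dma_find_channel(DMA_PQ);
+ *	struct dma_async_tx_descriptor *tx;
+ *
+ *	tx = chan->device->device_prep_dma_pq(chan, dst, src, src_cnt,
+ *					       scf, len, DMA_PREP_INTERRUPT);
+ *	tx->callback = done_fn;
+ *	dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ *
+ * Real users (e.g. async_gen_syndrome()) also handle DMA mapping and
+ * descriptor flags; the snippet above is only a sketch.
+ */
+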
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/raid/pq.h>
+
+#include "dmaengine.h"
+
+/* SBA command related defines */
+#define SBA_TYPE_SHIFT 48
+#define SBA_TYPE_MASK GENMASK(1, 0)
+#define SBA_TYPE_A 0x0
+#define SBA_TYPE_B 0x2
+#define SBA_TYPE_C 0x3
+#define SBA_USER_DEF_SHIFT 32
+#define SBA_USER_DEF_MASK GENMASK(15, 0)
+#define SBA_R_MDATA_SHIFT 24
+#define SBA_R_MDATA_MASK GENMASK(7, 0)
+#define SBA_C_MDATA_MS_SHIFT 18
+#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
+#define SBA_INT_SHIFT 17
+#define SBA_INT_MASK BIT(0)
+#define SBA_RESP_SHIFT 16
+#define SBA_RESP_MASK BIT(0)
+#define SBA_C_MDATA_SHIFT 8
+#define SBA_C_MDATA_MASK GENMASK(7, 0)
+#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
+#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
+#define SBA_C_MDATA_DNUM_SHIFT 5
+#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
+#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
+#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
+#define SBA_CMD_SHIFT 0
+#define SBA_CMD_MASK GENMASK(3, 0)
+#define SBA_CMD_ZERO_BUFFER 0x4
+#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
+#define SBA_CMD_LOAD_BUFFER 0x9
+#define SBA_CMD_XOR 0xa
+#define SBA_CMD_GALOIS_XOR 0xb
+#define SBA_CMD_WRITE_BUFFER 0xc
+#define SBA_CMD_GALOIS 0xe
+
+/* Driver helper macros */
+#define to_sba_request(tx) \
+ container_of(tx, struct sba_request, tx)
+#define to_sba_device(dchan) \
+ container_of(dchan, struct sba_device, dma_chan)
+
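+/*
+ * Request life-cycle: each state below corresponds to one of the
+ * per-channel request lists in struct sba_device. The usual flow is
+ * FREE -> ALLOCED -> PENDING -> ACTIVE -> RECEIVED -> COMPLETED -> FREE,
+ * with ACTIVE requests moved to ABORTED (and freed later) when the
+ * channel is cleaned up.
+ */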
+enum sba_request_state {
+ SBA_REQUEST_STATE_FREE = 1,
+ SBA_REQUEST_STATE_ALLOCED = 2,
+ SBA_REQUEST_STATE_PENDING = 3,
+ SBA_REQUEST_STATE_ACTIVE = 4,
+ SBA_REQUEST_STATE_RECEIVED = 5,
+ SBA_REQUEST_STATE_COMPLETED = 6,
+ SBA_REQUEST_STATE_ABORTED = 7,
+};
+
+struct sba_request {
+ /* Global state */
+ struct list_head node;
+ struct sba_device *sba;
+ enum sba_request_state state;
+ bool fence;
+ /* Chained requests management */
+ struct sba_request *first;
+ struct list_head next;
+ unsigned int next_count;
+ atomic_t next_pending_count;
+ /* BRCM message data */
+ void *resp;
+ dma_addr_t resp_dma;
+ struct brcm_sba_command *cmds;
+ struct brcm_message msg;
+ struct dma_async_tx_descriptor tx;
+};
+
+enum sba_version {
+ SBA_VER_1 = 0,
+ SBA_VER_2
+};
+
+struct sba_device {
+ /* Underlying device */
+ struct device *dev;
+ /* DT configuration parameters */
+ enum sba_version ver;
+ /* Derived configuration parameters */
+ u32 max_req;
+ u32 hw_buf_size;
+ u32 hw_resp_size;
+ u32 max_pq_coefs;
+ u32 max_pq_srcs;
+ u32 max_cmd_per_req;
+ u32 max_xor_srcs;
+ u32 max_resp_pool_size;
+ u32 max_cmds_pool_size;
+ /* Mailbox client and mailbox channels */
+ struct mbox_client client;
+ int mchans_count;
+ atomic_t mchans_current;
+ struct mbox_chan **mchans;
+ struct device *mbox_dev;
+ /* DMA device and DMA channel */
+ struct dma_device dma_dev;
+ struct dma_chan dma_chan;
+ /* DMA channel resources */
+ void *resp_base;
+ dma_addr_t resp_dma_base;
+ void *cmds_base;
+ dma_addr_t cmds_dma_base;
+ spinlock_t reqs_lock;
+ struct sba_request *reqs;
+ bool reqs_fence;
+ struct list_head reqs_alloc_list;
+ struct list_head reqs_pending_list;
+ struct list_head reqs_active_list;
+ struct list_head reqs_received_list;
+ struct list_head reqs_completed_list;
+ struct list_head reqs_aborted_list;
+ struct list_head reqs_free_list;
+ int reqs_free_count;
+};
+
+/* ====== SBA command helper routines ===== */
+
+static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
+{
+ cmd &= ~((u64)mask << shift);
+ cmd |= ((u64)(val & mask) << shift);
+ return cmd;
+}
+
+static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
+{
+ return b0 & SBA_C_MDATA_BNUMx_MASK;
+}
+
+static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
+{
+ return b0 & SBA_C_MDATA_BNUMx_MASK;
+}
+
+static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
+{
+ return (b0 & SBA_C_MDATA_BNUMx_MASK) |
+ ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
+}
+
+static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
+{
+ return (b0 & SBA_C_MDATA_BNUMx_MASK) |
+ ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
+ ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
+}
+
+/* ====== Channel resource management routines ===== */
+
+static struct sba_request *sba_alloc_request(struct sba_device *sba)
+{
+ unsigned long flags;
+ struct sba_request *req = NULL;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ req = list_first_entry_or_null(&sba->reqs_free_list,
+ struct sba_request, node);
+ if (req) {
+ list_move_tail(&req->node, &sba->reqs_alloc_list);
+ req->state = SBA_REQUEST_STATE_ALLOCED;
+ req->fence = false;
+ req->first = req;
+ INIT_LIST_HEAD(&req->next);
+ req->next_count = 1;
+ atomic_set(&req->next_pending_count, 1);
+
+ sba->reqs_free_count--;
+
+ dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ }
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+ return req;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_pending_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ req->state = SBA_REQUEST_STATE_PENDING;
+ list_move_tail(&req->node, &sba->reqs_pending_list);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static bool _sba_active_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
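+ /*
+ * A fenced request, once active, blocks activation of any later
+ * request until the active list drains; reqs_fence tracks this
+ * and is cleared when nothing is active anymore.
+ */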
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+ if (sba->reqs_fence)
+ return false;
+ req->state = SBA_REQUEST_STATE_ACTIVE;
+ list_move_tail(&req->node, &sba->reqs_active_list);
+ if (req->fence)
+ sba->reqs_fence = true;
+ return true;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_abort_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ req->state = SBA_REQUEST_STATE_ABORTED;
+ list_move_tail(&req->node, &sba->reqs_aborted_list);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_free_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ req->state = SBA_REQUEST_STATE_FREE;
+ list_move_tail(&req->node, &sba->reqs_free_list);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+ sba->reqs_free_count++;
+}
+
+static void sba_received_request(struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ req->state = SBA_REQUEST_STATE_RECEIVED;
+ list_move_tail(&req->node, &sba->reqs_received_list);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_complete_chained_requests(struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_request *nreq;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ req->state = SBA_REQUEST_STATE_COMPLETED;
+ list_move_tail(&req->node, &sba->reqs_completed_list);
+ list_for_each_entry(nreq, &req->next, next) {
+ nreq->state = SBA_REQUEST_STATE_COMPLETED;
+ list_move_tail(&nreq->node, &sba->reqs_completed_list);
+ }
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_free_chained_requests(struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_request *nreq;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ _sba_free_request(sba, req);
+ list_for_each_entry(nreq, &req->next, next)
+ _sba_free_request(sba, nreq);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_chain_request(struct sba_request *first,
+ struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ list_add_tail(&req->next, &first->next);
+ req->first = first;
+ first->next_count++;
+ atomic_set(&first->next_pending_count, first->next_count);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_cleanup_nonpending_requests(struct sba_device *sba)
+{
+ unsigned long flags;
+ struct sba_request *req, *req1;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Free up all allocated requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
+ _sba_free_request(sba, req);
+
+ /* Free up all received requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
+ _sba_free_request(sba, req);
+
+ /* Free up all completed requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
+ _sba_free_request(sba, req);
+
+ /* Set all active requests as aborted */
+ list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
+ _sba_abort_request(sba, req);
+
+ /*
+ * Note: We expect that aborted requests will eventually be
+ * freed by sba_receive_message()
+ */
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_cleanup_pending_requests(struct sba_device *sba)
+{
+ unsigned long flags;
+ struct sba_request *req, *req1;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Free up all pending requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
+ _sba_free_request(sba, req);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+/* ====== DMAENGINE callbacks ===== */
+
+static void sba_free_chan_resources(struct dma_chan *dchan)
+{
+ /*
+ * Channel resources are pre-allocated, so just free up whatever
+ * we can here and re-use the pre-allocated resources the next
+ * time the channel is requested.
+ */
+ sba_cleanup_nonpending_requests(to_sba_device(dchan));
+}
+
+static int sba_device_terminate_all(struct dma_chan *dchan)
+{
+ /* Cleanup all pending requests */
+ sba_cleanup_pending_requests(to_sba_device(dchan));
+
+ return 0;
+}
+
+static int sba_send_mbox_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ int mchans_idx, ret = 0;
+
+ /* Select mailbox channel in round-robin fashion */
+ mchans_idx = atomic_inc_return(&sba->mchans_current);
+ mchans_idx = mchans_idx % sba->mchans_count;
+
+ /* Send message for the request */
+ req->msg.error = 0;
+ ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+ if (ret < 0) {
+ dev_err(sba->dev, "send message failed with error %d", ret);
+ return ret;
+ }
+ ret = req->msg.error;
+ if (ret < 0) {
+ dev_err(sba->dev, "message error %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sba_issue_pending(struct dma_chan *dchan)
+{
+ int ret;
+ unsigned long flags;
+ struct sba_request *req, *req1;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Process all pending requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
+ /* Try to make request active */
+ if (!_sba_active_request(sba, req))
+ break;
+
+ /* Send request to mailbox channel */
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ ret = sba_send_mbox_request(sba, req);
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* If something went wrong then keep request pending */
+ if (ret < 0) {
+ _sba_pending_request(sba, req);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ unsigned long flags;
+ dma_cookie_t cookie;
+ struct sba_device *sba;
+ struct sba_request *req, *nreq;
+
+ if (unlikely(!tx))
+ return -EINVAL;
+
+ sba = to_sba_device(tx->chan);
+ req = to_sba_request(tx);
+
+ /* Assign cookie and mark all chained requests pending */
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ cookie = dma_cookie_assign(tx);
+ _sba_pending_request(sba, req);
+ list_for_each_entry(nreq, &req->next, next)
+ _sba_pending_request(sba, nreq);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+ return cookie;
+}
+
+static enum dma_status sba_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ int mchan_idx;
+ enum dma_status ret;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ /* Let the mailbox framework deliver any pending replies */
+ for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
+ mbox_client_peek_data(sba->mchans[mchan_idx]);
+
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void sba_fillup_interrupt_msg(struct sba_request *req,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg)
+{
+ u64 cmd;
+ u32 c_mdata;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ /* Type-B command to load dummy data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = req->resp_dma;
+ cmdsp->data_len = req->sba->hw_resp_size;
+ cmdsp++;
+
+ /* Type-A command to write buf0 to dummy location */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = req->resp_dma;
+ cmdsp->data_len = req->sba->hw_resp_size;
+ cmdsp++;
+
+ /* Fillup brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+
+ /*
+ * Force fence so that no requests are submitted
+ * until DMA callback for this request is invoked.
+ */
+ req->fence = true;
+
+ /* Fillup request message */
+ sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return &req->tx;
+}
+
+static void sba_fillup_memcpy_msg(struct sba_request *req,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t dst, dma_addr_t src)
+{
+ u64 cmd;
+ u32 c_mdata;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ /* Type-B command to load data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = dst + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Fillup brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_memcpy_req(struct sba_device *sba,
+ dma_addr_t off, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fillup request message */
+ sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
+ off, len, dst, src);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ size_t req_len;
+ dma_addr_t off = 0;
+ struct sba_device *sba = to_sba_device(dchan);
+ struct sba_request *first = NULL, *req;
+
+ /* Create chained requests where each request is up to hw_buf_size */
+ while (len) {
+ req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
+
+ req = sba_prep_dma_memcpy_req(sba, off, dst, src,
+ req_len, flags);
+ if (!req) {
+ if (first)
+ sba_free_chained_requests(first);
+ return NULL;
+ }
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ off += req_len;
+ len -= req_len;
+ }
+
+ return (first) ? &first->tx : NULL;
+}
+
+static void sba_fillup_xor_msg(struct sba_request *req,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
+{
+ u64 cmd;
+ u32 c_mdata;
+ unsigned int i;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ /* Type-B command to load data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src[0] + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Type-B commands to xor data with buf0 and put it back in buf0 */
+ for (i = 1; i < src_cnt; i++) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_xor_c_mdata(0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src[i] + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = dst + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Fillup brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_xor_req(struct sba_device *sba,
+ dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
+ u32 src_cnt, size_t len, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fillup request message */
+ sba_fillup_xor_msg(req, req->cmds, &req->msg,
+ off, len, dst, src, src_cnt);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
+ u32 src_cnt, size_t len, unsigned long flags)
+{
+ size_t req_len;
+ dma_addr_t off = 0;
+ struct sba_device *sba = to_sba_device(dchan);
+ struct sba_request *first = NULL, *req;
+
+ /* Sanity checks */
+ if (unlikely(src_cnt > sba->max_xor_srcs))
+ return NULL;
+
+ /* Create chained requests where each request is up to hw_buf_size */
+ while (len) {
+ req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
+
+ req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
+ req_len, flags);
+ if (!req) {
+ if (first)
+ sba_free_chained_requests(first);
+ return NULL;
+ }
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ off += req_len;
+ len -= req_len;
+ }
+
+ return (first) ? &first->tx : NULL;
+}
+
+static void sba_fillup_pq_msg(struct sba_request *req,
+ bool pq_continue,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t *dst_p, dma_addr_t *dst_q,
+ const u8 *scf, dma_addr_t *src, u32 src_cnt)
+{
+ u64 cmd;
+ u32 c_mdata;
+ unsigned int i;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ if (pq_continue) {
+ /* Type-B command to load old P into buf0 */
+ if (dst_p) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-B command to load old Q into buf1 */
+ if (dst_q) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(1);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+ } else {
+ /* Type-A command to zero all buffers */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ cmdsp++;
+ }
+
+ /* Type-B commands to generate P into buf0 and Q into buf1 */
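+ /*
+ * The hardware takes the Galois multiplier as an exponent of the
+ * generator, so each coefficient scf[i] is converted to its
+ * discrete log via raid6_gflog[] before being encoded in C_MDATA.
+ */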
+ for (i = 0; i < src_cnt; i++) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
+ SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src[i] + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ if (dst_p) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf1 */
+ if (dst_q) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(1);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Fillup brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
+ dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
+ u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fillup request messages */
+ sba_fillup_pq_msg(req, dmaf_continue(flags),
+ req->cmds, &req->msg,
+ off, len, dst_p, dst_q, scf, src, src_cnt);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static void sba_fillup_pq_single_msg(struct sba_request *req,
+ bool pq_continue,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t *dst_p, dma_addr_t *dst_q,
+ dma_addr_t src, u8 scf)
+{
+ u64 cmd;
+ u32 c_mdata;
+ u8 pos, dpos = raid6_gflog[scf];
+ struct brcm_sba_command *cmdsp = cmds;
+
+ if (!dst_p)
+ goto skip_p;
+
+ if (pq_continue) {
+ /* Type-B command to load old P into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /*
+ * Type-B commands to xor data with buf0 and put it
+ * back in buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_xor_c_mdata(0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ } else {
+ /* Type-B command to load data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+skip_p:
+ if (!dst_q)
+ goto skip_q;
+
+ /* Type-A command to zero all buffers */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ cmdsp++;
+
+ if (dpos == 255)
+ goto skip_q_computation;
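+ /*
+ * The hardware multiplies by g^pos with pos limited to
+ * max_pq_coefs - 1 per command, so the coefficient's discrete
+ * log dpos is applied in chunks until it reaches zero.
+ */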
+ pos = (dpos < req->sba->max_pq_coefs) ?
+ dpos : (req->sba->max_pq_coefs - 1);
+
+ /*
+ * Type-B command to generate initial Q from data
+ * and store output into buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
+ SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ dpos -= pos;
+
+ /* Multiple Type-A commands to generate the final Q */
+ while (dpos) {
+ pos = (dpos < req->sba->max_pq_coefs) ?
+ dpos : (req->sba->max_pq_coefs - 1);
+
+ /*
+ * Type-A command to generate Q from buf0 and buf1
+ * and store the result in buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
+ SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ cmdsp++;
+
+ dpos -= pos;
+ }
+
+skip_q_computation:
+ if (pq_continue) {
+ /*
+ * Type-B command to XOR previous output with
+ * buf0 and write it into buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_xor_c_mdata(0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+skip_q:
+ /* Fillup brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
+ dma_addr_t *dst_p, dma_addr_t *dst_q,
+ dma_addr_t src, u8 scf, size_t len,
+ unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fillup request messages */
+ sba_fillup_pq_single_msg(req, dmaf_continue(flags),
+ req->cmds, &req->msg, off, len,
+ dst_p, dst_q, src, scf);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
+ u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
+{
+ u32 i, dst_q_index;
+ size_t req_len;
+ bool slow = false;
+ dma_addr_t off = 0;
+ dma_addr_t *dst_p = NULL, *dst_q = NULL;
+ struct sba_device *sba = to_sba_device(dchan);
+ struct sba_request *first = NULL, *req;
+
+ /* Sanity checks */
+ if (unlikely(src_cnt > sba->max_pq_srcs))
+ return NULL;
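+ /*
+ * If any coefficient's discrete log is larger than the hardware
+ * can apply in a single PQ command, fall back to the slow path
+ * below, which computes Q one source at a time using chained
+ * single-source requests.
+ */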
+ for (i = 0; i < src_cnt; i++)
+ if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
+ slow = true;
+
+ /* Figure-out P and Q destination addresses */
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ dst_p = &dst[0];
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ dst_q = &dst[1];
+
+ /* Create chained requests where each request is up to hw_buf_size */
+ while (len) {
+ req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
+
+ if (slow) {
+ dst_q_index = src_cnt;
+
+ if (dst_q) {
+ for (i = 0; i < src_cnt; i++) {
+ if (*dst_q == src[i]) {
+ dst_q_index = i;
+ break;
+ }
+ }
+ }
+
+ if (dst_q_index < src_cnt) {
+ i = dst_q_index;
+ req = sba_prep_dma_pq_single_req(sba,
+ off, dst_p, dst_q, src[i], scf[i],
+ req_len, flags | DMA_PREP_FENCE);
+ if (!req)
+ goto fail;
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ flags |= DMA_PREP_CONTINUE;
+ }
+
+ for (i = 0; i < src_cnt; i++) {
+ if (dst_q_index == i)
+ continue;
+
+ req = sba_prep_dma_pq_single_req(sba,
+ off, dst_p, dst_q, src[i], scf[i],
+ req_len, flags | DMA_PREP_FENCE);
+ if (!req)
+ goto fail;
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ flags |= DMA_PREP_CONTINUE;
+ }
+ } else {
+ req = sba_prep_dma_pq_req(sba, off,
+ dst_p, dst_q, src, src_cnt,
+ scf, req_len, flags);
+ if (!req)
+ goto fail;
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+ }
+
+ off += req_len;
+ len -= req_len;
+ }
+
+ return (first) ? &first->tx : NULL;
+
+fail:
+ if (first)
+ sba_free_chained_requests(first);
+ return NULL;
+}
+
+/* ====== Mailbox callbacks ===== */
+
+static void sba_dma_tx_actions(struct sba_request *req)
+{
+ struct dma_async_tx_descriptor *tx = &req->tx;
+
+ WARN_ON(tx->cookie < 0);
+
+ if (tx->cookie > 0) {
+ dma_cookie_complete(tx);
+
+ /*
+ * Call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+
+ dma_descriptor_unmap(tx);
+ }
+
+ /* Run dependent operations */
+ dma_run_dependencies(tx);
+
+ /* If waiting for 'ack' then move to completed list */
+ if (!async_tx_test_ack(&req->tx))
+ sba_complete_chained_requests(req);
+ else
+ sba_free_chained_requests(req);
+}
+
+static void sba_receive_message(struct mbox_client *cl, void *msg)
+{
+ unsigned long flags;
+ struct brcm_message *m = msg;
+ struct sba_request *req = m->ctx, *req1;
+ struct sba_device *sba = req->sba;
+
+ /* Report an error if the message failed */
+ if (m->error < 0)
+ dev_err(sba->dev, "%s got message with error %d",
+ dma_chan_name(&sba->dma_chan), m->error);
+
+ /* Mark request as received */
+ sba_received_request(req);
+
+ /* Process the chain only once all chained requests have been received */
+ if (atomic_dec_return(&req->first->next_pending_count))
+ goto done;
+
+ /* Point to first request */
+ req = req->first;
+
+ /* Update request */
+ if (req->state == SBA_REQUEST_STATE_RECEIVED)
+ sba_dma_tx_actions(req);
+ else
+ sba_free_chained_requests(req);
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Re-check all completed requests waiting for 'ack' */
+ list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ sba_dma_tx_actions(req);
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+done:
+ /* Try to submit pending request */
+ sba_issue_pending(&sba->dma_chan);
+}
+
+/* ====== Platform driver routines ===== */
+
+static int sba_prealloc_channel_resources(struct sba_device *sba)
+{
+ int i, j, p, ret = 0;
+ struct sba_request *req = NULL;
+
+ sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
+ sba->max_resp_pool_size,
+ &sba->resp_dma_base, GFP_KERNEL);
+ if (!sba->resp_base)
+ return -ENOMEM;
+
+ sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
+ sba->max_cmds_pool_size,
+ &sba->cmds_dma_base, GFP_KERNEL);
+ if (!sba->cmds_base) {
+ ret = -ENOMEM;
+ goto fail_free_resp_pool;
+ }
+
+ spin_lock_init(&sba->reqs_lock);
+ sba->reqs_fence = false;
+ INIT_LIST_HEAD(&sba->reqs_alloc_list);
+ INIT_LIST_HEAD(&sba->reqs_pending_list);
+ INIT_LIST_HEAD(&sba->reqs_active_list);
+ INIT_LIST_HEAD(&sba->reqs_received_list);
+ INIT_LIST_HEAD(&sba->reqs_completed_list);
+ INIT_LIST_HEAD(&sba->reqs_aborted_list);
+ INIT_LIST_HEAD(&sba->reqs_free_list);
+
+ sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
+ sizeof(*req), GFP_KERNEL);
+ if (!sba->reqs) {
+ ret = -ENOMEM;
+ goto fail_free_cmds_pool;
+ }
+
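+ /*
+ * Carve the coherent pools into per-request slices: request i gets
+ * hw_resp_size bytes of the response pool and max_cmd_per_req
+ * 64-bit command slots from the command pool.
+ */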
+ for (i = 0, p = 0; i < sba->max_req; i++) {
+ req = &sba->reqs[i];
+ INIT_LIST_HEAD(&req->node);
+ req->sba = sba;
+ req->state = SBA_REQUEST_STATE_FREE;
+ INIT_LIST_HEAD(&req->next);
+ req->next_count = 1;
+ atomic_set(&req->next_pending_count, 0);
+ req->fence = false;
+ req->resp = sba->resp_base + p;
+ req->resp_dma = sba->resp_dma_base + p;
+ p += sba->hw_resp_size;
+ req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
+ sizeof(*req->cmds), GFP_KERNEL);
+ if (!req->cmds) {
+ ret = -ENOMEM;
+ goto fail_free_cmds_pool;
+ }
+ for (j = 0; j < sba->max_cmd_per_req; j++) {
+ req->cmds[j].cmd = 0;
+ req->cmds[j].cmd_dma = sba->cmds_base +
+ (i * sba->max_cmd_per_req + j) * sizeof(u64);
+ req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
+ (i * sba->max_cmd_per_req + j) * sizeof(u64);
+ req->cmds[j].flags = 0;
+ }
+ memset(&req->msg, 0, sizeof(req->msg));
+ dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ req->tx.tx_submit = sba_tx_submit;
+ req->tx.phys = req->resp_dma;
+ list_add_tail(&req->node, &sba->reqs_free_list);
+ }
+
+ sba->reqs_free_count = sba->max_req;
+
+ return 0;
+
+fail_free_cmds_pool:
+ dma_free_coherent(sba->dma_dev.dev,
+ sba->max_cmds_pool_size,
+ sba->cmds_base, sba->cmds_dma_base);
+fail_free_resp_pool:
+ dma_free_coherent(sba->dma_dev.dev,
+ sba->max_resp_pool_size,
+ sba->resp_base, sba->resp_dma_base);
+ return ret;
+}
+
+static void sba_freeup_channel_resources(struct sba_device *sba)
+{
+ dmaengine_terminate_all(&sba->dma_chan);
+ dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
+ sba->cmds_base, sba->cmds_dma_base);
+ dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
+ sba->resp_base, sba->resp_dma_base);
+ sba->resp_base = NULL;
+ sba->resp_dma_base = 0;
+}
+
+static int sba_async_register(struct sba_device *sba)
+{
+ int ret;
+ struct dma_device *dma_dev = &sba->dma_dev;
+
+ /* Initialize DMA channel cookie */
+ sba->dma_chan.device = dma_dev;
+ dma_cookie_init(&sba->dma_chan);
+
+ /* Initialize DMA device capability mask */
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ dma_cap_set(DMA_PQ, dma_dev->cap_mask);
+
+ /*
+ * Set mailbox channel device as the base device of
+ * our dma_device because the actual memory accesses
+ * will be done by mailbox controller
+ */
+ dma_dev->dev = sba->mbox_dev;
+
+ /* Set base prep routines */
+ dma_dev->device_free_chan_resources = sba_free_chan_resources;
+ dma_dev->device_terminate_all = sba_device_terminate_all;
+ dma_dev->device_issue_pending = sba_issue_pending;
+ dma_dev->device_tx_status = sba_tx_status;
+
+ /* Set interrupt routine */
+ if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
+
+ /* Set memcpy routine */
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
+
+ /* Set xor routine and capability */
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
+ dma_dev->max_xor = sba->max_xor_srcs;
+ }
+
+ /* Set pq routine and capability */
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
+ dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
+ }
+
+ /* Initialize DMA device channel list */
+ INIT_LIST_HEAD(&dma_dev->channels);
+ list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
+
+ /* Register with Linux async DMA framework*/
+ ret = dma_async_device_register(dma_dev);
+ if (ret) {
+ dev_err(sba->dev, "async device register error %d", ret);
+ return ret;
+ }
+
+ dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
+ dma_chan_name(&sba->dma_chan),
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
+
+ return 0;
+}
+
+static int sba_probe(struct platform_device *pdev)
+{
+ int i, ret = 0, mchans_count;
+ struct sba_device *sba;
+ struct platform_device *mbox_pdev;
+ struct of_phandle_args args;
+
+ /* Allocate main SBA struct */
+ sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
+ if (!sba)
+ return -ENOMEM;
+
+ sba->dev = &pdev->dev;
+ platform_set_drvdata(pdev, sba);
+
+ /* Determine SBA version from DT compatible string */
+ if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
+ sba->ver = SBA_VER_1;
+ else if (of_device_is_compatible(sba->dev->of_node,
+ "brcm,iproc-sba-v2"))
+ sba->ver = SBA_VER_2;
+ else
+ return -ENODEV;
+
+ /* Derived Configuration parameters */
+ switch (sba->ver) {
+ case SBA_VER_1:
+ sba->max_req = 1024;
+ sba->hw_buf_size = 4096;
+ sba->hw_resp_size = 8;
+ sba->max_pq_coefs = 6;
+ sba->max_pq_srcs = 6;
+ break;
+ case SBA_VER_2:
+ sba->max_req = 1024;
+ sba->hw_buf_size = 4096;
+ sba->hw_resp_size = 8;
+ sba->max_pq_coefs = 30;
+ /*
+ * We cannot support max_pq_srcs == max_pq_coefs because we
+ * are limited by the number of SBA commands that we can fit
+ * in one message for the underlying ring manager HW.
+ */
+ sba->max_pq_srcs = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ sba->max_cmd_per_req = sba->max_pq_srcs + 3;
+ sba->max_xor_srcs = sba->max_cmd_per_req - 1;
+ sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
+ sba->max_cmds_pool_size = sba->max_req *
+ sba->max_cmd_per_req * sizeof(u64);
+
+ /* Setup mailbox client */
+ sba->client.dev = &pdev->dev;
+ sba->client.rx_callback = sba_receive_message;
+ sba->client.tx_block = false;
+ sba->client.knows_txdone = false;
+ sba->client.tx_tout = 0;
+
+ /* Number of channels equals number of mailbox channels */
+ ret = of_count_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells");
+ if (ret <= 0)
+ return -ENODEV;
+ mchans_count = ret;
+ sba->mchans_count = 0;
+ atomic_set(&sba->mchans_current, 0);
+
+ /* Allocate mailbox channel array */
+ sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
+ sizeof(*sba->mchans), GFP_KERNEL);
+ if (!sba->mchans)
+ return -ENOMEM;
+
+ /* Request mailbox channels */
+ for (i = 0; i < mchans_count; i++) {
+ sba->mchans[i] = mbox_request_channel(&sba->client, i);
+ if (IS_ERR(sba->mchans[i])) {
+ ret = PTR_ERR(sba->mchans[i]);
+ goto fail_free_mchans;
+ }
+ sba->mchans_count++;
+ }
+
+ /* Find out the underlying mailbox device */
+ ret = of_parse_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells", 0, &args);
+ if (ret)
+ goto fail_free_mchans;
+ mbox_pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!mbox_pdev) {
+ ret = -ENODEV;
+ goto fail_free_mchans;
+ }
+ sba->mbox_dev = &mbox_pdev->dev;
+
+ /* All mailbox channels should belong to the same ring manager device */
+ for (i = 1; i < mchans_count; i++) {
+ ret = of_parse_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells", i, &args);
+ if (ret)
+ goto fail_free_mchans;
+ mbox_pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (sba->mbox_dev != &mbox_pdev->dev) {
+ ret = -EINVAL;
+ goto fail_free_mchans;
+ }
+ }
+
+ /* Register DMA device with linux async framework */
+ ret = sba_async_register(sba);
+ if (ret)
+ goto fail_free_mchans;
+
+ /* Prealloc channel resource */
+ ret = sba_prealloc_channel_resources(sba);
+ if (ret)
+ goto fail_async_dev_unreg;
+
+ /* Print device info */
+ dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+ dma_chan_name(&sba->dma_chan), sba->ver+1,
+ sba->mchans_count);
+
+ return 0;
+
+fail_async_dev_unreg:
+ dma_async_device_unregister(&sba->dma_dev);
+fail_free_mchans:
+ for (i = 0; i < sba->mchans_count; i++)
+ mbox_free_channel(sba->mchans[i]);
+ return ret;
+}
+
+static int sba_remove(struct platform_device *pdev)
+{
+ int i;
+ struct sba_device *sba = platform_get_drvdata(pdev);
+
+ sba_freeup_channel_resources(sba);
+
+ dma_async_device_unregister(&sba->dma_dev);
+
+ for (i = 0; i < sba->mchans_count; i++)
+ mbox_free_channel(sba->mchans[i]);
+
+ return 0;
+}
+
+static const struct of_device_id sba_of_match[] = {
+ { .compatible = "brcm,iproc-sba", },
+ { .compatible = "brcm,iproc-sba-v2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sba_of_match);
+
+static struct platform_driver sba_driver = {
+ .probe = sba_probe,
+ .remove = sba_remove,
+ .driver = {
+ .name = "bcm-sba-raid",
+ .of_match_table = sba_of_match,
+ },
+};
+module_platform_driver(sba_driver);
+
+MODULE_DESCRIPTION("Broadcom SBA RAID driver");
+MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 5a37b9fcf40d..04b9728c1d26 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,17 +6,12 @@ config DW_DMAC_CORE
tristate
select DMA_ENGINE
-config DW_DMAC_BIG_ENDIAN_IO
- bool
-
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
select DW_DMAC_CORE
- select DW_DMAC_BIG_ENDIAN_IO if AVR32
- default y if CPU_AT32AP7000
help
Support the Synopsys DesignWare AHB DMA controller. This
- can be integrated in chips such as the Atmel AT32ap7000.
+ can be integrated in chips such as the Intel Cherrytrail.
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e500950dad82..f43e6dafe446 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -561,92 +561,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc, true);
}
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- return channel_readl(dwc, SAR);
-}
-EXPORT_SYMBOL(dw_dma_get_src_addr);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- return channel_readl(dwc, DAR);
-}
-EXPORT_SYMBOL(dw_dma_get_dst_addr);
-
-/* Called with dwc->lock held and all DMAC interrupts disabled */
-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
- u32 status_block, u32 status_err, u32 status_xfer)
-{
- unsigned long flags;
-
- if (status_block & dwc->mask) {
- void (*callback)(void *param);
- void *callback_param;
-
- dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
- channel_readl(dwc, LLP));
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-
- callback = dwc->cdesc->period_callback;
- callback_param = dwc->cdesc->period_callback_param;
-
- if (callback)
- callback(callback_param);
- }
-
- /*
- * Error and transfer complete are highly unlikely, and will most
- * likely be due to a configuration error by the user.
- */
- if (unlikely(status_err & dwc->mask) ||
- unlikely(status_xfer & dwc->mask)) {
- unsigned int i;
-
- dev_err(chan2dev(&dwc->chan),
- "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
- status_xfer ? "xfer" : "error");
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc_dump_chan_regs(dwc);
-
- dwc_chan_disable(dw, dwc);
-
- /* Make sure DMA does not restart by loading a new list */
- channel_writel(dwc, LLP, 0);
- channel_writel(dwc, CTL_LO, 0);
- channel_writel(dwc, CTL_HI, 0);
-
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
- dma_writel(dw, CLEAR.ERROR, dwc->mask);
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- for (i = 0; i < dwc->cdesc->periods; i++)
- dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- }
-
- /* Re-enable interrupts */
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-}
-
-/* ------------------------------------------------------------------------- */
-
static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
struct dw_dma_chan *dwc;
- u32 status_block;
u32 status_xfer;
u32 status_err;
unsigned int i;
- status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
@@ -655,8 +577,7 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
- dwc_handle_cyclic(dw, dwc, status_block, status_err,
- status_xfer);
+ dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if (status_xfer & (1 << i))
@@ -1264,255 +1185,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-/**
- * dw_dma_cyclic_start - start the cyclic DMA transfer
- * @chan: the DMA channel to start
- *
- * Must be called with soft interrupts disabled. Returns zero on success or
- * -errno on failure.
- */
-int dw_dma_cyclic_start(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(chan->device);
- unsigned long flags;
-
- if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
- dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- /* Enable interrupts to perform cyclic transfer */
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-
- dwc_dostart(dwc, dwc->cdesc->desc[0]);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_start);
-
-/**
- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
- * @chan: the DMA channel to stop
- *
- * Must be called with soft interrupts disabled.
- */
-void dw_dma_cyclic_stop(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc_chan_disable(dw, dwc);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_stop);
-
-/**
- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- *
- * Must be called before trying to start the transfer. Returns a valid struct
- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
- */
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- struct dw_cyclic_desc *cdesc;
- struct dw_cyclic_desc *retval = NULL;
- struct dw_desc *desc;
- struct dw_desc *last = NULL;
- u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
- unsigned long was_cyclic;
- unsigned int reg_width;
- unsigned int periods;
- unsigned int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- if (dwc->nollp) {
- spin_unlock_irqrestore(&dwc->lock, flags);
- dev_dbg(chan2dev(&dwc->chan),
- "channel doesn't support LLP transfers\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_irqrestore(&dwc->lock, flags);
- dev_dbg(chan2dev(&dwc->chan),
- "queue and/or active list are not empty\n");
- return ERR_PTR(-EBUSY);
- }
-
- was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_irqrestore(&dwc->lock, flags);
- if (was_cyclic) {
- dev_dbg(chan2dev(&dwc->chan),
- "channel already prepared for cyclic DMA\n");
- return ERR_PTR(-EBUSY);
- }
-
- retval = ERR_PTR(-EINVAL);
-
- if (unlikely(!is_slave_direction(direction)))
- goto out_err;
-
- dwc->direction = direction;
-
- if (direction == DMA_MEM_TO_DEV)
- reg_width = __ffs(sconfig->dst_addr_width);
- else
- reg_width = __ffs(sconfig->src_addr_width);
-
- periods = buf_len / period_len;
-
- /* Check for too big/unaligned periods and unaligned DMA buffer. */
- if (period_len > (dwc->block_size << reg_width))
- goto out_err;
- if (unlikely(period_len & ((1 << reg_width) - 1)))
- goto out_err;
- if (unlikely(buf_addr & ((1 << reg_width) - 1)))
- goto out_err;
-
- retval = ERR_PTR(-ENOMEM);
-
- cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
- if (!cdesc)
- goto out_err;
-
- cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
- if (!cdesc->desc)
- goto out_err_alloc;
-
- for (i = 0; i < periods; i++) {
- desc = dwc_desc_get(dwc);
- if (!desc)
- goto out_err_desc_get;
-
- switch (direction) {
- case DMA_MEM_TO_DEV:
- lli_write(desc, dar, sconfig->dst_addr);
- lli_write(desc, sar, buf_addr + period_len * i);
- lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
- | DWC_CTLL_DST_WIDTH(reg_width)
- | DWC_CTLL_SRC_WIDTH(reg_width)
- | DWC_CTLL_DST_FIX
- | DWC_CTLL_SRC_INC
- | DWC_CTLL_INT_EN));
-
- lli_set(desc, ctllo, sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
- DWC_CTLL_FC(DW_DMA_FC_D_M2P));
-
- break;
- case DMA_DEV_TO_MEM:
- lli_write(desc, dar, buf_addr + period_len * i);
- lli_write(desc, sar, sconfig->src_addr);
- lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
- | DWC_CTLL_SRC_WIDTH(reg_width)
- | DWC_CTLL_DST_WIDTH(reg_width)
- | DWC_CTLL_DST_INC
- | DWC_CTLL_SRC_FIX
- | DWC_CTLL_INT_EN));
-
- lli_set(desc, ctllo, sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
- DWC_CTLL_FC(DW_DMA_FC_D_P2M));
-
- break;
- default:
- break;
- }
-
- lli_write(desc, ctlhi, period_len >> reg_width);
- cdesc->desc[i] = desc;
-
- if (last)
- lli_write(last, llp, desc->txd.phys | lms);
-
- last = desc;
- }
-
- /* Let's make a cyclic list */
- lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
-
- dev_dbg(chan2dev(&dwc->chan),
- "cyclic prepared buf %pad len %zu period %zu periods %d\n",
- &buf_addr, buf_len, period_len, periods);
-
- cdesc->periods = periods;
- dwc->cdesc = cdesc;
-
- return cdesc;
-
-out_err_desc_get:
- while (i--)
- dwc_desc_put(dwc, cdesc->desc[i]);
-out_err_alloc:
- kfree(cdesc);
-out_err:
- clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- return (struct dw_cyclic_desc *)retval;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_prep);
-
-/**
- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
- * @chan: the DMA channel to free
- */
-void dw_dma_cyclic_free(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_cyclic_desc *cdesc = dwc->cdesc;
- unsigned int i;
- unsigned long flags;
-
- dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
-
- if (!cdesc)
- return;
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc_chan_disable(dw, dwc);
-
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
- dma_writel(dw, CLEAR.ERROR, dwc->mask);
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- for (i = 0; i < cdesc->periods; i++)
- dwc_desc_put(dwc, cdesc->desc[i]);
-
- kfree(cdesc->desc);
- kfree(cdesc);
-
- dwc->cdesc = NULL;
-
- clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_free);
-
-/*----------------------------------------------------------------------*/
-
int dw_dma_probe(struct dw_dma_chip *chip)
{
struct dw_dma_platform_data *pdata;
@@ -1642,7 +1314,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
if (autocfg) {
unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
- unsigned int dwc_params = dma_readl_native(addr);
+ unsigned int dwc_params = readl(addr);
dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
dwc_params);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c639c60b825a..bc31fe802061 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -306,8 +306,12 @@ static int dw_resume_early(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(chip->clk);
+ if (ret)
+ return ret;
- clk_prepare_enable(chip->clk);
return dw_dma_enable(chip);
}
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 32a328721c88..09e7dfdbb790 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -116,20 +116,6 @@ struct dw_dma_regs {
DW_REG(GLOBAL_CFG);
};
-/*
- * Big endian I/O access when reading and writing to the DMA controller
- * registers. This is needed on some platforms, like the Atmel AVR32
- * architecture.
- */
-
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define dma_readl_native ioread32be
-#define dma_writel_native iowrite32be
-#else
-#define dma_readl_native readl
-#define dma_writel_native writel
-#endif
-
/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN 8 /* number of channels */
#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
@@ -280,7 +266,6 @@ struct dw_dma_chan {
unsigned long flags;
struct list_head active_list;
struct list_head queue;
- struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
@@ -302,9 +287,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
}
#define channel_readl(dwc, name) \
- dma_readl_native(&(__dwc_regs(dwc)->name))
+ readl(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
- dma_writel_native((val), &(__dwc_regs(dwc)->name))
+ writel((val), &(__dwc_regs(dwc)->name))
static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
@@ -333,9 +318,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
}
#define dma_readl(dw, name) \
- dma_readl_native(&(__dw_regs(dw)->name))
+ readl(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
- dma_writel_native((val), &(__dw_regs(dw)->name))
+ writel((val), &(__dw_regs(dw)->name))
#define idma32_readq(dw, name) \
hi_lo_readq(&(__dw_regs(dw)->name))
@@ -352,43 +337,30 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
return container_of(ddev, struct dw_dma, dma);
}
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-typedef __be32 __dw32;
-#else
-typedef __le32 __dw32;
-#endif
-
/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
/* values that are not changed by hardware */
- __dw32 sar;
- __dw32 dar;
- __dw32 llp; /* chain to next lli */
- __dw32 ctllo;
+ __le32 sar;
+ __le32 dar;
+ __le32 llp; /* chain to next lli */
+ __le32 ctllo;
/* values that may get written back: */
- __dw32 ctlhi;
+ __le32 ctlhi;
/* sstat and dstat can snapshot peripheral register state.
* silicon config may discard either or both...
*/
- __dw32 sstat;
- __dw32 dstat;
+ __le32 sstat;
+ __le32 dstat;
};
struct dw_desc {
/* FIRST values the hardware uses */
struct dw_lli lli;
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_be32(v))
-#define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_be32(v))
-#define lli_read(d, reg) be32_to_cpu((d)->lli.reg)
-#define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_be32(v))
-#else
#define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_le32(v))
#define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_le32(v))
#define lli_read(d, reg) le32_to_cpu((d)->lli.reg)
#define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_le32(v))
-#endif
/* THEN values for driver housekeeping */
struct list_head desc_node;
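
The dw/regs.h hunks above drop the AVR32-only big-endian accessor variants, so in-memory descriptor fields are now always little-endian and touched only through cpu_to_le32()/le32_to_cpu(). A minimal sketch of that pattern, with illustrative names (my_lli and its helpers are not part of the driver):

struct my_lli {
	__le32 sar;	/* hardware reads this field in little-endian */
	__le32 dar;
};

static void my_lli_set_sar(struct my_lli *lli, u32 addr)
{
	/* always convert from CPU order; a no-op on little-endian CPUs */
	lli->sar = cpu_to_le32(addr);
}

static u32 my_lli_get_sar(const struct my_lli *lli)
{
	return le32_to_cpu(lli->sar);
}
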
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 90d29f90acfb..493dc6c59d1d 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -877,7 +877,7 @@ static int fsl_re_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id fsl_re_ids[] = {
+static const struct of_device_id fsl_re_ids[] = {
{ .compatible = "fsl,raideng-v1.0", },
{}
};
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 51c75bf2b9b6..3b8b752ede2d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -269,6 +269,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
case 2:
case 4:
case 8:
+ mode &= ~FSL_DMA_MR_SAHTS_MASK;
mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
break;
}
@@ -301,6 +302,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
case 2:
case 4:
case 8:
+ mode &= ~FSL_DMA_MR_DAHTS_MASK;
mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
break;
}
@@ -327,7 +329,8 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
BUG_ON(size > 1024);
mode = get_mr(chan);
- mode |= (__ilog2(size) << 24) & 0x0f000000;
+ mode &= ~FSL_DMA_MR_BWC_MASK;
+ mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;
set_mr(chan, mode);
}
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 31bffccdcc75..4787d485dd76 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -36,6 +36,10 @@
#define FSL_DMA_MR_DAHE 0x00002000
#define FSL_DMA_MR_SAHE 0x00001000
+#define FSL_DMA_MR_SAHTS_MASK 0x0000C000
+#define FSL_DMA_MR_DAHTS_MASK 0x00030000
+#define FSL_DMA_MR_BWC_MASK 0x0f000000
+
/*
* Bandwidth/pause control determines how many bytes a given
* channel is allowed to transfer before the DMA engine pauses
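
The fsldma change above adds explicit field masks so each multi-bit field in the mode register is cleared before the new loop-size or bandwidth value is OR-ed in; otherwise stale bits from an earlier setting could combine with the new ones. A minimal sketch of that read-modify-write pattern, with illustrative names (FIELD_MASK, FIELD_SHIFT and set_field are not driver symbols):

#define FIELD_MASK	0x0f000000
#define FIELD_SHIFT	24

static u32 set_field(u32 reg, u32 val)
{
	reg &= ~FIELD_MASK;				/* drop whatever was set before */
	reg |= (val << FIELD_SHIFT) & FIELD_MASK;	/* insert the new value */
	return reg;
}
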
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ab0fb804fb1e..f681df8f0ed3 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -888,7 +888,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
sg_init_table(imxdmac->sg_list, periods);
for (i = 0; i < periods; i++) {
- imxdmac->sg_list[i].page_link = 0;
+ sg_assign_page(&imxdmac->sg_list[i], NULL);
imxdmac->sg_list[i].offset = 0;
imxdmac->sg_list[i].dma_address = dma_addr;
sg_dma_len(&imxdmac->sg_list[i]) = period_len;
@@ -896,10 +896,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
}
/* close the loop */
- imxdmac->sg_list[periods].offset = 0;
- sg_dma_len(&imxdmac->sg_list[periods]) = 0;
- imxdmac->sg_list[periods].page_link =
- ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
+ sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);
desc->type = IMXDMA_DESC_CYCLIC;
desc->sg = imxdmac->sg_list;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 085993cb2ccc..a67ec1bdc4e0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1323,7 +1323,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
}
if (period_len > 0xffff) {
- dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
+ dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
channel, period_len, 0xffff);
goto err_out;
}
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
if (i + 1 == num_periods)
param |= BD_WRAP;
- dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
+ dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
i, period_len, (u64)dma_addr,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@@ -1755,19 +1755,26 @@ static int sdma_probe(struct platform_device *pdev)
if (IS_ERR(sdma->clk_ahb))
return PTR_ERR(sdma->clk_ahb);
- clk_prepare(sdma->clk_ipg);
- clk_prepare(sdma->clk_ahb);
+ ret = clk_prepare(sdma->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(sdma->clk_ahb);
+ if (ret)
+ goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
sdma);
if (ret)
- return ret;
+ goto err_irq;
sdma->irq = irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
- return -ENOMEM;
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
@@ -1882,6 +1889,10 @@ err_register:
dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
+err_irq:
+ clk_unprepare(sdma->clk_ahb);
+err_clk:
+ clk_unprepare(sdma->clk_ipg);
return ret;
}
@@ -1893,6 +1904,8 @@ static int sdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
+ clk_unprepare(sdma->clk_ahb);
+ clk_unprepare(sdma->clk_ipg);
/* Kill the tasklet */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
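
The imx-sdma hunks above pair every clk_prepare() with a clk_unprepare() on the probe error paths and in remove. A minimal sketch of that goto-based unwind, assuming hypothetical clk_a/clk_b handles and a request_something() step that are not driver symbols:

static int my_probe(struct platform_device *pdev)
{
	int ret;

	ret = clk_prepare(clk_a);
	if (ret)
		return ret;

	ret = clk_prepare(clk_b);
	if (ret)
		goto err_clk_a;

	ret = request_something();
	if (ret)
		goto err_clk_b;

	return 0;

err_clk_b:
	clk_unprepare(clk_b);		/* undo in reverse order of acquisition */
err_clk_a:
	clk_unprepare(clk_a);
	return ret;
}
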
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 0b9b6b07db9e..eab2fdda29ec 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -336,10 +336,10 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
}
if (dca3_tag_map_invalid(ioatdca->tag_map)) {
- WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
- "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
- dev_driver_string(&pdev->dev),
- dev_name(&pdev->dev));
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+ dev_driver_string(&pdev->dev),
+ dev_name(&pdev->dev));
free_dca_provider(dca);
return NULL;
}
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 5072a7d306d4..34fb6afd229b 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA DMA engine interface
*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -210,6 +210,7 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
INIT_LIST_HEAD(&mchan->prepared);
INIT_LIST_HEAD(&mchan->active);
INIT_LIST_HEAD(&mchan->completed);
+ INIT_LIST_HEAD(&mchan->queued);
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &ddev->channels);
@@ -230,9 +231,15 @@ static void hidma_issue_pending(struct dma_chan *dmach)
struct hidma_chan *mchan = to_hidma_chan(dmach);
struct hidma_dev *dmadev = mchan->dmadev;
unsigned long flags;
+ struct hidma_desc *qdesc, *next;
int status;
spin_lock_irqsave(&mchan->lock, flags);
+ list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
+ hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
+ list_move_tail(&qdesc->node, &mchan->active);
+ }
+
if (!mchan->running) {
struct hidma_desc *desc = list_first_entry(&mchan->active,
struct hidma_desc,
@@ -315,17 +322,18 @@ static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return -ENODEV;
}
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
mdesc = container_of(txd, struct hidma_desc, desc);
spin_lock_irqsave(&mchan->lock, irqflags);
- /* Move descriptor to active */
- list_move_tail(&mdesc->node, &mchan->active);
+ /* Move descriptor to queued */
+ list_move_tail(&mdesc->node, &mchan->queued);
/* Update cookie */
cookie = dma_cookie_assign(txd);
- hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
spin_unlock_irqrestore(&mchan->lock, irqflags);
return cookie;
@@ -431,6 +439,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
+ list_splice_init(&mchan->queued, &list);
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* this suspends the existing transfer */
@@ -795,8 +804,11 @@ static int hidma_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
- if (!dmadev->nr_descriptors && nr_desc_prm)
+ if (nr_desc_prm) {
+ dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
+ nr_desc_prm);
dmadev->nr_descriptors = nr_desc_prm;
+ }
if (!dmadev->nr_descriptors)
dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
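
The hidma.c changes above stop touching the hardware from tx_submit(): the descriptor is parked on a new queued list and only handed to the engine from issue_pending(), matching the dmaengine expectation that submit merely assigns a cookie. A minimal sketch of that split, with illustrative types (my_chan, my_desc, to_my_chan and my_hw_queue are not HIDMA symbols):

static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct my_desc *d = container_of(txd, struct my_desc, txd);
	struct my_chan *c = to_my_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&c->lock, flags);
	list_move_tail(&d->node, &c->queued);	/* software queue only */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&c->lock, flags);

	return cookie;
}

static void my_issue_pending(struct dma_chan *chan)
{
	struct my_chan *c = to_my_chan(chan);
	struct my_desc *d, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	list_for_each_entry_safe(d, tmp, &c->queued, node) {
		my_hw_queue(c, d);			/* now touch the hardware */
		list_move_tail(&d->node, &c->active);
	}
	spin_unlock_irqrestore(&c->lock, flags);
}
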
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index c7d014235c32..41e0aa283828 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -104,6 +104,7 @@ struct hidma_chan {
struct dma_chan chan;
struct list_head free;
struct list_head prepared;
+ struct list_head queued;
struct list_head active;
struct list_head completed;
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index f847d32cc4b5..5a0991bc4787 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA DMA engine Management interface
*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,6 +49,26 @@
#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
#define HIDMA_MAX_CHANNEL_WEIGHT 15
+static unsigned int max_write_request;
+module_param(max_write_request, uint, 0644);
+MODULE_PARM_DESC(max_write_request,
+ "maximum write burst (default: ACPI/DT value)");
+
+static unsigned int max_read_request;
+module_param(max_read_request, uint, 0644);
+MODULE_PARM_DESC(max_read_request,
+ "maximum read burst (default: ACPI/DT value)");
+
+static unsigned int max_wr_xactions;
+module_param(max_wr_xactions, uint, 0644);
+MODULE_PARM_DESC(max_wr_xactions,
+ "maximum number of write transactions (default: ACPI/DT value)");
+
+static unsigned int max_rd_xactions;
+module_param(max_rd_xactions, uint, 0644);
+MODULE_PARM_DESC(max_rd_xactions,
+ "maximum number of read transactions (default: ACPI/DT value)");
+
int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
unsigned int i;
@@ -207,12 +227,25 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
goto out;
}
+ if (max_write_request) {
+ dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
+ max_write_request);
+ mgmtdev->max_write_request = max_write_request;
+ } else
+ max_write_request = mgmtdev->max_write_request;
+
rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
&mgmtdev->max_read_request);
if (rc) {
dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
goto out;
}
+ if (max_read_request) {
+ dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
+ max_read_request);
+ mgmtdev->max_read_request = max_read_request;
+ } else
+ max_read_request = mgmtdev->max_read_request;
rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
&mgmtdev->max_wr_xactions);
@@ -220,6 +253,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-write-transactions missing\n");
goto out;
}
+ if (max_wr_xactions) {
+ dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
+ max_wr_xactions);
+ mgmtdev->max_wr_xactions = max_wr_xactions;
+ } else
+ max_wr_xactions = mgmtdev->max_wr_xactions;
rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
&mgmtdev->max_rd_xactions);
@@ -227,6 +266,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-transactions missing\n");
goto out;
}
+ if (max_rd_xactions) {
+ dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
+ max_rd_xactions);
+ mgmtdev->max_rd_xactions = max_rd_xactions;
+ } else
+ max_rd_xactions = mgmtdev->max_rd_xactions;
mgmtdev->priority = devm_kcalloc(&pdev->dev,
mgmtdev->dma_channels,
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index a6620b671d1d..c3052fbfd092 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2528,10 +2528,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
dma_addr += period_len;
}
- sg[periods].offset = 0;
- sg_dma_len(&sg[periods]) = 0;
- sg[periods].page_link =
- ((unsigned long)sg | 0x01) & ~0x02;
+ sg_chain(sg, periods + 1, sg);
txd = d40_prep_sg(chan, sg, sg, periods, direction,
DMA_PREP_INTERRUPT);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3722b9d8d9fe..b9d75a54c896 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1494,35 +1494,7 @@ static int tegra_dma_remove(struct platform_device *pdev)
static int tegra_dma_runtime_suspend(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
-
- clk_disable_unprepare(tdma->dma_clk);
- return 0;
-}
-
-static int tegra_dma_runtime_resume(struct device *dev)
-{
- struct tegra_dma *tdma = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(tdma->dma_clk);
- if (ret < 0) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tegra_dma_pm_suspend(struct device *dev)
-{
- struct tegra_dma *tdma = dev_get_drvdata(dev);
int i;
- int ret;
-
- /* Enable clock before accessing register */
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- return ret;
tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
for (i = 0; i < tdma->chip_data->nr_channels; i++) {
@@ -1543,21 +1515,21 @@ static int tegra_dma_pm_suspend(struct device *dev)
TEGRA_APBDMA_CHAN_WCOUNT);
}
- /* Disable clock */
- pm_runtime_put(dev);
+ clk_disable_unprepare(tdma->dma_clk);
+
return 0;
}
-static int tegra_dma_pm_resume(struct device *dev)
+static int tegra_dma_runtime_resume(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
- int i;
- int ret;
+ int i, ret;
- /* Enable clock before accessing register */
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
+ }
tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
@@ -1582,16 +1554,14 @@ static int tegra_dma_pm_resume(struct device *dev)
(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
}
- /* Disable clock */
- pm_runtime_put(dev);
return 0;
}
-#endif
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
NULL)
- SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id tegra_dma_of_match[] = {
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 6d221e5c72ee..47f64192d2fd 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -794,9 +794,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
chan = to_chan(dchan);
- if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
- return NULL;
-
desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
spin_lock_bh(&chan->lock);
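
The zynqmp change above drops the hard length limit because a long memcpy is already split across DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN) descriptors. A minimal sketch of that chunking loop, assuming len is the requested transfer length (the descriptor programming itself is omitted):

	unsigned int desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
	size_t off = 0;
	unsigned int i;

	for (i = 0; i < desc_cnt; i++) {
		size_t chunk = min_t(size_t, len - off,
				     ZYNQMP_DMA_MAX_TRANS_LEN);

		/* program descriptor i to cover [off, off + chunk) */
		off += chunk;
	}
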
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index b63b25814d77..e166cac8e870 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -50,25 +50,4 @@ static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
#endif /* CONFIG_DW_DMAC_CORE */
-/* DMA API extensions */
-struct dw_desc;
-
-struct dw_cyclic_desc {
- struct dw_desc **desc;
- unsigned long periods;
- void (*period_callback)(void *param);
- void *period_callback_param;
-};
-
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction);
-void dw_dma_cyclic_free(struct dma_chan *chan);
-int dw_dma_cyclic_start(struct dma_chan *chan);
-void dw_dma_cyclic_stop(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-
#endif /* _DMA_DW_H */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 4d57bbaaa1bf..30f945329818 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -142,6 +142,7 @@ int raid6_select_algo(void);
extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
+extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 39787db588b0..e824d088f72c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -125,6 +125,26 @@ int main(int argc, char *argv[])
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
printf("#endif\n");
+ /* Compute log-of-2 table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gflog[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ v = 255;
+ for (k = 0; k < 256; k++)
+ if (exptbl[k] == (i + j)) {
+ v = k;
+ break;
+ }
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gflog);\n");
+ printf("#endif\n");
+
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfinv[256] =\n" "{\n");
diff --git a/sound/atmel/Kconfig b/sound/atmel/Kconfig
index 94de43a096f1..d789cbcb9106 100644
--- a/sound/atmel/Kconfig
+++ b/sound/atmel/Kconfig
@@ -1,18 +1,11 @@
-menu "Atmel devices (AVR32 and AT91)"
- depends on AVR32 || ARCH_AT91
-
-config SND_ATMEL_ABDAC
- tristate "Atmel Audio Bitstream DAC (ABDAC) driver"
- select SND_PCM
- depends on DW_DMAC && AVR32
- help
- ALSA sound driver for the Atmel Audio Bitstream DAC (ABDAC).
+menu "Atmel devices (AT91)"
+ depends on ARCH_AT91
config SND_ATMEL_AC97C
tristate "Atmel AC97 Controller (AC97C) driver"
select SND_PCM
select SND_AC97_CODEC
- depends on (DW_DMAC && AVR32) || ARCH_AT91
+ depends on ARCH_AT91
help
ALSA sound driver for the Atmel AC97 controller.
diff --git a/sound/atmel/Makefile b/sound/atmel/Makefile
index 219dcfac6086..d4009d1430ed 100644
--- a/sound/atmel/Makefile
+++ b/sound/atmel/Makefile
@@ -1,5 +1,3 @@
-snd-atmel-abdac-objs := abdac.o
snd-atmel-ac97c-objs := ac97c.o
-obj-$(CONFIG_SND_ATMEL_ABDAC) += snd-atmel-abdac.o
obj-$(CONFIG_SND_ATMEL_AC97C) += snd-atmel-ac97c.o
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
deleted file mode 100644
index 558618802000..000000000000
--- a/sound/atmel/abdac.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Driver for the Atmel on-chip Audio Bitstream DAC (ABDAC)
- *
- * Copyright (C) 2006-2009 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-#include <linux/clk.h>
-#include <linux/bitmap.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/atmel-abdac.h>
-
-#include <linux/platform_data/dma-dw.h>
-#include <linux/dma/dw.h>
-
-/* DAC register offsets */
-#define DAC_DATA 0x0000
-#define DAC_CTRL 0x0008
-#define DAC_INT_MASK 0x000c
-#define DAC_INT_EN 0x0010
-#define DAC_INT_DIS 0x0014
-#define DAC_INT_CLR 0x0018
-#define DAC_INT_STATUS 0x001c
-
-/* Bitfields in CTRL */
-#define DAC_SWAP_OFFSET 30
-#define DAC_SWAP_SIZE 1
-#define DAC_EN_OFFSET 31
-#define DAC_EN_SIZE 1
-
-/* Bitfields in INT_MASK/INT_EN/INT_DIS/INT_STATUS/INT_CLR */
-#define DAC_UNDERRUN_OFFSET 28
-#define DAC_UNDERRUN_SIZE 1
-#define DAC_TX_READY_OFFSET 29
-#define DAC_TX_READY_SIZE 1
-
-/* Bit manipulation macros */
-#define DAC_BIT(name) \
- (1 << DAC_##name##_OFFSET)
-#define DAC_BF(name, value) \
- (((value) & ((1 << DAC_##name##_SIZE) - 1)) \
- << DAC_##name##_OFFSET)
-#define DAC_BFEXT(name, value) \
- (((value) >> DAC_##name##_OFFSET) \
- & ((1 << DAC_##name##_SIZE) - 1))
-#define DAC_BFINS(name, value, old) \
- (((old) & ~(((1 << DAC_##name##_SIZE) - 1) \
- << DAC_##name##_OFFSET)) \
- | DAC_BF(name, value))
-
-/* Register access macros */
-#define dac_readl(port, reg) \
- __raw_readl((port)->regs + DAC_##reg)
-#define dac_writel(port, reg, value) \
- __raw_writel((value), (port)->regs + DAC_##reg)
-
-/*
- * ABDAC supports a maximum of 6 different rates from a generic clock. The
- * generic clock has a power of two divider, which gives 6 steps from 192 kHz
- * to 5112 Hz.
- */
-#define MAX_NUM_RATES 6
-/* ALSA seems to use rates between 192000 Hz and 5112 Hz. */
-#define RATE_MAX 192000
-#define RATE_MIN 5112
-
-enum {
- DMA_READY = 0,
-};
-
-struct atmel_abdac_dma {
- struct dma_chan *chan;
- struct dw_cyclic_desc *cdesc;
-};
-
-struct atmel_abdac {
- struct clk *pclk;
- struct clk *sample_clk;
- struct platform_device *pdev;
- struct atmel_abdac_dma dma;
-
- struct snd_pcm_hw_constraint_list constraints_rates;
- struct snd_pcm_substream *substream;
- struct snd_card *card;
- struct snd_pcm *pcm;
-
- void __iomem *regs;
- unsigned long flags;
- unsigned int rates[MAX_NUM_RATES];
- unsigned int rates_num;
- int irq;
-};
-
-#define get_dac(card) ((struct atmel_abdac *)(card)->private_data)
-
-/* This function is called by the DMA driver. */
-static void atmel_abdac_dma_period_done(void *arg)
-{
- struct atmel_abdac *dac = arg;
- snd_pcm_period_elapsed(dac->substream);
-}
-
-static int atmel_abdac_prepare_dma(struct atmel_abdac *dac,
- struct snd_pcm_substream *substream,
- enum dma_data_direction direction)
-{
- struct dma_chan *chan = dac->dma.chan;
- struct dw_cyclic_desc *cdesc;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned long buffer_len, period_len;
-
- /*
- * We don't do DMA on "complex" transfers, i.e. with
- * non-halfword-aligned buffers or lengths.
- */
- if (runtime->dma_addr & 1 || runtime->buffer_size & 1) {
- dev_dbg(&dac->pdev->dev, "too complex transfer\n");
- return -EINVAL;
- }
-
- buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
- period_len = frames_to_bytes(runtime, runtime->period_size);
-
- cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
- period_len, DMA_MEM_TO_DEV);
- if (IS_ERR(cdesc)) {
- dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n");
- return PTR_ERR(cdesc);
- }
-
- cdesc->period_callback = atmel_abdac_dma_period_done;
- cdesc->period_callback_param = dac;
-
- dac->dma.cdesc = cdesc;
-
- set_bit(DMA_READY, &dac->flags);
-
- return 0;
-}
-
-static struct snd_pcm_hardware atmel_abdac_hw = {
- .info = (SNDRV_PCM_INFO_MMAP
- | SNDRV_PCM_INFO_MMAP_VALID
- | SNDRV_PCM_INFO_INTERLEAVED
- | SNDRV_PCM_INFO_BLOCK_TRANSFER
- | SNDRV_PCM_INFO_RESUME
- | SNDRV_PCM_INFO_PAUSE),
- .formats = (SNDRV_PCM_FMTBIT_S16_BE),
- .rates = (SNDRV_PCM_RATE_KNOT),
- .rate_min = RATE_MIN,
- .rate_max = RATE_MAX,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 64 * 4096,
- .period_bytes_min = 4096,
- .period_bytes_max = 4096,
- .periods_min = 6,
- .periods_max = 64,
-};
-
-static int atmel_abdac_open(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
-
- dac->substream = substream;
- atmel_abdac_hw.rate_max = dac->rates[dac->rates_num - 1];
- atmel_abdac_hw.rate_min = dac->rates[0];
- substream->runtime->hw = atmel_abdac_hw;
-
- return snd_pcm_hw_constraint_list(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE, &dac->constraints_rates);
-}
-
-static int atmel_abdac_close(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- dac->substream = NULL;
- return 0;
-}
-
-static int atmel_abdac_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- int retval;
-
- retval = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (retval < 0)
- return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_READY, &dac->flags))
- dw_dma_cyclic_free(dac->dma.chan);
-
- return retval;
-}
-
-static int atmel_abdac_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- if (test_and_clear_bit(DMA_READY, &dac->flags))
- dw_dma_cyclic_free(dac->dma.chan);
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int atmel_abdac_prepare(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- int retval;
-
- retval = clk_set_rate(dac->sample_clk, 256 * substream->runtime->rate);
- if (retval)
- return retval;
-
- if (!test_bit(DMA_READY, &dac->flags))
- retval = atmel_abdac_prepare_dma(dac, substream, DMA_TO_DEVICE);
-
- return retval;
-}
-
-static int atmel_abdac_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- int retval = 0;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
- case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
- case SNDRV_PCM_TRIGGER_START:
- clk_prepare_enable(dac->sample_clk);
- retval = dw_dma_cyclic_start(dac->dma.chan);
- if (retval)
- goto out;
- dac_writel(dac, CTRL, DAC_BIT(EN));
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
- case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
- case SNDRV_PCM_TRIGGER_STOP:
- dw_dma_cyclic_stop(dac->dma.chan);
- dac_writel(dac, DATA, 0);
- dac_writel(dac, CTRL, 0);
- clk_disable_unprepare(dac->sample_clk);
- break;
- default:
- retval = -EINVAL;
- break;
- }
-out:
- return retval;
-}
-
-static snd_pcm_uframes_t
-atmel_abdac_pointer(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t frames;
- unsigned long bytes;
-
- bytes = dw_dma_get_src_addr(dac->dma.chan);
- bytes -= runtime->dma_addr;
-
- frames = bytes_to_frames(runtime, bytes);
- if (frames >= runtime->buffer_size)
- frames -= runtime->buffer_size;
-
- return frames;
-}
-
-static irqreturn_t abdac_interrupt(int irq, void *dev_id)
-{
- struct atmel_abdac *dac = dev_id;
- u32 status;
-
- status = dac_readl(dac, INT_STATUS);
- if (status & DAC_BIT(UNDERRUN)) {
- dev_err(&dac->pdev->dev, "underrun detected\n");
- dac_writel(dac, INT_CLR, DAC_BIT(UNDERRUN));
- } else {
- dev_err(&dac->pdev->dev, "spurious interrupt (status=0x%x)\n",
- status);
- dac_writel(dac, INT_CLR, status);
- }
-
- return IRQ_HANDLED;
-}
-
-static struct snd_pcm_ops atmel_abdac_ops = {
- .open = atmel_abdac_open,
- .close = atmel_abdac_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = atmel_abdac_hw_params,
- .hw_free = atmel_abdac_hw_free,
- .prepare = atmel_abdac_prepare,
- .trigger = atmel_abdac_trigger,
- .pointer = atmel_abdac_pointer,
-};
-
-static int atmel_abdac_pcm_new(struct atmel_abdac *dac)
-{
- struct snd_pcm_hardware hw = atmel_abdac_hw;
- struct snd_pcm *pcm;
- int retval;
-
- retval = snd_pcm_new(dac->card, dac->card->shortname,
- dac->pdev->id, 1, 0, &pcm);
- if (retval)
- return retval;
-
- strcpy(pcm->name, dac->card->shortname);
- pcm->private_data = dac;
- pcm->info_flags = 0;
- dac->pcm = pcm;
-
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_abdac_ops);
-
- retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- &dac->pdev->dev, hw.periods_min * hw.period_bytes_min,
- hw.buffer_bytes_max);
-
- return retval;
-}
-
-static bool filter(struct dma_chan *chan, void *slave)
-{
- struct dw_dma_slave *dws = slave;
-
- if (dws->dma_dev == chan->device->dev) {
- chan->private = dws;
- return true;
- } else
- return false;
-}
-
-static int set_sample_rates(struct atmel_abdac *dac)
-{
- long new_rate = RATE_MAX;
- int retval = -EINVAL;
- int index = 0;
-
- /* we start at 192 kHz and work our way down to 5112 Hz */
- while (new_rate >= RATE_MIN && index < (MAX_NUM_RATES + 1)) {
- new_rate = clk_round_rate(dac->sample_clk, 256 * new_rate);
- if (new_rate <= 0)
- break;
- /* make sure we are below the ABDAC clock */
- if (index < MAX_NUM_RATES &&
- new_rate <= clk_get_rate(dac->pclk)) {
- dac->rates[index] = new_rate / 256;
- index++;
- }
- /* divide by 256 and then by two to get next rate */
- new_rate /= 256 * 2;
- }
-
- if (index) {
- int i;
-
- /* reverse array, smallest go first */
- for (i = 0; i < (index / 2); i++) {
- unsigned int tmp = dac->rates[index - 1 - i];
- dac->rates[index - 1 - i] = dac->rates[i];
- dac->rates[i] = tmp;
- }
-
- dac->constraints_rates.count = index;
- dac->constraints_rates.list = dac->rates;
- dac->constraints_rates.mask = 0;
- dac->rates_num = index;
-
- retval = 0;
- }
-
- return retval;
-}
-
-static int atmel_abdac_probe(struct platform_device *pdev)
-{
- struct snd_card *card;
- struct atmel_abdac *dac;
- struct resource *regs;
- struct atmel_abdac_pdata *pdata;
- struct clk *pclk;
- struct clk *sample_clk;
- int retval;
- int irq;
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_dbg(&pdev->dev, "no memory resource\n");
- return -ENXIO;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_dbg(&pdev->dev, "could not get IRQ number\n");
- return irq;
- }
-
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_dbg(&pdev->dev, "no platform data\n");
- return -ENXIO;
- }
-
- pclk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(pclk)) {
- dev_dbg(&pdev->dev, "no peripheral clock\n");
- return PTR_ERR(pclk);
- }
- sample_clk = clk_get(&pdev->dev, "sample_clk");
- if (IS_ERR(sample_clk)) {
- dev_dbg(&pdev->dev, "no sample clock\n");
- retval = PTR_ERR(sample_clk);
- goto out_put_pclk;
- }
- clk_prepare_enable(pclk);
-
- retval = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1,
- SNDRV_DEFAULT_STR1, THIS_MODULE,
- sizeof(struct atmel_abdac), &card);
- if (retval) {
- dev_dbg(&pdev->dev, "could not create sound card device\n");
- goto out_put_sample_clk;
- }
-
- dac = get_dac(card);
-
- dac->irq = irq;
- dac->card = card;
- dac->pclk = pclk;
- dac->sample_clk = sample_clk;
- dac->pdev = pdev;
-
- retval = set_sample_rates(dac);
- if (retval < 0) {
- dev_dbg(&pdev->dev, "could not set supported rates\n");
- goto out_free_card;
- }
-
- dac->regs = ioremap(regs->start, resource_size(regs));
- if (!dac->regs) {
- dev_dbg(&pdev->dev, "could not remap register memory\n");
- retval = -ENOMEM;
- goto out_free_card;
- }
-
- /* make sure the DAC is silent and disabled */
- dac_writel(dac, DATA, 0);
- dac_writel(dac, CTRL, 0);
-
- retval = request_irq(irq, abdac_interrupt, 0, "abdac", dac);
- if (retval) {
- dev_dbg(&pdev->dev, "could not request irq\n");
- goto out_unmap_regs;
- }
-
- if (pdata->dws.dma_dev) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws);
- if (dac->dma.chan) {
- struct dma_slave_config dma_conf = {
- .dst_addr = regs->start + DAC_DATA,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .src_maxburst = 1,
- .dst_maxburst = 1,
- .direction = DMA_MEM_TO_DEV,
- .device_fc = false,
- };
-
- dmaengine_slave_config(dac->dma.chan, &dma_conf);
- }
- }
- if (!pdata->dws.dma_dev || !dac->dma.chan) {
- dev_dbg(&pdev->dev, "DMA not available\n");
- retval = -ENODEV;
- goto out_unmap_regs;
- }
-
- strcpy(card->driver, "Atmel ABDAC");
- strcpy(card->shortname, "Atmel ABDAC");
- sprintf(card->longname, "Atmel Audio Bitstream DAC");
-
- retval = atmel_abdac_pcm_new(dac);
- if (retval) {
- dev_dbg(&pdev->dev, "could not register ABDAC pcm device\n");
- goto out_release_dma;
- }
-
- retval = snd_card_register(card);
- if (retval) {
- dev_dbg(&pdev->dev, "could not register sound card\n");
- goto out_release_dma;
- }
-
- platform_set_drvdata(pdev, card);
-
- dev_info(&pdev->dev, "Atmel ABDAC at 0x%p using %s\n",
- dac->regs, dev_name(&dac->dma.chan->dev->device));
-
- return retval;
-
-out_release_dma:
- dma_release_channel(dac->dma.chan);
- dac->dma.chan = NULL;
-out_unmap_regs:
- iounmap(dac->regs);
-out_free_card:
- snd_card_free(card);
-out_put_sample_clk:
- clk_put(sample_clk);
- clk_disable_unprepare(pclk);
-out_put_pclk:
- clk_put(pclk);
- return retval;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int atmel_abdac_suspend(struct device *pdev)
-{
- struct snd_card *card = dev_get_drvdata(pdev);
- struct atmel_abdac *dac = card->private_data;
-
- dw_dma_cyclic_stop(dac->dma.chan);
- clk_disable_unprepare(dac->sample_clk);
- clk_disable_unprepare(dac->pclk);
-
- return 0;
-}
-
-static int atmel_abdac_resume(struct device *pdev)
-{
- struct snd_card *card = dev_get_drvdata(pdev);
- struct atmel_abdac *dac = card->private_data;
-
- clk_prepare_enable(dac->pclk);
- clk_prepare_enable(dac->sample_clk);
- if (test_bit(DMA_READY, &dac->flags))
- dw_dma_cyclic_start(dac->dma.chan);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(atmel_abdac_pm, atmel_abdac_suspend, atmel_abdac_resume);
-#define ATMEL_ABDAC_PM_OPS &atmel_abdac_pm
-#else
-#define ATMEL_ABDAC_PM_OPS NULL
-#endif
-
-static int atmel_abdac_remove(struct platform_device *pdev)
-{
- struct snd_card *card = platform_get_drvdata(pdev);
- struct atmel_abdac *dac = get_dac(card);
-
- clk_put(dac->sample_clk);
- clk_disable_unprepare(dac->pclk);
- clk_put(dac->pclk);
-
- dma_release_channel(dac->dma.chan);
- dac->dma.chan = NULL;
- iounmap(dac->regs);
- free_irq(dac->irq, dac);
- snd_card_free(card);
-
- return 0;
-}
-
-static struct platform_driver atmel_abdac_driver = {
- .remove = atmel_abdac_remove,
- .driver = {
- .name = "atmel_abdac",
- .pm = ATMEL_ABDAC_PM_OPS,
- },
-};
-
-static int __init atmel_abdac_init(void)
-{
- return platform_driver_probe(&atmel_abdac_driver,
- atmel_abdac_probe);
-}
-module_init(atmel_abdac_init);
-
-static void __exit atmel_abdac_exit(void)
-{
- platform_driver_unregister(&atmel_abdac_driver);
-}
-module_exit(atmel_abdac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)");
-MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6dad042630d8..65e6948e3995 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -11,8 +11,6 @@
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -34,36 +32,14 @@
#include <sound/atmel-ac97c.h>
#include <sound/memalloc.h>
-#include <linux/platform_data/dma-dw.h>
-#include <linux/dma/dw.h>
-
-#ifdef CONFIG_AVR32
-#include <mach/cpu.h>
-#else
-#define cpu_is_at32ap7000() 0
-#endif
-
#include "ac97c.h"
-enum {
- DMA_TX_READY = 0,
- DMA_RX_READY,
- DMA_TX_CHAN_PRESENT,
- DMA_RX_CHAN_PRESENT,
-};
-
/* Serialize access to opened variable */
static DEFINE_MUTEX(opened_mutex);
-struct atmel_ac97c_dma {
- struct dma_chan *rx_chan;
- struct dma_chan *tx_chan;
-};
-
struct atmel_ac97c {
struct clk *pclk;
struct platform_device *pdev;
- struct atmel_ac97c_dma dma;
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
@@ -74,7 +50,6 @@ struct atmel_ac97c {
u64 cur_format;
unsigned int cur_rate;
- unsigned long flags;
int playback_period, capture_period;
/* Serialize access to opened variable */
spinlock_t lock;
@@ -91,65 +66,6 @@ struct atmel_ac97c {
#define ac97c_readl(chip, reg) \
__raw_readl((chip)->regs + AC97C_##reg)
-/* This function is called by the DMA driver. */
-static void atmel_ac97c_dma_playback_period_done(void *arg)
-{
- struct atmel_ac97c *chip = arg;
- snd_pcm_period_elapsed(chip->playback_substream);
-}
-
-static void atmel_ac97c_dma_capture_period_done(void *arg)
-{
- struct atmel_ac97c *chip = arg;
- snd_pcm_period_elapsed(chip->capture_substream);
-}
-
-static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
- struct snd_pcm_substream *substream,
- enum dma_transfer_direction direction)
-{
- struct dma_chan *chan;
- struct dw_cyclic_desc *cdesc;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned long buffer_len, period_len;
-
- /*
- * We don't do DMA on "complex" transfers, i.e. with
- * non-halfword-aligned buffers or lengths.
- */
- if (runtime->dma_addr & 1 || runtime->buffer_size & 1) {
- dev_dbg(&chip->pdev->dev, "too complex transfer\n");
- return -EINVAL;
- }
-
- if (direction == DMA_MEM_TO_DEV)
- chan = chip->dma.tx_chan;
- else
- chan = chip->dma.rx_chan;
-
- buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
- period_len = frames_to_bytes(runtime, runtime->period_size);
-
- cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
- period_len, direction);
- if (IS_ERR(cdesc)) {
- dev_dbg(&chip->pdev->dev, "could not prepare cyclic DMA\n");
- return PTR_ERR(cdesc);
- }
-
- if (direction == DMA_MEM_TO_DEV) {
- cdesc->period_callback = atmel_ac97c_dma_playback_period_done;
- set_bit(DMA_TX_READY, &chip->flags);
- } else {
- cdesc->period_callback = atmel_ac97c_dma_capture_period_done;
- set_bit(DMA_RX_READY, &chip->flags);
- }
-
- cdesc->period_callback_param = chip;
-
- return 0;
-}
-
static struct snd_pcm_hardware atmel_ac97c_hw = {
.info = (SNDRV_PCM_INFO_MMAP
| SNDRV_PCM_INFO_MMAP_VALID
@@ -254,13 +170,7 @@ static int atmel_ac97c_playback_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
if (retval < 0)
return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (cpu_is_at32ap7000()) {
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.tx_chan);
- }
+
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
chip->cur_rate = params_rate(hw_params);
@@ -280,10 +190,6 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
if (retval < 0)
return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (cpu_is_at32ap7000() && retval == 1)
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
@@ -294,26 +200,6 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
return retval;
}
-static int atmel_ac97c_playback_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- if (cpu_is_at32ap7000()) {
- if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.tx_chan);
- }
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int atmel_ac97c_capture_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- if (cpu_is_at32ap7000()) {
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
- }
- return snd_pcm_lib_free_pages(substream);
-}
-
static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
@@ -349,8 +235,6 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
- if (cpu_is_at32ap7000())
- word |= AC97C_CMR_CEM_LITTLE;
break;
case SNDRV_PCM_FORMAT_S16_BE: /* fall through */
word &= ~(AC97C_CMR_CEM_LITTLE);
@@ -389,18 +273,11 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n",
runtime->rate);
- if (cpu_is_at32ap7000()) {
- if (!test_bit(DMA_TX_READY, &chip->flags))
- retval = atmel_ac97c_prepare_dma(chip, substream,
- DMA_MEM_TO_DEV);
- } else {
- /* Initialize and start the PDC */
- writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_TCR);
- writel(runtime->dma_addr + block_size,
- chip->regs + ATMEL_PDC_TNPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
- }
+ /* Initialize and start the PDC */
+ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TCR);
+ writel(runtime->dma_addr + block_size, chip->regs + ATMEL_PDC_TNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
return retval;
}
@@ -440,8 +317,6 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
- if (cpu_is_at32ap7000())
- word |= AC97C_CMR_CEM_LITTLE;
break;
case SNDRV_PCM_FORMAT_S16_BE: /* fall through */
word &= ~(AC97C_CMR_CEM_LITTLE);
@@ -480,18 +355,11 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n",
runtime->rate);
- if (cpu_is_at32ap7000()) {
- if (!test_bit(DMA_RX_READY, &chip->flags))
- retval = atmel_ac97c_prepare_dma(chip, substream,
- DMA_DEV_TO_MEM);
- } else {
- /* Initialize and start the PDC */
- writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_RCR);
- writel(runtime->dma_addr + block_size,
- chip->regs + ATMEL_PDC_RNPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
- }
+ /* Initialize and start the PDC */
+ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RCR);
+ writel(runtime->dma_addr + block_size, chip->regs + ATMEL_PDC_RNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
return retval;
}
@@ -501,7 +369,6 @@ atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
unsigned long camr, ptcr = 0;
- int retval = 0;
camr = ac97c_readl(chip, CAMR);
@@ -509,35 +376,23 @@ atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- if (cpu_is_at32ap7000()) {
- retval = dw_dma_cyclic_start(chip->dma.tx_chan);
- if (retval)
- goto out;
- } else {
- ptcr = ATMEL_PDC_TXTEN;
- }
+ ptcr = ATMEL_PDC_TXTEN;
camr |= AC97C_CMR_CENA | AC97C_CSR_ENDTX;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- if (cpu_is_at32ap7000())
- dw_dma_cyclic_stop(chip->dma.tx_chan);
- else
- ptcr |= ATMEL_PDC_TXTDIS;
+ ptcr |= ATMEL_PDC_TXTDIS;
if (chip->opened <= 1)
camr &= ~AC97C_CMR_CENA;
break;
default:
- retval = -EINVAL;
- goto out;
+ return -EINVAL;
}
ac97c_writel(chip, CAMR, camr);
- if (!cpu_is_at32ap7000())
- writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
-out:
- return retval;
+ writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
+ return 0;
}
static int
@@ -545,7 +400,6 @@ atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
unsigned long camr, ptcr = 0;
- int retval = 0;
camr = ac97c_readl(chip, CAMR);
ptcr = readl(chip->regs + ATMEL_PDC_PTSR);
@@ -554,35 +408,23 @@ atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- if (cpu_is_at32ap7000()) {
- retval = dw_dma_cyclic_start(chip->dma.rx_chan);
- if (retval)
- goto out;
- } else {
- ptcr = ATMEL_PDC_RXTEN;
- }
+ ptcr = ATMEL_PDC_RXTEN;
camr |= AC97C_CMR_CENA | AC97C_CSR_ENDRX;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- if (cpu_is_at32ap7000())
- dw_dma_cyclic_stop(chip->dma.rx_chan);
- else
- ptcr |= (ATMEL_PDC_RXTDIS);
+ ptcr |= ATMEL_PDC_RXTDIS;
if (chip->opened <= 1)
camr &= ~AC97C_CMR_CENA;
break;
default:
- retval = -EINVAL;
- break;
+ return -EINVAL;
}
ac97c_writel(chip, CAMR, camr);
- if (!cpu_is_at32ap7000())
- writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
-out:
- return retval;
+ writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
+ return 0;
}
static snd_pcm_uframes_t
@@ -593,10 +435,7 @@ atmel_ac97c_playback_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t frames;
unsigned long bytes;
- if (cpu_is_at32ap7000())
- bytes = dw_dma_get_src_addr(chip->dma.tx_chan);
- else
- bytes = readl(chip->regs + ATMEL_PDC_TPR);
+ bytes = readl(chip->regs + ATMEL_PDC_TPR);
bytes -= runtime->dma_addr;
frames = bytes_to_frames(runtime, bytes);
@@ -613,10 +452,7 @@ atmel_ac97c_capture_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t frames;
unsigned long bytes;
- if (cpu_is_at32ap7000())
- bytes = dw_dma_get_dst_addr(chip->dma.rx_chan);
- else
- bytes = readl(chip->regs + ATMEL_PDC_RPR);
+ bytes = readl(chip->regs + ATMEL_PDC_RPR);
bytes -= runtime->dma_addr;
frames = bytes_to_frames(runtime, bytes);
@@ -630,7 +466,7 @@ static struct snd_pcm_ops atmel_ac97_playback_ops = {
.close = atmel_ac97c_playback_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = atmel_ac97c_playback_hw_params,
- .hw_free = atmel_ac97c_playback_hw_free,
+ .hw_free = snd_pcm_lib_free_pages,
.prepare = atmel_ac97c_playback_prepare,
.trigger = atmel_ac97c_playback_trigger,
.pointer = atmel_ac97c_playback_pointer,
@@ -641,7 +477,7 @@ static struct snd_pcm_ops atmel_ac97_capture_ops = {
.close = atmel_ac97c_capture_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = atmel_ac97c_capture_hw_params,
- .hw_free = atmel_ac97c_capture_hw_free,
+ .hw_free = snd_pcm_lib_free_pages,
.prepare = atmel_ac97c_capture_prepare,
.trigger = atmel_ac97c_capture_trigger,
.pointer = atmel_ac97c_capture_pointer,
@@ -666,49 +502,40 @@ static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev)
casr & AC97C_CSR_TXEMPTY ? " TXEMPTY" : "",
casr & AC97C_CSR_TXRDY ? " TXRDY" : "",
!casr ? " NONE" : "");
- if (!cpu_is_at32ap7000()) {
- if ((casr & camr) & AC97C_CSR_ENDTX) {
- runtime = chip->playback_substream->runtime;
- block_size = frames_to_bytes(runtime,
- runtime->period_size);
- chip->playback_period++;
-
- if (chip->playback_period == runtime->periods)
- chip->playback_period = 0;
- next_period = chip->playback_period + 1;
- if (next_period == runtime->periods)
- next_period = 0;
-
- offset = block_size * next_period;
-
- writel(runtime->dma_addr + offset,
- chip->regs + ATMEL_PDC_TNPR);
- writel(block_size / 2,
- chip->regs + ATMEL_PDC_TNCR);
-
- snd_pcm_period_elapsed(
- chip->playback_substream);
- }
- if ((casr & camr) & AC97C_CSR_ENDRX) {
- runtime = chip->capture_substream->runtime;
- block_size = frames_to_bytes(runtime,
- runtime->period_size);
- chip->capture_period++;
-
- if (chip->capture_period == runtime->periods)
- chip->capture_period = 0;
- next_period = chip->capture_period + 1;
- if (next_period == runtime->periods)
- next_period = 0;
-
- offset = block_size * next_period;
-
- writel(runtime->dma_addr + offset,
- chip->regs + ATMEL_PDC_RNPR);
- writel(block_size / 2,
- chip->regs + ATMEL_PDC_RNCR);
- snd_pcm_period_elapsed(chip->capture_substream);
- }
+ if ((casr & camr) & AC97C_CSR_ENDTX) {
+ runtime = chip->playback_substream->runtime;
+ block_size = frames_to_bytes(runtime, runtime->period_size);
+ chip->playback_period++;
+
+ if (chip->playback_period == runtime->periods)
+ chip->playback_period = 0;
+ next_period = chip->playback_period + 1;
+ if (next_period == runtime->periods)
+ next_period = 0;
+
+ offset = block_size * next_period;
+
+ writel(runtime->dma_addr + offset, chip->regs + ATMEL_PDC_TNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
+
+ snd_pcm_period_elapsed(chip->playback_substream);
+ }
+ if ((casr & camr) & AC97C_CSR_ENDRX) {
+ runtime = chip->capture_substream->runtime;
+ block_size = frames_to_bytes(runtime, runtime->period_size);
+ chip->capture_period++;
+
+ if (chip->capture_period == runtime->periods)
+ chip->capture_period = 0;
+ next_period = chip->capture_period + 1;
+ if (next_period == runtime->periods)
+ next_period = 0;
+
+ offset = block_size * next_period;
+
+ writel(runtime->dma_addr + offset, chip->regs + ATMEL_PDC_RNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
+ snd_pcm_period_elapsed(chip->capture_substream);
}
retval = IRQ_HANDLED;
}
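/*
 * Context, not part of the patch: the interrupt path above keeps the PDC one
 * period ahead. A hedged sketch of that bookkeeping (helper name and
 * parameters are illustrative, not from the driver): when ENDTX/ENDRX fires
 * the controller has already switched to the period previously programmed
 * into the "next" registers, so the handler advances its period index and
 * queues the period after that one. Usage would roughly mirror the ENDTX
 * branch, e.g. chip->playback_period = pdc_queue_next_period(chip->regs +
 * ATMEL_PDC_TNPR, chip->regs + ATMEL_PDC_TNCR, runtime->dma_addr,
 * block_size, chip->playback_period, runtime->periods), followed by
 * snd_pcm_period_elapsed().
 */
static unsigned int pdc_queue_next_period(void __iomem *next_ptr_reg,
					  void __iomem *next_cnt_reg,
					  dma_addr_t buf, size_t block_size,
					  unsigned int period,
					  unsigned int periods)
{
	unsigned int next;

	if (++period == periods)	/* period that just started playing */
		period = 0;
	next = period + 1;		/* period to queue behind it */
	if (next == periods)
		next = 0;

	writel(buf + next * block_size, next_ptr_reg);
	writel(block_size / 2, next_cnt_reg);
	return period;			/* caller stores this back */
}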
@@ -763,29 +590,20 @@ static int atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_hardware hw = atmel_ac97c_hw;
- int capture, playback, retval, err;
+ int retval;
- capture = test_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- playback = test_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
+ retval = snd_ac97_pcm_assign(chip->ac97_bus,
+ ARRAY_SIZE(at91_ac97_pcm_defs),
+ at91_ac97_pcm_defs);
+ if (retval)
+ return retval;
- if (!cpu_is_at32ap7000()) {
- err = snd_ac97_pcm_assign(chip->ac97_bus,
- ARRAY_SIZE(at91_ac97_pcm_defs),
- at91_ac97_pcm_defs);
- if (err)
- return err;
- }
- retval = snd_pcm_new(chip->card, chip->card->shortname,
- 0, playback, capture, &pcm);
+ retval = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm);
if (retval)
return retval;
- if (capture)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
- &atmel_ac97_capture_ops);
- if (playback)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &atmel_ac97_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &atmel_ac97_capture_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_ac97_playback_ops);
retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pdev->dev, hw.periods_min * hw.period_bytes_min,
@@ -875,17 +693,6 @@ timed_out:
return 0xffff;
}
-static bool filter(struct dma_chan *chan, void *slave)
-{
- struct dw_dma_slave *dws = slave;
-
- if (dws->dma_dev == chan->device->dev) {
- chan->private = dws;
- return true;
- } else
- return false;
-}
-
static void atmel_ac97c_reset(struct atmel_ac97c *chip)
{
ac97c_writel(chip, MR, 0);
@@ -971,12 +778,7 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
return -ENXIO;
}
- if (cpu_is_at32ap7000()) {
- pclk = clk_get(&pdev->dev, "pclk");
- } else {
- pclk = clk_get(&pdev->dev, "ac97_clk");
- }
-
+ pclk = clk_get(&pdev->dev, "ac97_clk");
if (IS_ERR(pclk)) {
dev_dbg(&pdev->dev, "no peripheral clock\n");
return PTR_ERR(pclk);
@@ -1047,88 +849,16 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
goto err_ac97_bus;
}
- if (cpu_is_at32ap7000()) {
- if (pdata->rx_dws.dma_dev) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chip->dma.rx_chan = dma_request_channel(mask, filter,
- &pdata->rx_dws);
- if (chip->dma.rx_chan) {
- struct dma_slave_config dma_conf = {
- .src_addr = regs->start + AC97C_CARHR +
- 2,
- .src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES,
- .src_maxburst = 1,
- .dst_maxburst = 1,
- .direction = DMA_DEV_TO_MEM,
- .device_fc = false,
- };
-
- dmaengine_slave_config(chip->dma.rx_chan,
- &dma_conf);
- }
-
- dev_info(&chip->pdev->dev, "using %s for DMA RX\n",
- dev_name(&chip->dma.rx_chan->dev->device));
- set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- }
-
- if (pdata->tx_dws.dma_dev) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chip->dma.tx_chan = dma_request_channel(mask, filter,
- &pdata->tx_dws);
- if (chip->dma.tx_chan) {
- struct dma_slave_config dma_conf = {
- .dst_addr = regs->start + AC97C_CATHR +
- 2,
- .dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES,
- .src_maxburst = 1,
- .dst_maxburst = 1,
- .direction = DMA_MEM_TO_DEV,
- .device_fc = false,
- };
-
- dmaengine_slave_config(chip->dma.tx_chan,
- &dma_conf);
- }
-
- dev_info(&chip->pdev->dev, "using %s for DMA TX\n",
- dev_name(&chip->dma.tx_chan->dev->device));
- set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- }
-
- if (!test_bit(DMA_RX_CHAN_PRESENT, &chip->flags) &&
- !test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) {
- dev_dbg(&pdev->dev, "DMA not available\n");
- retval = -ENODEV;
- goto err_dma;
- }
- } else {
- /* Just pretend that we have DMA channel(for at91 i is actually
- * the PDC) */
- set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- }
-
retval = atmel_ac97c_pcm_new(chip);
if (retval) {
dev_dbg(&pdev->dev, "could not register ac97 pcm device\n");
- goto err_dma;
+ goto err_ac97_bus;
}
retval = snd_card_register(card);
if (retval) {
dev_dbg(&pdev->dev, "could not register sound card\n");
- goto err_dma;
+ goto err_ac97_bus;
}
platform_set_drvdata(pdev, card);
@@ -1138,17 +868,6 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
return 0;
-err_dma:
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.rx_chan);
- if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.tx_chan);
- clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- chip->dma.rx_chan = NULL;
- chip->dma.tx_chan = NULL;
- }
err_ac97_bus:
if (gpio_is_valid(chip->reset_pin))
gpio_free(chip->reset_pin);
@@ -1170,14 +889,7 @@ static int atmel_ac97c_suspend(struct device *pdev)
struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_ac97c *chip = card->private_data;
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_stop(chip->dma.rx_chan);
- if (test_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_stop(chip->dma.tx_chan);
- }
clk_disable_unprepare(chip->pclk);
-
return 0;
}
@@ -1187,12 +899,6 @@ static int atmel_ac97c_resume(struct device *pdev)
struct atmel_ac97c *chip = card->private_data;
clk_prepare_enable(chip->pclk);
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_start(chip->dma.rx_chan);
- if (test_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_start(chip->dma.tx_chan);
- }
return 0;
}
@@ -1219,17 +925,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
iounmap(chip->regs);
free_irq(chip->irq, chip);
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.rx_chan);
- if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.tx_chan);
- clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- chip->dma.rx_chan = NULL;
- chip->dma.tx_chan = NULL;
- }
-
snd_card_free(card);
return 0;