path: root/drivers/net/wireless/ath/ath10k/ce.c
author    Michal Kazior <michal.kazior@tieto.com>  2014-03-28 10:02:38 +0200
committer Kalle Valo <kvalo@qca.qualcomm.com>      2014-03-28 14:32:10 +0200
commit    25d0dbcbd5c746631ec1ee08bbbc4eba86bb9163 (patch)
tree      e7325922a9bba365c3eb0ad1a88fcde66d66abfa /drivers/net/wireless/ath/ath10k/ce.c
parent    68c03249f388aafe74f0e87e2743294d4384c00c (diff)
ath10k: split ce initialization and allocation
The definitions by which copy engine structures are allocated do not change, so it doesn't make much sense to re-create those structures each time the device is booted (e.g. due to firmware recovery). This should decrease the chance of memory allocation failures.

While at it, remove the per_transfer_context pointer indirection. The array has been trailing the copy engine ring buffer structure anyway, so this also saves a pointer's worth of bytes for each copy engine ring buffer.

Reported-By: Avery Pennarun <apenwarr@gmail.com>
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
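The core of the change is that each ring structure and its per_transfer_context array are now allocated together, once, and only re-initialized on each boot. The following is a minimal standalone sketch of that pattern; demo_ring, demo_ring_alloc() and demo_ring_init() are hypothetical stand-ins for illustration, not the driver's actual types:

#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for struct ath10k_ce_ring */
struct demo_ring {
	unsigned int nentries;
	unsigned int nentries_mask;
	/* flexible array trailing the struct: one context slot per ring
	 * entry, carved out of the same allocation as the struct itself */
	void *per_transfer_context[];
};

/* called once, at probe time: one allocation covers the struct and the
 * context array, so no separate void ** pointer is stored or freed */
static struct demo_ring *demo_ring_alloc(unsigned int nentries)
{
	struct demo_ring *ring;

	ring = calloc(1, sizeof(*ring) +
			 nentries * sizeof(ring->per_transfer_context[0]));
	if (!ring)
		return NULL;

	ring->nentries = nentries;         /* assumed power of two */
	ring->nentries_mask = nentries - 1;
	return ring;
}

/* re-run on every (re)boot: clears stale contexts, keeps the allocation */
static void demo_ring_init(struct demo_ring *ring)
{
	memset(ring->per_transfer_context, 0,
	       ring->nentries * sizeof(ring->per_transfer_context[0]));
}

int main(void)
{
	struct demo_ring *ring = demo_ring_alloc(8);

	if (!ring)
		return 1;
	demo_ring_init(ring);	/* first boot */
	demo_ring_init(ring);	/* firmware recovery: reinit, no realloc */
	free(ring);
	return 0;
}

Because the context slots trail the struct in a single allocation, a firmware recovery only needs the memset in demo_ring_init(); nothing is freed or reallocated, which is what shrinks the allocation-failure window the commit message describes.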
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 307
1 file changed, 178 insertions(+), 129 deletions(-)
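One convention worth noting before the diff: the new ath10k_ce_alloc_src_ring()/ath10k_ce_alloc_dest_ring() helpers report failure through the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() encoding rather than by returning NULL. A pared-down userspace rendition of that convention (the helpers below are simplified stand-ins for the kernel's <linux/err.h>, and demo_alloc() is hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* simplified stand-ins for the kernel's <linux/err.h> helpers: a small
 * negative errno is encoded in the top bytes of the pointer value */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical allocator in the style of ath10k_ce_alloc_src_ring() */
static void *demo_alloc(size_t size)
{
	void *p = calloc(1, size);

	if (!p)
		return ERR_PTR(-ENOMEM); /* encode the errno, not NULL */
	return p;
}

int main(void)
{
	void *ring = demo_alloc(64);

	/* caller unpacks the errno exactly as ath10k_ce_alloc_pipe() does */
	if (IS_ERR(ring)) {
		fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(ring));
		return 1;
	}
	free(ring);
	return 0;
}

This lets the alloc helpers hand a specific error code back to ath10k_ce_alloc_pipe(), which logs it and resets the ring pointer to NULL, as the hunks below show.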
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 653a240142e5..1e4cad8632b5 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -840,34 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
const struct ce_attr *attr)
{
- struct ath10k_ce_ring *src_ring;
- unsigned int nentries = attr->src_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
- dma_addr_t base_addr;
- char *ptr;
-
- nentries = roundup_pow_of_two(nentries);
-
- if (ce_state->src_ring) {
- WARN_ON(ce_state->src_ring->nentries != nentries);
- return 0;
- }
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
+ nentries = roundup_pow_of_two(attr->src_nentries);
- ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
- src_ring = ce_state->src_ring;
-
- ptr += sizeof(struct ath10k_ce_ring);
- src_ring->nentries = nentries;
- src_ring->nentries_mask = nentries - 1;
+ memset(src_ring->per_transfer_context, 0,
+ nentries * sizeof(*src_ring->per_transfer_context));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@@ -877,7 +860,74 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- src_ring->per_transfer_context = (void **)ptr;
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ memset(dest_ring->per_transfer_context, 0,
+ nentries * sizeof(*dest_ring->per_transfer_context));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
/*
* Legacy platforms that do not support cache
@@ -889,9 +939,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -916,69 +965,37 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space,
src_ring->base_addr_ce_space);
- kfree(ce_state->src_ring);
- ce_state->src_ring = NULL;
- return -ENOMEM;
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
- src_ring->base_addr_ce_space);
- ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
- ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce src ring id %d entries %d base_addr %p\n",
- ce_id, nentries, src_ring->base_addr_owner_space);
-
- return 0;
+ return src_ring;
}
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
- unsigned int ce_id,
- struct ath10k_ce_pipe *ce_state,
- const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
struct ath10k_ce_ring *dest_ring;
- unsigned int nentries = attr->dest_nentries;
- unsigned int ce_nbytes;
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+ u32 nentries;
dma_addr_t base_addr;
- char *ptr;
- nentries = roundup_pow_of_two(nentries);
+ nentries = roundup_pow_of_two(attr->dest_nentries);
- if (ce_state->dest_ring) {
- WARN_ON(ce_state->dest_ring->nentries != nentries);
- return 0;
- }
-
- ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
- ptr = kzalloc(ce_nbytes, GFP_KERNEL);
- if (ptr == NULL)
- return -ENOMEM;
-
- ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
- dest_ring = ce_state->dest_ring;
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
- ptr += sizeof(struct ath10k_ce_ring);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
- dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
- dest_ring->sw_index &= dest_ring->nentries_mask;
- dest_ring->write_index =
- ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
- dest_ring->write_index &= dest_ring->nentries_mask;
-
- dest_ring->per_transfer_context = (void **)ptr;
-
/*
* Legacy platforms that do not support cache
* coherent DMA are unsupported
@@ -989,9 +1006,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
- kfree(ce_state->dest_ring);
- ce_state->dest_ring = NULL;
- return -ENOMEM;
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1010,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
- dest_ring->base_addr_ce_space);
- ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
- ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
- ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
- ath10k_dbg(ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
- ce_id, nentries, dest_ring->base_addr_owner_space);
-
- return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
- spin_lock_bh(&ar_pci->ce_lock);
-
- ce_state->ar = ar;
- ce_state->id = ce_id;
- ce_state->ctrl_addr = ctrl_addr;
- ce_state->attr_flags = attr->flags;
- ce_state->src_sz_max = attr->src_sz_max;
-
- spin_unlock_bh(&ar_pci->ce_lock);
-
- return ce_state;
+ return dest_ring;
}
/*
@@ -1052,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
* initialization. It may be that only one side or the other is
* initialized by software/firmware.
*/
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
- unsigned int ce_id,
- const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
{
- struct ath10k_ce_pipe *ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
@@ -1072,44 +1056,109 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
ret = ath10k_pci_wake(ar);
if (ret)
- return NULL;
+ return ret;
- ce_state = ath10k_ce_init_state(ar, ce_id, attr);
- if (!ce_state) {
- ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
- goto out;
- }
+ spin_lock_bh(&ar_pci->ce_lock);
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+ spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
- ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
if (attr->dest_nentries) {
- ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
- ath10k_ce_deinit(ce_state);
- ce_state = NULL;
goto out;
}
}
out:
ath10k_pci_sleep(ar);
- return ce_state;
+ return ret;
}
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
- struct ath10k *ar = ce_state->ar;
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret)
+ return;
+
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+ ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);