Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
24 files changed, 4155 insertions, 1704 deletions
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 744da6d1c405..a1f099628850 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -22,7 +22,8 @@
 void ath10k_bmi_start(struct ath10k *ar)
 {
-	ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
 	ar->bmi.done_sent = false;
 }
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
 	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
 	if (ar->bmi.done_sent) {
-		ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+		ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
 		return 0;
 	}
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
 		return ret;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
 	return 0;
 }
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
 	u32 resplen = sizeof(resp.get_target_info);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("BMI Get Target Info Command disallowed\n");
 		return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
 
 	target_info->version = __le32_to_cpu(resp.get_target_info.version);
 	target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
 	return 0;
 }
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
 	u32 rxlen;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+		   address, length);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
-		   __func__, ar, address, length);
-
 	while (length) {
 		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
 	u32 txlen;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+		   address, length);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
-		   __func__, ar, address, length);
-
 	while (length) {
 		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
 	u32 resplen = sizeof(resp.execute);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+		   address, *param);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
 	}
 
-	ath10k_dbg(ATH10K_DBG_CORE,
-		   "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
-		   __func__, ar, address, *param);
-
 	cmd.id = __cpu_to_le32(BMI_EXECUTE);
 	cmd.execute.addr = __cpu_to_le32(address);
 	cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
 	u32 txlen;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+		   buffer, length);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
 	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+		   address);
+
 	if (ar->bmi.done_sent) {
 		ath10k_warn("command disallowed\n");
 		return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
 	u32 trailer_len = length - head_len;
 	int ret;
 
+	ath10k_dbg(ATH10K_DBG_BMI,
+		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
+		   address, buffer, length);
+
 	ret = ath10k_bmi_lz_stream_start(ar, address);
 	if (ret)
 		return ret;
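The bmi.c changes above route every BMI command through a new ATH10K_DBG_BMI debug level (added to debug.h further down) instead of the old ATH10K_DBG_CORE, and log the command parameters up front, before the done_sent check. As a rough, self-contained model of how such a mask-gated debug helper behaves (my_dbg and my_debug_mask are illustrative stand-ins, not the driver's implementation):

#include <stdarg.h>
#include <stdio.h>

enum my_debug_mask {
	MY_DBG_BOOT = 0x00000020,	/* mirrors ATH10K_DBG_BOOT */
	MY_DBG_BMI  = 0x00000400,	/* mirrors ATH10K_DBG_BMI */
};

/* in ath10k this is the ath10k_debug_mask module parameter */
static unsigned int my_debug_mask = MY_DBG_BMI;

static void my_dbg(enum my_debug_mask mask, const char *fmt, ...)
{
	va_list args;

	if (!(my_debug_mask & mask))
		return;		/* level disabled: message is dropped */

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	my_dbg(MY_DBG_BMI, "bmi read address 0x%x length %d\n", 0x1234, 64);
	my_dbg(MY_DBG_BOOT, "boot wmi ready\n");	/* filtered out */
	return 0;
}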
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f8b969f518f8..e46951b8fb92 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -76,36 +76,7 @@
 static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	void __iomem *indicator_addr;
-
-	if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
-		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-		return;
-	}
-
-	/* workaround for QCA988x_1.0 HW CE */
-	indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
-	if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
-		iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
-	} else {
-		unsigned long irq_flags;
-		local_irq_save(irq_flags);
-		iowrite32(1, indicator_addr);
-
-		/*
-		 * PCIE write waits for ACK in IPQ8K, there is no
-		 * need to read back value.
-		 */
-		(void)ioread32(indicator_addr);
-		(void)ioread32(indicator_addr); /* conservative */
-
-		ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
-		iowrite32(0, indicator_addr);
-		local_irq_restore(irq_flags);
-	}
+	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
 }
 
 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * ath10k_ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				 void *per_transfer_context,
				 u32 buffer,
				 unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
				 unsigned int flags)
 {
 	struct ath10k *ar = ce_state->ar;
-	struct ce_ring_state *src_ring = ce_state->src_ring;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	struct ce_desc *desc, *sdesc;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
@@ -306,11 +277,13 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
 		ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return ret;
 
 	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
-		ret = -EIO;
+		ret = -ENOSR;
 		goto exit;
 	}
@@ -346,7 +319,7 @@ exit:
 	return ret;
 }
 
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
@@ -365,77 +338,26 @@ int ath10k_ce_send(struct ce_state *ce_state,
 	return ret;
 }
 
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
-				unsigned int nbytes, u32 flags)
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 {
-	unsigned int num_items = sendlist->num_items;
-	struct ce_sendlist_item *item;
-
-	item = &sendlist->item[num_items];
-	item->data = buffer;
-	item->u.nbytes = nbytes;
-	item->flags = flags;
-	sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
-			    void *per_transfer_context,
-			    struct ce_sendlist *sendlist,
-			    unsigned int transfer_id)
-{
-	struct ce_ring_state *src_ring = ce_state->src_ring;
-	struct ce_sendlist_item *item;
-	struct ath10k *ar = ce_state->ar;
+	struct ath10k *ar = pipe->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int num_items = sendlist->num_items;
-	unsigned int sw_index;
-	unsigned int write_index;
-	int i, delta, ret = -ENOMEM;
+	int delta;
 
 	spin_lock_bh(&ar_pci->ce_lock);
-
-	sw_index = src_ring->sw_index;
-	write_index = src_ring->write_index;
-
-	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
-
-	if (delta >= num_items) {
-		/*
-		 * Handle all but the last item uniformly.
-		 */
-		for (i = 0; i < num_items - 1; i++) {
-			item = &sendlist->item[i];
-			ret = ath10k_ce_send_nolock(ce_state,
-						    CE_SENDLIST_ITEM_CTXT,
-						    (u32) item->data,
-						    item->u.nbytes, transfer_id,
-						    item->flags |
-						    CE_SEND_FLAG_GATHER);
-			if (ret)
-				ath10k_warn("CE send failed for item: %d\n", i);
-		}
-		/*
-		 * Provide valid context pointer for final item.
-		 */
-		item = &sendlist->item[i];
-		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
-					    (u32) item->data, item->u.nbytes,
-					    transfer_id, item->flags);
-		if (ret)
-			ath10k_warn("CE send failed for last item: %d\n", i);
-	}
-
+	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+			      pipe->src_ring->write_index,
+			      pipe->src_ring->sw_index - 1);
 	spin_unlock_bh(&ar_pci->ce_lock);
 
-	return ret;
+	return delta;
 }
 
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
 {
-	struct ce_ring_state *dest_ring = ce_state->dest_ring;
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +370,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
 	write_index = dest_ring->write_index;
 	sw_index = dest_ring->sw_index;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		goto out;
 
 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
 		struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +394,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
 		ret = -EIO;
 	}
 	ath10k_pci_sleep(ar);
+
+out:
 	spin_unlock_bh(&ar_pci->ce_lock);
 
 	return ret;
@@ -479,14 +405,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
 {
-	struct ce_ring_state *dest_ring = ce_state->dest_ring;
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
 	unsigned int sw_index = dest_ring->sw_index;
@@ -535,7 +461,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
 	return 0;
 }
 
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
@@ -556,11 +482,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
 	return ret;
 }
 
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
 {
-	struct ce_ring_state *dest_ring;
+	struct ath10k_ce_ring *dest_ring;
 	unsigned int nentries_mask;
 	unsigned int sw_index;
 	unsigned int write_index;
@@ -612,19 +538,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
 {
-	struct ce_ring_state *src_ring = ce_state->src_ring;
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
+	struct ce_desc *sdesc, *sbase;
 	unsigned int read_index;
-	int ret = -EIO;
+	int ret;
 
 	if (src_ring->hw_index == sw_index) {
		/*
@@ -634,48 +561,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
-		ath10k_pci_wake(ar);
+
+		ret = ath10k_pci_wake(ar);
+		if (ret)
+			return ret;
+
		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;
+
		ath10k_pci_sleep(ar);
 	}
+
 	read_index = src_ring->hw_index;
 
-	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
-		struct ce_desc *sbase = src_ring->shadow_base;
-		struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+	if ((read_index == sw_index) || (read_index == 0xffffffff))
+		return -EIO;
 
-		/* Return data from completed source descriptor */
-		*bufferp = __le32_to_cpu(sdesc->addr);
-		*nbytesp = __le16_to_cpu(sdesc->nbytes);
-		*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-				   CE_DESC_FLAGS_META_DATA);
+	sbase = src_ring->shadow_base;
+	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
 
-		if (per_transfer_contextp)
-			*per_transfer_contextp =
-				src_ring->per_transfer_context[sw_index];
+	/* Return data from completed source descriptor */
+	*bufferp = __le32_to_cpu(sdesc->addr);
+	*nbytesp = __le16_to_cpu(sdesc->nbytes);
+	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+			   CE_DESC_FLAGS_META_DATA);
 
-		/* sanity */
-		src_ring->per_transfer_context[sw_index] = NULL;
+	if (per_transfer_contextp)
+		*per_transfer_contextp =
+			src_ring->per_transfer_context[sw_index];
 
-		/* Update sw_index */
-		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-		src_ring->sw_index = sw_index;
-		ret = 0;
-	}
+	/* sanity */
+	src_ring->per_transfer_context[sw_index] = NULL;
 
-	return ret;
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	src_ring->sw_index = sw_index;
+
+	return 0;
 }
 
 /* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
 {
-	struct ce_ring_state *src_ring;
+	struct ath10k_ce_ring *src_ring;
 	unsigned int nentries_mask;
 	unsigned int sw_index;
 	unsigned int write_index;
@@ -727,7 +660,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
 	return ret;
 }
 
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
@@ -756,53 +689,29 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	u32 ctrl_addr = ce_state->ctrl_addr;
-	void *transfer_context;
-	u32 buf;
-	unsigned int nbytes;
-	unsigned int id;
-	unsigned int flags;
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
 
-	ath10k_pci_wake(ar);
 	spin_lock_bh(&ar_pci->ce_lock);
 
 	/* Clear the copy-complete interrupts that will be handled here. */
 	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);
 
-	if (ce_state->recv_cb) {
-		/*
-		 * Pop completed recv buffers and call the registered
-		 * recv callback for each
-		 */
-		while (ath10k_ce_completed_recv_next_nolock(ce_state,
-							    &transfer_context,
-							    &buf, &nbytes,
-							    &id, &flags) == 0) {
-			spin_unlock_bh(&ar_pci->ce_lock);
-			ce_state->recv_cb(ce_state, transfer_context, buf,
-					  nbytes, id, flags);
-			spin_lock_bh(&ar_pci->ce_lock);
-		}
-	}
+	spin_unlock_bh(&ar_pci->ce_lock);
 
-	if (ce_state->send_cb) {
-		/*
-		 * Pop completed send buffers and call the registered
-		 * send callback for each
-		 */
-		while (ath10k_ce_completed_send_next_nolock(ce_state,
-							    &transfer_context,
-							    &buf,
-							    &nbytes,
-							    &id) == 0) {
-			spin_unlock_bh(&ar_pci->ce_lock);
-			ce_state->send_cb(ce_state, transfer_context,
-					  buf, nbytes, id);
-			spin_lock_bh(&ar_pci->ce_lock);
-		}
-	}
+	if (ce_state->recv_cb)
+		ce_state->recv_cb(ce_state);
+
+	if (ce_state->send_cb)
+		ce_state->send_cb(ce_state);
+
+	spin_lock_bh(&ar_pci->ce_lock);
 
 	/*
	 * Misc CE interrupts are not being handled, but still need
@@ -823,10 +732,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ce_id;
+	int ce_id, ret;
 	u32 intr_summary;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
 	intr_summary = CE_INTERRUPT_SUMMARY(ar);
 
 	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +761,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 *
 * Called with ce_lock held.
 */
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
 {
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
+	int ret;
 
-	ath10k_pci_wake(ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
 
 	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +786,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
 void ath10k_ce_disable_interrupts(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ce_id;
+	int ce_id, ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
 
-	ath10k_pci_wake(ar);
 	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
-		struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
		u32 ctrl_addr = ce_state->ctrl_addr;
 
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +801,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
 	ath10k_pci_sleep(ar);
 }
 
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
-				void (*send_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*send_cb)(struct ath10k_ce_pipe *),
				int disable_interrupts)
 {
 	struct ath10k *ar = ce_state->ar;
@@ -900,13 +814,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
 	spin_unlock_bh(&ar_pci->ce_lock);
 }
 
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
-				void (*recv_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id,
-						 unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*recv_cb)(struct ath10k_ce_pipe *))
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +828,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
-				   struct ce_state *ce_state,
+				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_ring_state *src_ring;
+	struct ath10k_ce_ring *src_ring;
 	unsigned int nentries = attr->src_nentries;
 	unsigned int ce_nbytes;
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +846,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
		return 0;
 	}
 
-	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
 	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
 	if (ptr == NULL)
		return -ENOMEM;
 
-	ce_state->src_ring = (struct ce_ring_state *)ptr;
+	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
 	src_ring = ce_state->src_ring;
 
-	ptr += sizeof(struct ce_ring_state);
+	ptr += sizeof(struct ath10k_ce_ring);
 	src_ring->nentries = nentries;
 	src_ring->nentries_mask = nentries - 1;
 
-	ath10k_pci_wake(ar);
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
 	src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +865,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
 	src_ring->write_index &= src_ring->nentries_mask;
-	ath10k_pci_sleep(ar);
 
 	src_ring->per_transfer_context = (void **)ptr;
 
@@ -970,6 +877,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
		(nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
		&base_addr);
+	if (!src_ring->base_addr_owner_space_unaligned) {
+		kfree(ce_state->src_ring);
+		ce_state->src_ring = NULL;
+		return -ENOMEM;
+	}
+
 	src_ring->base_addr_ce_space_unaligned = base_addr;
 
 	src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +899,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
+	if (!src_ring->shadow_base_unaligned) {
+		pci_free_consistent(ar_pci->pdev,
+				    (nentries * sizeof(struct ce_desc) +
+				     CE_DESC_RING_ALIGN),
+				    src_ring->base_addr_owner_space,
+				    src_ring->base_addr_ce_space);
+		kfree(ce_state->src_ring);
+		ce_state->src_ring = NULL;
+		return -ENOMEM;
+	}
 
 	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);
 
-	ath10k_pci_wake(ar);
 	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
 	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +921,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
 	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
 	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-	ath10k_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce src ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, src_ring->base_addr_owner_space);
 
 	return 0;
 }
 
 static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
-				    struct ce_state *ce_state,
+				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_ring_state *dest_ring;
+	struct ath10k_ce_ring *dest_ring;
 	unsigned int nentries = attr->dest_nentries;
 	unsigned int ce_nbytes;
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +949,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
		return 0;
 	}
 
-	ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
 	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
 	if (ptr == NULL)
		return -ENOMEM;
 
-	ce_state->dest_ring = (struct ce_ring_state *)ptr;
+	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
 	dest_ring = ce_state->dest_ring;
 
-	ptr += sizeof(struct ce_ring_state);
+	ptr += sizeof(struct ath10k_ce_ring);
 	dest_ring->nentries = nentries;
 	dest_ring->nentries_mask = nentries - 1;
 
-	ath10k_pci_wake(ar);
 	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
 	dest_ring->sw_index &= dest_ring->nentries_mask;
 	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
 	dest_ring->write_index &= dest_ring->nentries_mask;
-	ath10k_pci_sleep(ar);
 
 	dest_ring->per_transfer_context = (void **)ptr;
 
@@ -1055,6 +978,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
		(nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
		&base_addr);
+	if (!dest_ring->base_addr_owner_space_unaligned) {
+		kfree(ce_state->dest_ring);
+		ce_state->dest_ring = NULL;
+		return -ENOMEM;
+	}
+
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
 
 	/*
@@ -1071,44 +1000,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);
 
-	ath10k_pci_wake(ar);
 	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
 	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
 	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
 	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
 	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-	ath10k_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, dest_ring->base_addr_owner_space);
 
 	return 0;
 }
 
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
					     unsigned int ce_id,
					     const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_state = NULL;
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
 
 	spin_lock_bh(&ar_pci->ce_lock);
 
-	if (!ar_pci->ce_id_to_state[ce_id]) {
-		ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
-		if (ce_state == NULL) {
-			spin_unlock_bh(&ar_pci->ce_lock);
-			return NULL;
-		}
-
-		ar_pci->ce_id_to_state[ce_id] = ce_state;
-		ce_state->ar = ar;
-		ce_state->id = ce_id;
-		ce_state->ctrl_addr = ctrl_addr;
-		ce_state->state = CE_RUNNING;
-		/* Save attribute flags */
-		ce_state->attr_flags = attr->flags;
-		ce_state->src_sz_max = attr->src_sz_max;
-	}
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ctrl_addr;
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
 
 	spin_unlock_bh(&ar_pci->ce_lock);
 
@@ -1122,12 +1042,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr)
 {
-	struct ce_state *ce_state;
+	struct ath10k_ce_pipe *ce_state;
 	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return NULL;
 
 	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
 	if (!ce_state) {
@@ -1136,40 +1061,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
 	}
 
 	if (attr->src_nentries) {
-		if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
-			ath10k_err("Failed to initialize CE src ring for ID: %d\n",
-				   ce_id);
+		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+		if (ret) {
+			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
 	}
 
 	if (attr->dest_nentries) {
-		if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
-			ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
-				   ce_id);
+		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+		if (ret) {
+			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
 	}
 
 	/* Enable CE error interrupts */
-	ath10k_pci_wake(ar);
 	ath10k_ce_error_intr_enable(ar, ctrl_addr);
+	ath10k_pci_sleep(ar);
 
 	return ce_state;
 }
 
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
 {
-	unsigned int ce_id = ce_state->id;
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	ce_state->state = CE_UNUSED;
-	ar_pci->ce_id_to_state[ce_id] = NULL;
-
 	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1113,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
				    ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
 	}
-	kfree(ce_state);
+
+	ce_state->src_ring = NULL;
+	ce_state->dest_ring = NULL;
 }
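Several ce.c paths above now propagate the return value of ath10k_pci_wake() instead of ignoring it, and the whole sendlist machinery collapses into the much smaller ath10k_ce_num_free_src_entries(), which is just CE_RING_DELTA(mask, write_index, sw_index - 1) evaluated under ce_lock. For a power-of-two ring that delta works out to (to - from) & mask; a self-contained sketch of the arithmetic (ring_delta is a stand-in for the CE_RING_DELTA macro, values are illustrative):

#include <assert.h>

/* Free slots between 'from' and 'to' on a power-of-2 ring. One slot
 * stays reserved so that a full ring and an empty ring look different,
 * which is why the caller passes sw_index - 1. */
static unsigned int ring_delta(unsigned int mask, unsigned int from,
			       unsigned int to)
{
	return (to - from) & mask;
}

int main(void)
{
	unsigned int mask = 8 - 1;	/* nentries = 8, power of 2 */
	unsigned int sw_index = 2;	/* next entry hardware consumes */
	unsigned int write_index = 6;	/* next entry software fills */

	/* 8 entries, 4 in flight, 1 reserved -> 3 free */
	assert(ring_delta(mask, write_index, sw_index - 1) == 3);
	return 0;
}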
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index c17f07c026f4..15d45b5b7615 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -27,7 +27,6 @@
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN	8
-#define CE_SENDLIST_ITEMS_MAX	12
 #define CE_SEND_FLAG_GATHER	0x00010000
 
 /*
@@ -36,16 +35,9 @@
 * how to use copy engines.
 */
 
-struct ce_state;
+struct ath10k_ce_pipe;
 
-/* Copy Engine operational state */
-enum ce_op_state {
-	CE_UNUSED,
-	CE_PAUSED,
-	CE_RUNNING,
-};
-
 #define CE_DESC_FLAGS_GATHER		(1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP		(1 << 1)
 #define CE_DESC_FLAGS_META_DATA_MASK	0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
	__le16 flags; /* %CE_DESC_FLAGS_ */
 };
 
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
	/* Number of entries in this ring; must be power of 2 */
	unsigned int nentries;
	unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
	void **per_transfer_context;
 };
 
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
	struct ath10k *ar;
	unsigned int id;
 
	unsigned int attr_flags;
 
	u32 ctrl_addr;
-	enum ce_op_state state;
-
-	void (*send_cb) (struct ce_state *ce_state,
-			 void *per_transfer_send_context,
-			 u32 buffer,
-			 unsigned int nbytes,
-			 unsigned int transfer_id);
-	void (*recv_cb) (struct ce_state *ce_state,
-			 void *per_transfer_recv_context,
-			 u32 buffer,
-			 unsigned int nbytes,
-			 unsigned int transfer_id,
-			 unsigned int flags);
-
-	unsigned int src_sz_max;
-	struct ce_ring_state *src_ring;
-	struct ce_ring_state *dest_ring;
-};
 
-struct ce_sendlist_item {
-	/* e.g. buffer or desc list */
-	dma_addr_t data;
-	union {
-		/* simple buffer */
-		unsigned int nbytes;
-		/* Rx descriptor list */
-		unsigned int ndesc;
-	} u;
-	/* externally-specified flags; OR-ed with internal flags */
-	u32 flags;
-};
+	void (*send_cb)(struct ath10k_ce_pipe *);
+	void (*recv_cb)(struct ath10k_ce_pipe *);
 
-struct ce_sendlist {
-	unsigned int num_items;
-	struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+	unsigned int src_sz_max;
+	struct ath10k_ce_ring *src_ring;
+	struct ath10k_ce_ring *dest_ring;
 };
 
 /* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
 *
 * Implementation note: pushes 1 buffer to Source ring
 */
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_send_context,
		   u32 buffer,
		   unsigned int nbytes,
@@ -190,36 +152,11 @@ int ath10k_ce_send(struct ce_state *ce_state,
		   unsigned int transfer_id,
		   unsigned int flags);
 
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
-				void (*send_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*send_cb)(struct ath10k_ce_pipe *),
				int disable_interrupts);
 
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
-				u32 buffer,
-				unsigned int nbytes,
-				/* OR-ed with internal flags */
-				u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- * ce - which copy engine to use
- * sendlist - list of simple buffers to send using gather
- * transfer_id - arbitrary ID; reflected to destination
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
-			    void *per_transfer_send_context,
-			    struct ce_sendlist *sendlist,
-			    /* 14 bits */
-			    unsigned int transfer_id);
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 
 /*==================Recv=======================*/
 
@@ -233,17 +170,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
 *
 * Implemenation note: Pushes a buffer to Dest ring.
 */
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_transfer_recv_context,
			       u32 buffer);
 
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
-				void (*recv_cb) (struct ce_state *ce_state,
-						 void *transfer_context,
-						 u32 buffer,
-						 unsigned int nbytes,
-						 unsigned int transfer_id,
-						 unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+				void (*recv_cb)(struct ath10k_ce_pipe *));
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -253,7 +185,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
 * Supply data for the next completed unprocessed receive descriptor.
 * Pops buffer from Dest ring.
 */
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
@@ -263,7 +195,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
 * Supply data for the next completed unprocessed send descriptor.
 * Pops 1 completed send buffer from Source ring.
 */
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
			   void **per_transfer_contextp,
			   u32 *bufferp,
			   unsigned int *nbytesp,
@@ -272,7 +204,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
 /*==================CE Engine Initialization=======================*/
 
 /* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				unsigned int ce_id,
				const struct ce_attr *attr);
 
@@ -282,7 +214,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
 * receive buffers. Target DMA must be stopped before using
 * this API.
 */
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp);
 
@@ -291,13 +223,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
 * pending sends. Target DMA must be stopped before using
 * this API.
 */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
 
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +254,6 @@ struct ce_attr {
	/* CE_ATTR_* values */
	unsigned int flags;
 
-	/* currently not in use */
-	unsigned int priority;
-
	/* #entries in source ring - Must be a power of 2 */
	unsigned int src_nentries;
 
@@ -336,21 +265,8 @@ struct ce_attr {
	/* #entries in destination ring - Must be a power of 2 */
	unsigned int dest_nentries;
-
-	/* Future use */
-	void *reserved;
 };
 
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT	((void *)0xcecebeef)
-
 #define SR_BA_ADDRESS		0x0000
 #define SR_SIZE_ADDRESS		0x0004
 #define DR_BA_ADDRESS		0x0008
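The ce.h interface change is the heart of this series: ce_state becomes ath10k_ce_pipe, the sendlist API disappears, and the send/recv callbacks shrink from five-argument completion reports to bare per-pipe notifications, with the callback now expected to pull completions itself via the ath10k_ce_completed_{send,recv}_next() accessors. A standalone model of that push-to-pull inversion (all names hypothetical):

#include <stdio.h>

struct pipe;

/* old style: the service routine pushed each completion's details */
typedef void (*old_cb)(struct pipe *p, void *ctx, unsigned int buffer,
		       unsigned int nbytes, unsigned int transfer_id);

/* new style: the callback only learns "this pipe has work" */
typedef void (*new_cb)(struct pipe *p);

struct pipe {
	new_cb send_cb;
	int pending;	/* completed-but-unreaped descriptors */
};

/* stand-in for ath10k_ce_completed_send_next(): 0 while work remains */
static int completed_send_next(struct pipe *p)
{
	if (p->pending == 0)
		return -1;
	p->pending--;
	return 0;
}

static void my_send_done(struct pipe *p)
{
	while (completed_send_next(p) == 0)
		printf("reaped one send completion\n");
}

int main(void)
{
	struct pipe p = { .send_cb = my_send_done, .pending = 3 };

	p.send_cb(&p);	/* what ath10k_ce_per_engine_service() now does */
	return 0;
}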
ret = ath10k_push_board_ext_data(ar, fw); + ret = ath10k_push_board_ext_data(ar); if (ret) { ath10k_err("could not push board ext data (%d)\n", ret); goto exit; @@ -264,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar) goto exit; } - ret = ath10k_bmi_write_memory(ar, address, fw->data, - min_t(u32, board_data_size, fw->size)); + ret = ath10k_bmi_write_memory(ar, address, ar->board_data, + min_t(u32, board_data_size, + ar->board_len)); if (ret) { ath10k_err("could not write board data (%d)\n", ret); goto exit; @@ -283,17 +250,16 @@ exit: static int ath10k_download_and_run_otp(struct ath10k *ar) { - const struct firmware *fw = ar->otp; u32 address = ar->hw_params.patch_load_addr; u32 exec_param; int ret; /* OTP is optional */ - if (!ar->otp) + if (!ar->otp_data || !ar->otp_len) return 0; - ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); + ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); if (ret) { ath10k_err("could not write otp (%d)\n", ret); goto exit; @@ -312,13 +278,13 @@ exit: static int ath10k_download_fw(struct ath10k *ar) { - const struct firmware *fw = ar->firmware; u32 address; int ret; address = ar->hw_params.patch_load_addr; - ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size); + ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data, + ar->firmware_len); if (ret) { ath10k_err("could not write fw (%d)\n", ret); goto exit; @@ -330,8 +296,8 @@ exit: static void ath10k_core_free_firmware_files(struct ath10k *ar) { - if (ar->board_data && !IS_ERR(ar->board_data)) - release_firmware(ar->board_data); + if (ar->board && !IS_ERR(ar->board)) + release_firmware(ar->board); if (ar->otp && !IS_ERR(ar->otp)) release_firmware(ar->otp); @@ -339,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) if (ar->firmware && !IS_ERR(ar->firmware)) release_firmware(ar->firmware); + ar->board = NULL; ar->board_data = NULL; + ar->board_len = 0; + ar->otp = NULL; + ar->otp_data = NULL; + ar->otp_len = 0; + ar->firmware = NULL; + ar->firmware_data = NULL; + ar->firmware_len = 0; } -static int ath10k_core_fetch_firmware_files(struct ath10k *ar) +static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) { int ret = 0; @@ -358,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) return -EINVAL; } - ar->board_data = ath10k_fetch_fw_file(ar, - ar->hw_params.fw.dir, - ar->hw_params.fw.board); - if (IS_ERR(ar->board_data)) { - ret = PTR_ERR(ar->board_data); + ar->board = ath10k_fetch_fw_file(ar, + ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(ar->board)) { + ret = PTR_ERR(ar->board); ath10k_err("could not fetch board data (%d)\n", ret); goto err; } + ar->board_data = ar->board->data; + ar->board_len = ar->board->size; + ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, ar->hw_params.fw.fw); @@ -376,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) goto err; } + ar->firmware_data = ar->firmware->data; + ar->firmware_len = ar->firmware->size; + /* OTP may be undefined. 
If so, don't fetch it at all */ if (ar->hw_params.fw.otp == NULL) return 0; @@ -389,6 +369,172 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) goto err; } + ar->otp_data = ar->otp->data; + ar->otp_len = ar->otp->size; + + return 0; + +err: + ath10k_core_free_firmware_files(ar); + return ret; +} + +static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) +{ + size_t magic_len, len, ie_len; + int ie_id, i, index, bit, ret; + struct ath10k_fw_ie *hdr; + const u8 *data; + __le32 *timestamp; + + /* first fetch the firmware file (firmware-*.bin) */ + ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); + if (IS_ERR(ar->firmware)) { + ath10k_err("Could not fetch firmware file '%s': %ld\n", + name, PTR_ERR(ar->firmware)); + return PTR_ERR(ar->firmware); + } + + data = ar->firmware->data; + len = ar->firmware->size; + + /* magic also includes the null byte, check that as well */ + magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; + + if (len < magic_len) { + ath10k_err("firmware image too small to contain magic: %zu\n", + len); + ret = -EINVAL; + goto err; + } + + if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { + ath10k_err("Invalid firmware magic\n"); + ret = -EINVAL; + goto err; + } + + /* jump over the padding */ + magic_len = ALIGN(magic_len, 4); + + len -= magic_len; + data += magic_len; + + /* loop elements */ + while (len > sizeof(struct ath10k_fw_ie)) { + hdr = (struct ath10k_fw_ie *)data; + + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data += sizeof(*hdr); + + if (len < ie_len) { + ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n", + ie_id, len, ie_len); + ret = -EINVAL; + goto err; + } + + switch (ie_id) { + case ATH10K_FW_IE_FW_VERSION: + if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1) + break; + + memcpy(ar->hw->wiphy->fw_version, data, ie_len); + ar->hw->wiphy->fw_version[ie_len] = '\0'; + + ath10k_dbg(ATH10K_DBG_BOOT, + "found fw version %s\n", + ar->hw->wiphy->fw_version); + break; + case ATH10K_FW_IE_TIMESTAMP: + if (ie_len != sizeof(u32)) + break; + + timestamp = (__le32 *)data; + + ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n", + le32_to_cpup(timestamp)); + break; + case ATH10K_FW_IE_FEATURES: + ath10k_dbg(ATH10K_DBG_BOOT, + "found firmware features ie (%zd B)\n", + ie_len); + + for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { + index = i / 8; + bit = i % 8; + + if (index == ie_len) + break; + + if (data[index] & (1 << bit)) + __set_bit(i, ar->fw_features); + } + + ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "", + ar->fw_features, + sizeof(ar->fw_features)); + break; + case ATH10K_FW_IE_FW_IMAGE: + ath10k_dbg(ATH10K_DBG_BOOT, + "found fw image ie (%zd B)\n", + ie_len); + + ar->firmware_data = data; + ar->firmware_len = ie_len; + + break; + case ATH10K_FW_IE_OTP_IMAGE: + ath10k_dbg(ATH10K_DBG_BOOT, + "found otp image ie (%zd B)\n", + ie_len); + + ar->otp_data = data; + ar->otp_len = ie_len; + + break; + default: + ath10k_warn("Unknown FW IE: %u\n", + le32_to_cpu(hdr->id)); + break; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + + if (!ar->firmware_data || !ar->firmware_len) { + ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n", + name); + ret = -ENOMEDIUM; + goto err; + } + + /* now fetch the board file */ + if (ar->hw_params.fw.board == NULL) { + ath10k_err("board data file not defined"); + ret = -EINVAL; + goto err; + } + + ar->board = ath10k_fetch_fw_file(ar, + 
ar->hw_params.fw.dir, + ar->hw_params.fw.board); + if (IS_ERR(ar->board)) { + ret = PTR_ERR(ar->board); + ath10k_err("could not fetch board data (%d)\n", ret); + goto err; + } + + ar->board_data = ar->board->data; + ar->board_len = ar->board->size; + return 0; err: @@ -396,6 +542,28 @@ err: return ret; } +static int ath10k_core_fetch_firmware_files(struct ath10k *ar) +{ + int ret; + + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); + if (ret == 0) { + ar->fw_api = 2; + goto out; + } + + ret = ath10k_core_fetch_firmware_api_1(ar); + if (ret) + return ret; + + ar->fw_api = 1; + +out: + ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + + return 0; +} + static int ath10k_init_download_firmware(struct ath10k *ar) { int ret; @@ -446,6 +614,13 @@ static int ath10k_init_uart(struct ath10k *ar) return ret; } + /* Set the UART baud rate to 19200. */ + ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); + if (ret) { + ath10k_warn("could not set the baud rate (%d)\n", ret); + return ret; + } + ath10k_info("UART prints enabled\n"); return 0; } @@ -545,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work); skb_queue_head_init(&ar->offchan_tx_queue); + INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work); + skb_queue_head_init(&ar->wmi_mgmt_tx_queue); + init_waitqueue_head(&ar->event_queue); INIT_WORK(&ar->restart_work, ath10k_core_restart); @@ -559,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create); void ath10k_core_destroy(struct ath10k *ar) { + ath10k_debug_destroy(ar); + flush_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue); @@ -570,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar) { int status; + lockdep_assert_held(&ar->conf_mutex); + ath10k_bmi_start(ar); if (ath10k_init_configure_target(ar)) { @@ -620,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar) ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version); - status = ath10k_check_fw_version(ar); - if (status) - goto err_disconnect_htc; - status = ath10k_wmi_cmd_init(ar); if (status) { ath10k_err("could not send WMI init command (%d)\n", status); @@ -641,7 +819,12 @@ int ath10k_core_start(struct ath10k *ar) if (status) goto err_disconnect_htc; + status = ath10k_debug_start(ar); + if (status) + goto err_disconnect_htc; + ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + INIT_LIST_HEAD(&ar->arvifs); return 0; @@ -658,6 +841,9 @@ EXPORT_SYMBOL(ath10k_core_start); void ath10k_core_stop(struct ath10k *ar) { + lockdep_assert_held(&ar->conf_mutex); + + ath10k_debug_stop(ar); ath10k_htc_stop(&ar->htc); ath10k_htt_detach(&ar->htt); ath10k_wmi_detach(ar); @@ -704,23 +890,65 @@ static int ath10k_core_probe_fw(struct ath10k *ar) return ret; } + mutex_lock(&ar->conf_mutex); + ret = ath10k_core_start(ar); if (ret) { ath10k_err("could not init core (%d)\n", ret); ath10k_core_free_firmware_files(ar); ath10k_hif_power_down(ar); + mutex_unlock(&ar->conf_mutex); return ret; } ath10k_core_stop(ar); + + mutex_unlock(&ar->conf_mutex); + ath10k_hif_power_down(ar); return 0; } -int ath10k_core_register(struct ath10k *ar) +static int ath10k_core_check_chip_id(struct ath10k *ar) +{ + u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV); + + ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n", + ar->chip_id, hw_revision); + + /* Check that we are not using hw1.0 (some of them have same pci id + * as hw2.0) before doing anything else as ath10k crashes horribly + * due to missing hw1.0 workarounds. 
*/ + switch (hw_revision) { + case QCA988X_HW_1_0_CHIP_ID_REV: + ath10k_err("ERROR: qca988x hw1.0 is not supported\n"); + return -EOPNOTSUPP; + + case QCA988X_HW_2_0_CHIP_ID_REV: + /* known hardware revision, continue normally */ + return 0; + + default: + ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n", + ar->chip_id); + return 0; + } + + return 0; +} + +int ath10k_core_register(struct ath10k *ar, u32 chip_id) { int status; + ar->chip_id = chip_id; + + status = ath10k_core_check_chip_id(ar); + if (status) { + ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id); + return status; + } + status = ath10k_core_probe_fw(ar); if (status) { ath10k_err("could not probe fw (%d)\n", status); @@ -755,6 +983,7 @@ void ath10k_core_unregister(struct ath10k *ar) * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. */ ath10k_mac_unregister(ar); + ath10k_core_free_firmware_files(ar); } EXPORT_SYMBOL(ath10k_core_unregister); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index e4bba563ed42..0934f7633de3 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -43,27 +43,23 @@ /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 +#define ATH10K_MAX_NUM_MGMT_PENDING 16 + struct ath10k; struct ath10k_skb_cb { dma_addr_t paddr; bool is_mapped; bool is_aborted; + u8 vdev_id; struct { - u8 vdev_id; - u16 msdu_id; u8 tid; bool is_offchan; - bool is_conf; - bool discard; - bool no_ack; - u8 refcount; - struct sk_buff *txfrag; - struct sk_buff *msdu; - } __packed htt; - /* 4 bytes left on 64bit arch */ + u8 frag_len; + u8 pad_len; + } __packed htt; } __packed; static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) @@ -108,15 +104,26 @@ struct ath10k_bmi { bool done_sent; }; +#define ATH10K_MAX_MEM_REQS 16 + +struct ath10k_mem_chunk { + void *vaddr; + dma_addr_t paddr; + u32 len; + u32 req_id; +}; + struct ath10k_wmi { enum ath10k_htc_ep_id eid; struct completion service_ready; struct completion unified_ready; - atomic_t pending_tx_count; - wait_queue_head_t wq; + wait_queue_head_t tx_credits_wq; + struct wmi_cmd_map *cmd; + struct wmi_vdev_param_map *vdev_param; + struct wmi_pdev_param_map *pdev_param; - struct sk_buff_head wmi_event_list; - struct work_struct wmi_event_work; + u32 num_mem_chunks; + struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS]; }; struct ath10k_peer_stat { @@ -198,17 +205,22 @@ struct ath10k_peer { #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) struct ath10k_vif { + struct list_head list; + u32 vdev_id; enum wmi_vdev_type vdev_type; enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; u32 dtim_period; + struct sk_buff *beacon; struct ath10k *ar; struct ieee80211_vif *vif; + struct work_struct wep_key_work; struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; - u8 def_wep_key_index; + u8 def_wep_key_idx; + u8 def_wep_key_newidx; u16 tx_seq_no; @@ -246,6 +258,9 @@ struct ath10k_debug { u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; struct completion event_stats_compl; + + unsigned long htt_stats_mask; + struct delayed_work htt_stats_dwork; }; enum ath10k_state { @@ -270,12 +285,27 @@ enum ath10k_state { ATH10K_STATE_WEDGED, }; +enum ath10k_fw_features { + /* wmi_mgmt_rx_hdr contains extra RSSI information */ + ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0, + + /* firmware from 10X branch */ + ATH10K_FW_FEATURE_WMI_10X = 1, + + /* firmware support tx frame management over WMI, otherwise it's HTT 
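ath10k_core_fetch_firmware_api_n() introduces the API 2 firmware container: a NUL-terminated magic string padded to a 4-byte boundary, followed by little-endian (id, len) IEs whose payloads are likewise padded to 4 bytes. A self-contained model of that walk (the "QCA-ATH10K" magic is an assumption about ATH10K_FIRMWARE_MAGIC, and the sketch assumes a little-endian host where the driver would use le32_to_cpu()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

/* mirrors struct ath10k_fw_ie: id and len, payload follows */
struct fw_ie {
	uint32_t id;
	uint32_t len;
};

static int parse(const uint8_t *data, size_t len, const char *magic)
{
	size_t magic_len = strlen(magic) + 1;	/* magic includes the NUL */

	if (len < magic_len || memcmp(data, magic, magic_len) != 0)
		return -1;

	magic_len = ALIGN4(magic_len);		/* jump over the padding */
	data += magic_len;
	len -= magic_len;

	while (len > sizeof(struct fw_ie)) {
		struct fw_ie hdr;
		uint32_t ie_len;

		memcpy(&hdr, data, sizeof(hdr));
		ie_len = hdr.len;

		data += sizeof(hdr);
		len -= sizeof(hdr);
		if (len < ie_len)
			return -1;	/* truncated IE */

		printf("IE id %u, %u byte(s)\n", hdr.id, ie_len);

		ie_len = ALIGN4(ie_len);	/* payloads are padded too */
		data += ie_len;
		len -= ie_len;
	}
	return 0;
}

int main(void)
{
	uint8_t blob[32] = { 0 };
	struct fw_ie ie = { .id = 2, .len = 1 };	/* e.g. a features IE */

	memcpy(blob, "QCA-ATH10K", 11);		/* magic + NUL, padded to 12 */
	memcpy(blob + 12, &ie, sizeof(ie));
	blob[20] = 0x01;			/* 1-byte payload, padded to 4 */

	return parse(blob, 24, "QCA-ATH10K");
}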
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index e4bba563ed42..0934f7633de3 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -43,27 +43,23 @@
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
+#define ATH10K_MAX_NUM_MGMT_PENDING 16
+
 struct ath10k;
 
 struct ath10k_skb_cb {
	dma_addr_t paddr;
	bool is_mapped;
	bool is_aborted;
+	u8 vdev_id;
 
	struct {
-		u8 vdev_id;
-		u16 msdu_id;
		u8 tid;
		bool is_offchan;
-		bool is_conf;
-		bool discard;
-		bool no_ack;
-		u8 refcount;
-		struct sk_buff *txfrag;
-		struct sk_buff *msdu;
-	} __packed htt;
-
-	/* 4 bytes left on 64bit arch */
+		u8 frag_len;
+		u8 pad_len;
+	} __packed htt;
 } __packed;
 
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -108,15 +104,26 @@ struct ath10k_bmi {
	bool done_sent;
 };
 
+#define ATH10K_MAX_MEM_REQS 16
+
+struct ath10k_mem_chunk {
+	void *vaddr;
+	dma_addr_t paddr;
+	u32 len;
+	u32 req_id;
+};
+
 struct ath10k_wmi {
	enum ath10k_htc_ep_id eid;
	struct completion service_ready;
	struct completion unified_ready;
-	atomic_t pending_tx_count;
-	wait_queue_head_t wq;
+	wait_queue_head_t tx_credits_wq;
+	struct wmi_cmd_map *cmd;
+	struct wmi_vdev_param_map *vdev_param;
+	struct wmi_pdev_param_map *pdev_param;
 
-	struct sk_buff_head wmi_event_list;
-	struct work_struct wmi_event_work;
+	u32 num_mem_chunks;
+	struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
 };
 
 struct ath10k_peer_stat {
@@ -198,17 +205,22 @@ struct ath10k_peer {
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
 
 struct ath10k_vif {
+	struct list_head list;
+
	u32 vdev_id;
	enum wmi_vdev_type vdev_type;
	enum wmi_vdev_subtype vdev_subtype;
	u32 beacon_interval;
	u32 dtim_period;
+	struct sk_buff *beacon;
 
	struct ath10k *ar;
	struct ieee80211_vif *vif;
 
+	struct work_struct wep_key_work;
	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
-	u8 def_wep_key_index;
+	u8 def_wep_key_idx;
+	u8 def_wep_key_newidx;
 
	u16 tx_seq_no;
 
@@ -246,6 +258,9 @@ struct ath10k_debug {
	u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
 
	struct completion event_stats_compl;
+
+	unsigned long htt_stats_mask;
+	struct delayed_work htt_stats_dwork;
 };
 
 enum ath10k_state {
@@ -270,12 +285,27 @@ enum ath10k_state {
	ATH10K_STATE_WEDGED,
 };
 
+enum ath10k_fw_features {
+	/* wmi_mgmt_rx_hdr contains extra RSSI information */
+	ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+	/* firmware from 10X branch */
+	ATH10K_FW_FEATURE_WMI_10X = 1,
+
+	/* firmware support tx frame management over WMI, otherwise it's HTT */
+	ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+	/* keep last */
+	ATH10K_FW_FEATURE_COUNT,
+};
+
 struct ath10k {
	struct ath_common ath_common;
	struct ieee80211_hw *hw;
	struct device *dev;
	u8 mac_addr[ETH_ALEN];
 
+	u32 chip_id;
	u32 target_version;
	u8 fw_version_major;
	u32 fw_version_minor;
@@ -288,6 +318,8 @@ struct ath10k {
	u32 vht_cap_info;
	u32 num_rf_chains;
 
+	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
	struct targetdef *targetdef;
	struct hostdef *hostdef;
 
@@ -319,9 +351,19 @@ struct ath10k {
		} fw;
	} hw_params;
 
-	const struct firmware *board_data;
+	const struct firmware *board;
+	const void *board_data;
+	size_t board_len;
+
	const struct firmware *otp;
+	const void *otp_data;
+	size_t otp_len;
+
	const struct firmware *firmware;
+	const void *firmware_data;
+	size_t firmware_len;
+
+	int fw_api;
 
	struct {
		struct completion started;
@@ -364,6 +406,7 @@ struct ath10k {
	/* protects shared structure data */
	spinlock_t data_lock;
 
+	struct list_head arvifs;
	struct list_head peers;
	wait_queue_head_t peer_mapping_wq;
 
@@ -372,6 +415,9 @@ struct ath10k {
	struct completion offchan_tx_completed;
	struct sk_buff *offchan_tx_skb;
 
+	struct work_struct wmi_mgmt_tx_work;
+	struct sk_buff_head wmi_mgmt_tx_queue;
+
	enum ath10k_state state;
 
	struct work_struct restart_work;
@@ -393,7 +439,7 @@ void ath10k_core_destroy(struct ath10k *ar);
 int ath10k_core_start(struct ath10k *ar);
 void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
 void ath10k_core_unregister(struct ath10k *ar);
 
 #endif /* _CORE_H_ */
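core.h grows a fw_features bitmap on struct ath10k, filled while parsing ATH10K_FW_IE_FEATURES with the byte/bit layout index = i / 8, bit = i % 8. A userspace model of decoding and testing such a feature blob (plain C stand-ins for the kernel's __set_bit()/test_bit(); names and values are illustrative):

#include <stdio.h>

enum fw_feature {
	FEATURE_EXT_WMI_MGMT_RX = 0,
	FEATURE_WMI_10X = 1,
	FEATURE_HAS_WMI_MGMT_TX = 2,
	FEATURE_COUNT,
};

int main(void)
{
	unsigned char ie_data[] = { 0x05 };	/* bits 0 and 2 set */
	unsigned long features = 0;		/* the driver's bitmap */
	unsigned int i;

	for (i = 0; i < FEATURE_COUNT; i++) {
		unsigned int index = i / 8, bit = i % 8;

		if (index >= sizeof(ie_data))
			break;
		if (ie_data[index] & (1u << bit))
			features |= 1ul << i;	/* __set_bit() in-kernel */
	}

	/* e.g. choosing WMI vs HTT for management tx */
	printf("mgmt tx over wmi: %s\n",
	       (features >> FEATURE_HAS_WMI_MGMT_TX) & 1 ? "yes" : "no");
	return 0;
}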
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 3d65594fa098..760ff2289e3c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -21,6 +21,9 @@
 #include "core.h"
 #include "debug.h"
 
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
 static int ath10k_printk(const char *level, const char *fmt, ...)
 {
	struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
	}
 
	spin_unlock_bh(&ar->data_lock);
-	mutex_unlock(&ar->conf_mutex);
	complete(&ar->debug.event_stats_compl);
 }
 
@@ -499,6 +501,144 @@ static const struct file_operations fops_simulate_fw_crash = {
	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned int len;
+	char buf[50];
+
+	len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+	.read = ath10k_read_chip_id,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+	u64 cookie;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	if (ar->debug.htt_stats_mask == 0)
+		/* htt stats are disabled */
+		return 0;
+
+	if (ar->state != ATH10K_STATE_ON)
+		return 0;
+
+	cookie = get_jiffies_64();
+
+	ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+				       cookie);
+	if (ret) {
+		ath10k_warn("failed to send htt stats request: %d\n", ret);
+		return ret;
+	}
+
+	queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+			   msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+	return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k,
+					 debug.htt_stats_dwork.work);
+
+	mutex_lock(&ar->conf_mutex);
+
+	ath10k_debug_htt_stats_req(ar);
+
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	char buf[32];
+	unsigned int len;
+
+	len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	unsigned long mask;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+	if (ret)
+		return ret;
+
+	/* max 8 bit masks (for now) */
+	if (mask > 0xff)
+		return -E2BIG;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.htt_stats_mask = mask;
+
+	ret = ath10k_debug_htt_stats_req(ar);
+	if (ret)
+		goto out;
+
+	ret = count;
+
+out:
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+	.read = ath10k_read_htt_stats_mask,
+	.write = ath10k_write_htt_stats_mask,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	ret = ath10k_debug_htt_stats_req(ar);
+	if (ret)
+		/* continue normally anyway, this isn't serious */
+		ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+
+	return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* Must not use _sync to avoid deadlock, we do that in
+	 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
+	 * warning from del_timer(). */
+	if (ar->debug.htt_stats_mask != 0)
+		cancel_delayed_work(&ar->debug.htt_stats_dwork);
+}
+
 int ath10k_debug_create(struct ath10k *ar)
 {
	ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +647,9 @@ int ath10k_debug_create(struct ath10k *ar)
	if (!ar->debug.debugfs_phy)
		return -ENOMEM;
 
+	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+			  ath10k_debug_htt_stats_dwork);
+
	init_completion(&ar->debug.event_stats_compl);
 
	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +661,20 @@ int ath10k_debug_create(struct ath10k *ar)
	debugfs_create_file("simulate_fw_crash", S_IRUSR,
			    ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash);
 
+	debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_chip_id);
+
+	debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+			    ar, &fops_htt_stats_mask);
+
	return 0;
 }
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+	cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 168140c54028..3cfe3ee90dbe 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -27,22 +27,26 @@ enum ath10k_debug_mask {
	ATH10K_DBG_HTC		= 0x00000004,
	ATH10K_DBG_HTT		= 0x00000008,
	ATH10K_DBG_MAC		= 0x00000010,
-	ATH10K_DBG_CORE		= 0x00000020,
+	ATH10K_DBG_BOOT		= 0x00000020,
	ATH10K_DBG_PCI_DUMP	= 0x00000040,
	ATH10K_DBG_HTT_DUMP	= 0x00000080,
	ATH10K_DBG_MGMT		= 0x00000100,
	ATH10K_DBG_DATA		= 0x00000200,
+	ATH10K_DBG_BMI		= 0x00000400,
	ATH10K_DBG_ANY		= 0xffffffff,
 };
 
 extern unsigned int ath10k_debug_mask;
 
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
 
 #ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
 int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
 void ath10k_debug_read_service_map(struct ath10k *ar,
				   void *service_map,
				   size_t map_size);
@@ -50,11 +54,24 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
				    struct wmi_stats_event *ev);
 
 #else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+	return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
 static inline int ath10k_debug_create(struct ath10k *ar)
 {
	return 0;
 }
 
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
 static inline void ath10k_debug_read_service_map(struct ath10k *ar,
						  void *service_map,
						  size_t map_size)
@@ -68,7 +85,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
			       const char *fmt, ...);
 void ath10k_dbg_dump(enum ath10k_debug_mask mask,
		     const char *msg, const char *prefix,
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index ef3329ef52f3..3118d7506734 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
 	struct ath10k_htc_hdr
*hdr; hdr = (struct ath10k_htc_hdr *)skb->data; - memset(hdr, 0, sizeof(*hdr)); hdr->eid = ep->eid; hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); + hdr->flags = 0; spin_lock_bh(&ep->htc->tx_lock); hdr->seq_no = ep->seq_no++; @@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, spin_unlock_bh(&ep->htc->tx_lock); } -static int ath10k_htc_issue_skb(struct ath10k_htc *htc, - struct ath10k_htc_ep *ep, - struct sk_buff *skb, - u8 credits) -{ - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - int ret; - - ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, - ep->eid, skb); - - ath10k_htc_prepare_tx_skb(ep, skb); - - ret = ath10k_skb_map(htc->ar->dev, skb); - if (ret) - goto err; - - ret = ath10k_hif_send_head(htc->ar, - ep->ul_pipe_id, - ep->eid, - skb->len, - skb); - if (unlikely(ret)) - goto err; - - return 0; -err: - ath10k_warn("HTC issue failed: %d\n", ret); - - spin_lock_bh(&htc->tx_lock); - ep->tx_credits += credits; - spin_unlock_bh(&htc->tx_lock); - - /* this is the simplest way to handle out-of-resources for non-credit - * based endpoints. credit based endpoints can still get -ENOSR, but - * this is highly unlikely as credit reservation should prevent that */ - if (ret == -ENOSR) { - spin_lock_bh(&htc->tx_lock); - __skb_queue_head(&ep->tx_queue, skb); - spin_unlock_bh(&htc->tx_lock); - - return ret; - } - - skb_cb->is_aborted = true; - ath10k_htc_notify_tx_completion(ep, skb); - - return ret; -} - -static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc, - struct ath10k_htc_ep *ep, - u8 *credits) -{ - struct sk_buff *skb; - struct ath10k_skb_cb *skb_cb; - int credits_required; - int remainder; - unsigned int transfer_len; - - lockdep_assert_held(&htc->tx_lock); - - skb = __skb_dequeue(&ep->tx_queue); - if (!skb) - return NULL; - - skb_cb = ATH10K_SKB_CB(skb); - transfer_len = skb->len; - - if (likely(transfer_len <= htc->target_credit_size)) { - credits_required = 1; - } else { - /* figure out how many credits this message requires */ - credits_required = transfer_len / htc->target_credit_size; - remainder = transfer_len % htc->target_credit_size; - - if (remainder) - credits_required++; - } - - ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n", - credits_required, ep->tx_credits); - - if (ep->tx_credits < credits_required) { - __skb_queue_head(&ep->tx_queue, skb); - return NULL; - } - - ep->tx_credits -= credits_required; - *credits = credits_required; - return skb; -} - -static void ath10k_htc_send_work(struct work_struct *work) -{ - struct ath10k_htc_ep *ep = container_of(work, - struct ath10k_htc_ep, send_work); - struct ath10k_htc *htc = ep->htc; - struct sk_buff *skb; - u8 credits = 0; - int ret; - - while (true) { - if (ep->ul_is_polled) - ath10k_htc_send_complete_check(ep, 0); - - spin_lock_bh(&htc->tx_lock); - if (ep->tx_credit_flow_enabled) - skb = ath10k_htc_get_skb_credit_based(htc, ep, - &credits); - else - skb = __skb_dequeue(&ep->tx_queue); - spin_unlock_bh(&htc->tx_lock); - - if (!skb) - break; - - ret = ath10k_htc_issue_skb(htc, ep, skb, credits); - if (ret == -ENOSR) - break; - } -} - int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *skb) { struct ath10k_htc_ep *ep = &htc->endpoint[eid]; + int credits = 0; + int ret; if (htc->ar->state == ATH10K_STATE_WEDGED) return -ECOMM; @@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc, return -ENOENT; } + /* FIXME: This looks ugly, can we fix it? 
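A note on the reworked send path below: its credit reservation is a plain ceiling division of the frame length by the target's credit size. A standalone illustration (the 1792-byte credit size is a made-up example; the real value is negotiated with the target at HTC setup):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int credit_size = 1792;	/* assumed for the example */
	unsigned int frame_len = 3000;		/* HTC header + payload */

	/* 3000 bytes -> 2 credits; if the endpoint holds fewer credits
	 * the send fails with -EAGAIN and the caller may retry later */
	printf("credits required: %u\n",
	       DIV_ROUND_UP(frame_len, credit_size));
	return 0;
}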
*/ spin_lock_bh(&htc->tx_lock); if (htc->stopped) { spin_unlock_bh(&htc->tx_lock); return -ESHUTDOWN; } + spin_unlock_bh(&htc->tx_lock); - __skb_queue_tail(&ep->tx_queue, skb); skb_push(skb, sizeof(struct ath10k_htc_hdr)); - spin_unlock_bh(&htc->tx_lock); - queue_work(htc->ar->workqueue, &ep->send_work); + if (ep->tx_credit_flow_enabled) { + credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); + spin_lock_bh(&htc->tx_lock); + if (ep->tx_credits < credits) { + spin_unlock_bh(&htc->tx_lock); + ret = -EAGAIN; + goto err_pull; + } + ep->tx_credits -= credits; + spin_unlock_bh(&htc->tx_lock); + } + + ath10k_htc_prepare_tx_skb(ep, skb); + + ret = ath10k_skb_map(htc->ar->dev, skb); + if (ret) + goto err_credits; + + ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid, + skb->len, skb); + if (ret) + goto err_unmap; + return 0; + +err_unmap: + ath10k_skb_unmap(htc->ar->dev, skb); +err_credits: + if (ep->tx_credit_flow_enabled) { + spin_lock_bh(&htc->tx_lock); + ep->tx_credits += credits; + spin_unlock_bh(&htc->tx_lock); + + if (ep->ep_ops.ep_tx_credits) + ep->ep_ops.ep_tx_credits(htc->ar); + } +err_pull: + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + return ret; } static int ath10k_htc_tx_completion_handler(struct ath10k *ar, @@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar, ath10k_htc_notify_tx_completion(ep, skb); /* the skb now belongs to the completion handler */ - /* note: when using TX credit flow, the re-checking of queues happens - * when credits flow back from the target. in the non-TX credit case, - * we recheck after the packet completes */ - spin_lock_bh(&htc->tx_lock); - if (!ep->tx_credit_flow_enabled && !htc->stopped) - queue_work(ar->workqueue, &ep->send_work); - spin_unlock_bh(&htc->tx_lock); - return 0; } -/* flush endpoint TX queue */ -static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc, - struct ath10k_htc_ep *ep) -{ - struct sk_buff *skb; - struct ath10k_skb_cb *skb_cb; - - spin_lock_bh(&htc->tx_lock); - for (;;) { - skb = __skb_dequeue(&ep->tx_queue); - if (!skb) - break; - - skb_cb = ATH10K_SKB_CB(skb); - skb_cb->is_aborted = true; - ath10k_htc_notify_tx_completion(ep, skb); - } - spin_unlock_bh(&htc->tx_lock); - - cancel_work_sync(&ep->send_work); -} - /***********/ /* Receive */ /***********/ @@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, ep = &htc->endpoint[report->eid]; ep->tx_credits += report->credits; - if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue)) - queue_work(htc->ar->workqueue, &ep->send_work); + if (ep->ep_ops.ep_tx_credits) { + spin_unlock_bh(&htc->tx_lock); + ep->ep_ops.ep_tx_credits(htc->ar); + spin_lock_bh(&htc->tx_lock); + } } spin_unlock_bh(&htc->tx_lock); } @@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc) ep->max_ep_message_len = 0; ep->max_tx_queue_depth = 0; ep->eid = i; - skb_queue_head_init(&ep->tx_queue); ep->htc = htc; ep->tx_credit_flow_enabled = true; - INIT_WORK(&ep->send_work, ath10k_htc_send_work); } } @@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, tx_alloc = ath10k_htc_get_credit_allocation(htc, conn_req->service_id); if (!tx_alloc) - ath10k_dbg(ATH10K_DBG_HTC, - "HTC Service %s does not allocate target credits\n", + ath10k_dbg(ATH10K_DBG_BOOT, + "boot htc service %s does not allocate target credits\n", htc_service_name(conn_req->service_id)); skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); @@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, flags |= 
SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC); - req_msg = &msg->connect_service; - req_msg->flags = __cpu_to_le16(flags); - req_msg->service_id = __cpu_to_le16(conn_req->service_id); - /* Only enable credit flow control for WMI ctrl service */ if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) { flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; disable_credit_flow_ctrl = true; } + req_msg = &msg->connect_service; + req_msg->flags = __cpu_to_le16(flags); + req_msg->service_id = __cpu_to_le16(conn_req->service_id); + INIT_COMPLETION(htc->ctl_resp); status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); @@ -873,19 +760,19 @@ setup: if (status) return status; - ath10k_dbg(ATH10K_DBG_HTC, - "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n", + ath10k_dbg(ATH10K_DBG_BOOT, + "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", htc_service_name(ep->service_id), ep->ul_pipe_id, ep->dl_pipe_id, ep->eid); - ath10k_dbg(ATH10K_DBG_HTC, - "EP %d UL polled: %d, DL polled: %d\n", + ath10k_dbg(ATH10K_DBG_BOOT, + "boot htc ep %d ul polled %d dl polled %d\n", ep->eid, ep->ul_is_polled, ep->dl_is_polled); if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ep->tx_credit_flow_enabled = false; - ath10k_dbg(ATH10K_DBG_HTC, - "HTC service: %s eid: %d TX flow control disabled\n", + ath10k_dbg(ATH10K_DBG_BOOT, + "boot htc service '%s' eid %d TX flow control disabled\n", htc_service_name(ep->service_id), assigned_eid); } @@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc) */ void ath10k_htc_stop(struct ath10k_htc *htc) { - int i; - struct ath10k_htc_ep *ep; - spin_lock_bh(&htc->tx_lock); htc->stopped = true; spin_unlock_bh(&htc->tx_lock); - for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) { - ep = &htc->endpoint[i]; - ath10k_htc_flush_endpoint_tx(htc, ep); - } - ath10k_hif_stop(htc->ar); } diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index e1dd8c761853..4716d331e6b6 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -276,6 +276,7 @@ struct ath10k_htc_ops { struct ath10k_htc_ep_ops { void (*ep_tx_complete)(struct ath10k *, struct sk_buff *); void (*ep_rx_complete)(struct ath10k *, struct sk_buff *); + void (*ep_tx_credits)(struct ath10k *); }; /* service connection information */ @@ -315,15 +316,11 @@ struct ath10k_htc_ep { int ul_is_polled; /* call HIF to get tx completions */ int dl_is_polled; /* call HIF to fetch rx (not implemented) */ - struct sk_buff_head tx_queue; - u8 seq_no; /* for debugging */ int tx_credits; int tx_credit_size; int tx_credits_per_max_message; bool tx_credit_flow_enabled; - - struct work_struct send_work; }; struct ath10k_htc_svc_tx_credits { diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 39342c5cfcb2..5f7eeebc5432 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -104,21 +104,16 @@ err_htc_attach: static int ath10k_htt_verify_version(struct ath10k_htt *htt) { - ath10k_dbg(ATH10K_DBG_HTT, - "htt target version %d.%d; host version %d.%d\n", - htt->target_version_major, - htt->target_version_minor, - HTT_CURRENT_VERSION_MAJOR, - HTT_CURRENT_VERSION_MINOR); - - if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) { - ath10k_err("htt major versions are incompatible!\n"); + ath10k_info("htt target version %d.%d\n", + htt->target_version_major, htt->target_version_minor); + + if (htt->target_version_major != 2 && + htt->target_version_major 
!= 3) { + ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n", + htt->target_version_major); return -ENOTSUPP; } - if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR) - ath10k_warn("htt minor version differ but still compatible\n"); - return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 318be4629cde..1a337e93b7e9 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -19,13 +19,11 @@ #define _HTT_H_ #include <linux/bug.h> +#include <linux/interrupt.h> #include "htc.h" #include "rx_desc.h" -#define HTT_CURRENT_VERSION_MAJOR 2 -#define HTT_CURRENT_VERSION_MINOR 1 - enum htt_dbg_stats_type { HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0, HTT_DBG_STATS_RX_REORDER = 1 << 1, @@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */ HTT_H2T_MSG_TYPE_SYNC = 4, HTT_H2T_MSG_TYPE_AGGR_CFG = 5, HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6, + + /* This command is used for sending management frames in HTT < 3.0. + * HTT >= 3.0 uses TX_FRM for everything. */ HTT_H2T_MSG_TYPE_MGMT_TX = 7, HTT_H2T_NUM_MSGS /* keep this last */ @@ -1268,6 +1269,7 @@ struct ath10k_htt { /* set if host-fw communication goes haywire * used to avoid further failures */ bool rx_confused; + struct tasklet_struct rx_replenish_task; }; #define RX_HTT_HDR_STATUS_LEN 64 @@ -1308,6 +1310,10 @@ struct htt_rx_desc { #define HTT_RX_BUF_SIZE 1920 #define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc)) +/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle + * aggregated traffic more nicely. */ +#define ATH10K_HTT_MAX_NUM_REFILL 16 + /* * DMA_MAP expects the buffer to be an integral number of cache lines. * Rather than checking the actual cache line size, this code makes a @@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt); void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); +int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index e784c40b904b..90d4f74c28d7 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -20,6 +20,7 @@ #include "htt.h" #include "txrx.h" #include "debug.h" +#include "trace.h" #include <linux/log2.h> @@ -40,6 +41,10 @@ /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 + +static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); + + static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt) { int size; @@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) { - int ret, num_to_fill; + int ret, num_deficit, num_to_fill; + /* Refilling the whole RX ring buffer proves to be a bad idea. The + * reason is RX may take up a significant amount of CPU cycles and starve + * other tasks, e.g. TX on an ethernet device while acting as a bridge + * with an ath10k wlan interface. This ended up with very poor performance + * once the host system's CPU was overwhelmed with RX on ath10k.
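As a sanity check of the bounded-refill policy this comment describes, here is a small userspace simulation; the ring numbers are invented and MAX_REFILL mirrors ATH10K_HTT_MAX_NUM_REFILL:

#include <stdio.h>

#define MAX_REFILL 16

int main(void)
{
	int fill_level = 1000, fill_cnt = 930;	/* example ring state */
	int passes = 0;

	while (fill_cnt < fill_level) {
		int deficit = fill_level - fill_cnt;
		int n = deficit < MAX_REFILL ? deficit : MAX_REFILL;

		fill_cnt += n;	/* stands in for the ring fill helper */
		passes++;	/* stands in for tasklet_schedule() */
	}

	/* a 70-buffer deficit is served in 5 passes: 16+16+16+16+6 */
	printf("refilled in %d passes\n", passes);
	return 0;
}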
+ * + * By limiting the number of refills the replenishing occurs + * progressively. This in turn makes use of the fact that tasklets are + * processed in FIFO order. This means actual RX processing can starve + * out the refilling. If there are not enough buffers on the RX ring the + * FW will not report RX until it is refilled with enough buffers. This + * automatically balances load with respect to CPU power. + * + * This probably comes at a cost of lower maximum throughput but + * improves the average and stability. */ spin_lock_bh(&htt->rx_ring.lock); - num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; + num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; + num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); + num_deficit -= num_to_fill; ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); if (ret == -ENOMEM) { /* @@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt) */ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); + } else if (num_deficit > 0) { + tasklet_schedule(&htt->rx_replenish_task); } spin_unlock_bh(&htt->rx_ring.lock); } @@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt) int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld; del_timer_sync(&htt->rx_ring.refill_retry_timer); + tasklet_kill(&htt->rx_replenish_task); while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) { struct sk_buff *skb = @@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, return msdu_chaining; } +static void ath10k_htt_rx_replenish_task(unsigned long ptr) +{ + struct ath10k_htt *htt = (struct ath10k_htt *)ptr; + ath10k_htt_rx_msdu_buff_replenish(htt); +} + int ath10k_htt_rx_attach(struct ath10k_htt *htt) { dma_addr_t paddr; @@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt) if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) goto err_fill_ring; - ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n", + tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, + (unsigned long)htt); + + ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", htt->rx_ring.size, htt->rx_ring.fill_level); return 0; @@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr) return false; } -static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt, - struct htt_rx_info *info) +struct rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 snap_type; +} __packed; + +struct amsdu_subframe_hdr { + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN]; + __be16 len; +} __packed; + +static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, + struct htt_rx_info *info) { struct htt_rx_desc *rxd; - struct sk_buff *amsdu; struct sk_buff *first; - struct ieee80211_hdr *hdr; struct sk_buff *skb = info->skb; enum rx_msdu_decap_format fmt; enum htt_rx_mpdu_encrypt_type enctype; + struct ieee80211_hdr *hdr; + u8 hdr_buf[64], addr[ETH_ALEN], *qos; unsigned int hdr_len; - int crypto_len; rxd = (void *)skb->data - sizeof(*rxd); - fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), RX_MPDU_START_INFO0_ENCRYPT_TYPE); - /* FIXME: No idea what assumptions are safe here.
Need logs */ - if ((fmt == RX_MSDU_DECAP_RAW && skb->next) || - (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) { - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; - return -ENOTSUPP; - } + hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(hdr_buf, hdr, hdr_len); + hdr = (struct ieee80211_hdr *)hdr_buf; - /* A-MSDU max is a little less than 8K */ - amsdu = dev_alloc_skb(8*1024); - if (!amsdu) { - ath10k_warn("A-MSDU allocation failed\n"); - ath10k_htt_rx_free_msdu_chain(skb->next); - skb->next = NULL; - return -ENOMEM; - } - - if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) { - int hdrlen; - - hdr = (void *)rxd->rx_hdr_status; - hdrlen = ieee80211_hdrlen(hdr->frame_control); - memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen); - } + /* FIXME: Hopefully this is a temporary measure. + * + * Reporting individual A-MSDU subframes means each reported frame + * shares the same sequence number. + * + * mac80211 drops frames it recognizes as duplicates, i.e. + * retransmission flag is set and sequence number matches sequence + * number from a previous frame (as per IEEE 802.11-2012: 9.3.2.10 + * "Duplicate detection and recovery") + * + * To avoid frames being dropped clear retransmission flag for all + * received A-MSDUs. + * + * Worst case: actual duplicate frames will be reported but this should + * still be handled gracefully by other OSI/ISO layers. */ + hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY); first = skb; while (skb) { void *decap_hdr; - int decap_len = 0; + int len; rxd = (void *)skb->data - sizeof(*rxd); fmt = MS(__le32_to_cpu(rxd->msdu_start.info1), - RX_MSDU_START_INFO1_DECAP_FORMAT); + RX_MSDU_START_INFO1_DECAP_FORMAT); decap_hdr = (void *)rxd->rx_hdr_status; - if (skb == first) { - /* We receive linked A-MSDU subframe skbuffs. The - * first one contains the original 802.11 header (and - * possible crypto param) in the RX descriptor. The - * A-MSDU subframe header follows that. Each part is - * aligned to 4 byte boundary. */ - - hdr = (void *)amsdu->data; - hdr_len = ieee80211_hdrlen(hdr->frame_control); - crypto_len = ath10k_htt_rx_crypto_param_len(enctype); - - decap_hdr += roundup(hdr_len, 4); - decap_hdr += roundup(crypto_len, 4); - } + skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); - if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { - /* Ethernet2 decap inserts ethernet header in place of - * A-MSDU subframe header. */ - skb_pull(skb, 6 + 6 + 2); - - /* A-MSDU subframe header length */ - decap_len += 6 + 6 + 2; - - /* Ethernet2 decap also strips the LLC/SNAP so we need - * to re-insert it. The LLC/SNAP follows A-MSDU - * subframe header. */ - /* FIXME: Not all LLCs are 8 bytes long */ - decap_len += 8; - - memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len); + /* First frame in an A-MSDU chain has more decapped data. */ + if (skb == first) { + len = round_up(ieee80211_hdrlen(hdr->frame_control), 4); + len += round_up(ath10k_htt_rx_crypto_param_len(enctype), + 4); + decap_hdr += len; } - if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) { - /* Native Wifi decap inserts regular 802.11 header - * in place of A-MSDU subframe header. 
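The Ethernet2 re-encapsulation below strips the 14-byte Ethernet header and pushes back sizeof(struct amsdu_subframe_hdr) + sizeof(struct rfc1042_hdr) = 14 + 8 = 22 bytes plus the saved 802.11 header. A standalone size check of the two structures introduced above (userspace rendition of the same packed layouts):

#include <assert.h>
#include <stdint.h>

struct rfc1042_hdr {
	uint8_t llc_dsap, llc_ssap, llc_ctrl;
	uint8_t snap_oui[3];
	uint16_t snap_type;
} __attribute__((packed));

struct amsdu_subframe_hdr {
	uint8_t dst[6], src[6];
	uint16_t len;
} __attribute__((packed));

int main(void)
{
	assert(sizeof(struct rfc1042_hdr) == 8);
	assert(sizeof(struct amsdu_subframe_hdr) == 14);
	return 0;
}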
*/ + switch (fmt) { + case RX_MSDU_DECAP_RAW: + /* remove trailing FCS */ + skb_trim(skb, skb->len - FCS_LEN); + break; + case RX_MSDU_DECAP_NATIVE_WIFI: + /* pull decapped header and copy DA */ hdr = (struct ieee80211_hdr *)skb->data; - skb_pull(skb, ieee80211_hdrlen(hdr->frame_control)); + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN); + skb_pull(skb, hdr_len); - /* A-MSDU subframe header length */ - decap_len += 6 + 6 + 2; + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)hdr_buf; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_push(skb, hdr_len), hdr, hdr_len); - memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len); - } + /* original A-MSDU header has the bit set but we're + * not including A-MSDU subframe header */ + hdr = (struct ieee80211_hdr *)skb->data; + qos = ieee80211_get_qos_ctl(hdr); + qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - if (fmt == RX_MSDU_DECAP_RAW) - skb_trim(skb, skb->len - 4); /* remove FCS */ + /* original 802.11 header has a different DA */ + memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN); + break; + case RX_MSDU_DECAP_ETHERNET2_DIX: + /* strip ethernet header and insert decapped 802.11 + * header, amsdu subframe header and rfc1042 header */ - memcpy(skb_put(amsdu, skb->len), skb->data, skb->len); + len = 0; + len += sizeof(struct rfc1042_hdr); + len += sizeof(struct amsdu_subframe_hdr); - /* A-MSDU subframes are padded to 4bytes - * but relative to first subframe, not the whole MPDU */ - if (skb->next && ((decap_len + skb->len) & 3)) { - int padlen = 4 - ((decap_len + skb->len) & 3); - memset(skb_put(amsdu, padlen), 0, padlen); + skb_pull(skb, sizeof(struct ethhdr)); + memcpy(skb_push(skb, len), decap_hdr, len); + memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + break; + case RX_MSDU_DECAP_8023_SNAP_LLC: + /* insert decapped 802.11 header making a singly + * A-MSDU */ + memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + break; } + info->skb = skb; + info->encrypt_type = enctype; skb = skb->next; - } + info->skb->next = NULL; - info->skb = amsdu; - info->encrypt_type = enctype; - - ath10k_htt_rx_free_msdu_chain(first); + ath10k_process_rx(htt->ar, info); + } - return 0; + /* FIXME: It might be nice to re-assemble the A-MSDU when there's a + * monitor interface active for sniffing purposes. */ } -static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) +static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) { struct sk_buff *skb = info->skb; struct htt_rx_desc *rxd; struct ieee80211_hdr *hdr; enum rx_msdu_decap_format fmt; enum htt_rx_mpdu_encrypt_type enctype; + int hdr_len; + void *rfc1042; /* This shouldn't happen. If it does than it may be a FW bug. 
*/ if (skb->next) { @@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info) RX_MSDU_START_INFO1_DECAP_FORMAT); enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0), RX_MPDU_START_INFO0_ENCRYPT_TYPE); - hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN; + hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + skb->ip_summed = ath10k_htt_rx_get_csum_state(skb); switch (fmt) { case RX_MSDU_DECAP_RAW: /* remove trailing FCS */ - skb_trim(skb, skb->len - 4); + skb_trim(skb, skb->len - FCS_LEN); break; case RX_MSDU_DECAP_NATIVE_WIFI: - /* nothing to do here */ + /* Pull decapped header */ + hdr = (struct ieee80211_hdr *)skb->data; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + skb_pull(skb, hdr_len); + + /* Push original header */ + hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + memcpy(skb_push(skb, hdr_len), hdr, hdr_len); break; case RX_MSDU_DECAP_ETHERNET2_DIX: - /* macaddr[6] + macaddr[6] + ethertype[2] */ - skb_pull(skb, 6 + 6 + 2); - break; - case RX_MSDU_DECAP_8023_SNAP_LLC: - /* macaddr[6] + macaddr[6] + len[2] */ - /* we don't need this for non-A-MSDU */ - skb_pull(skb, 6 + 6 + 2); - break; - } + /* strip ethernet header and insert decapped 802.11 header and + * rfc1042 header */ - if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) { - void *llc; - int llclen; + rfc1042 = hdr; + rfc1042 += roundup(hdr_len, 4); + rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); - llclen = 8; - llc = hdr; - llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4); - llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); - - skb_push(skb, llclen); - memcpy(skb->data, llc, llclen); - } + skb_pull(skb, sizeof(struct ethhdr)); + memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)), + rfc1042, sizeof(struct rfc1042_hdr)); + memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + break; + case RX_MSDU_DECAP_8023_SNAP_LLC: + /* remove A-MSDU subframe header and insert + * decapped 802.11 header. rfc1042 header is already there */ - if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) { - int len = ieee80211_hdrlen(hdr->frame_control); - skb_push(skb, len); - memcpy(skb->data, hdr, len); + skb_pull(skb, sizeof(struct amsdu_subframe_hdr)); + memcpy(skb_push(skb, hdr_len), hdr, hdr_len); + break; } info->skb = skb; info->encrypt_type = enctype; - return 0; + + ath10k_process_rx(htt->ar, info); } static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb) @@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, int fw_desc_len; u8 *fw_desc; int i, j; - int ret; - int ip_summed; memset(&info, 0, sizeof(info)); @@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, continue; } - /* The skb is not yet processed and it may be - * reallocated. 
Since the offload is in the original - * skb extract the checksum now and assign it later */ - ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); - info.skb = msdu_head; info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head); info.signal = ATH10K_DEFAULT_NOISE_FLOOR; @@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, hdr = ath10k_htt_rx_skb_get_hdr(msdu_head); if (ath10k_htt_rx_hdr_is_amsdu(hdr)) - ret = ath10k_htt_rx_amsdu(htt, &info); + ath10k_htt_rx_amsdu(htt, &info); else - ret = ath10k_htt_rx_msdu(htt, &info); - - if (ret && !info.fcs_err) { - ath10k_warn("error processing msdus %d\n", ret); - dev_kfree_skb_any(info.skb); - continue; - } - - if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data)) - ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n"); - - info.skb->ip_summed = ip_summed; - - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ", - info.skb->data, info.skb->len); - ath10k_process_rx(htt->ar, &info); + ath10k_htt_rx_msdu(htt, &info); } } - ath10k_htt_rx_msdu_buff_replenish(htt); + tasklet_schedule(&htt->rx_replenish_task); } static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, @@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } - ath10k_txrx_tx_completed(htt, &tx_done); + ath10k_txrx_tx_unref(htt, &tx_done); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: { @@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { msdu_id = resp->data_tx_completion.msdus[i]; tx_done.msdu_id = __le16_to_cpu(msdu_id); - ath10k_txrx_tx_completed(htt, &tx_done); + ath10k_txrx_tx_unref(htt, &tx_done); } break; } @@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_TEST: /* FIX THIS */ break; - case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: case HTT_T2H_MSG_TYPE_STATS_CONF: + trace_ath10k_htt_stats(skb->data, skb->len); + break; + case HTT_T2H_MSG_TYPE_TX_INSPECT_IND: case HTT_T2H_MSG_TYPE_RX_ADDBA: case HTT_T2H_MSG_TYPE_RX_DELBA: case HTT_T2H_MSG_TYPE_RX_FLUSH: diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 656c2546b294..d9335e9d0d04 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt) htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar, pipe); - ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n", + ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * @@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt) static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) { - struct sk_buff *txdesc; + struct htt_tx_done tx_done = {0}; int msdu_id; /* No locks needed. 
Called after communication with the device has @@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) if (!test_bit(msdu_id, htt->used_msdu_ids)) continue; - txdesc = htt->pending_tx[msdu_id]; - if (!txdesc) - continue; - ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); - if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) - ATH10K_SKB_CB(txdesc)->htt.refcount = 1; + tx_done.discard = 1; + tx_done.msdu_id = msdu_id; - ATH10K_SKB_CB(txdesc)->htt.discard = true; - ath10k_txrx_tx_unref(htt, txdesc); + ath10k_txrx_tx_unref(htt, &tx_done); } } @@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt) void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - struct ath10k_htt *htt = &ar->htt; - - if (skb_cb->htt.is_conf) { - dev_kfree_skb_any(skb); - return; - } - - if (skb_cb->is_aborted) { - skb_cb->htt.discard = true; - - /* if the skbuff is aborted we need to make sure we'll free up - * the tx resources, we can't simply run tx_unref() 2 times - * because if htt tx completion came in earlier we'd access - * unallocated memory */ - if (skb_cb->htt.refcount > 1) - skb_cb->htt.refcount = 1; - } - - ath10k_txrx_tx_unref(htt, skb); + dev_kfree_skb_any(skb); } int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) @@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; - ATH10K_SKB_CB(skb)->htt.is_conf = true; + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) +{ + struct htt_stats_req *req; + struct sk_buff *skb; + struct htt_cmd *cmd; + int len = 0, ret; + + len += sizeof(cmd->hdr); + len += sizeof(cmd->stats_req); + + skb = ath10k_htc_alloc_skb(len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ; + + req = &cmd->stats_req; + + memset(req, 0, sizeof(*req)); + + /* currently we support only max 8 bit masks so no need to worry + * about endian support */ + req->upload_types[0] = mask; + req->reset_types[0] = mask; + req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; + req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff); + req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { + ath10k_warn("failed to send htt type stats request: %d", ret); dev_kfree_skb_any(skb); return ret; } @@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) #undef desc_offset - ATH10K_SKB_CB(skb)->htt.is_conf = true; - ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); @@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct device *dev = htt->ar->dev; - struct ath10k_skb_cb *skb_cb; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; - u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); + u8 vdev_id = skb_cb->vdev_id; int len = 0; int msdu_id = -1; int res; @@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) res = ath10k_htt_tx_inc_pending(htt); if (res) - return res; + goto err; len += sizeof(cmd->hdr); len += 
sizeof(cmd->mgmt_tx); - txdesc = ath10k_htc_alloc_skb(len); - if (!txdesc) { - res = -ENOMEM; - goto err; - } - spin_lock_bh(&htt->tx_lock); - msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); - if (msdu_id < 0) { + res = ath10k_htt_tx_alloc_msdu_id(htt); + if (res < 0) { spin_unlock_bh(&htt->tx_lock); - res = msdu_id; - goto err; + goto err_tx_dec; } - htt->pending_tx[msdu_id] = txdesc; + msdu_id = res; + htt->pending_tx[msdu_id] = msdu; spin_unlock_bh(&htt->tx_lock); + txdesc = ath10k_htc_alloc_skb(len); + if (!txdesc) { + res = -ENOMEM; + goto err_free_msdu_id; + } + res = ath10k_skb_map(dev, msdu); if (res) - goto err; + goto err_free_txdesc; skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; @@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); - /* refcount is decremented by HTC and HTT completions until it reaches - * zero and is freed */ - skb_cb = ATH10K_SKB_CB(txdesc); - skb_cb->htt.msdu_id = msdu_id; - skb_cb->htt.refcount = 2; - skb_cb->htt.msdu = msdu; + skb_cb->htt.frag_len = 0; + skb_cb->htt.pad_len = 0; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) - goto err; + goto err_unmap_msdu; return 0; -err: +err_unmap_msdu: ath10k_skb_unmap(dev, msdu); - - if (txdesc) - dev_kfree_skb_any(txdesc); - if (msdu_id >= 0) { - spin_lock_bh(&htt->tx_lock); - htt->pending_tx[msdu_id] = NULL; - ath10k_htt_tx_free_msdu_id(htt, msdu_id); - spin_unlock_bh(&htt->tx_lock); - } +err_free_txdesc: + dev_kfree_skb_any(txdesc); +err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); + htt->pending_tx[msdu_id] = NULL; + ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); +err_tx_dec: ath10k_htt_tx_dec_pending(htt); +err: return res; } @@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct device *dev = htt->ar->dev; struct htt_cmd *cmd; struct htt_data_tx_desc_frag *tx_frags; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; - struct ath10k_skb_cb *skb_cb; + struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct sk_buff *txdesc = NULL; - struct sk_buff *txfrag = NULL; - u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; + bool use_frags; + u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id; u8 tid; - int prefetch_len, desc_len, frag_len; - dma_addr_t frags_paddr; + int prefetch_len, desc_len; int msdu_id = -1; int res; u8 flags0; @@ -387,69 +394,82 @@ res = ath10k_htt_tx_inc_pending(htt); if (res) - return res; + goto err; + + spin_lock_bh(&htt->tx_lock); + res = ath10k_htt_tx_alloc_msdu_id(htt); + if (res < 0) { + spin_unlock_bh(&htt->tx_lock); + goto err_tx_dec; + } + msdu_id = res; + htt->pending_tx[msdu_id] = msdu; + spin_unlock_bh(&htt->tx_lock); prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; - frag_len = sizeof(*tx_frags) * 2; txdesc = ath10k_htc_alloc_skb(desc_len); if (!txdesc) { res = -ENOMEM; - goto err; + goto err_free_msdu_id; } - txfrag = dev_alloc_skb(frag_len); - if (!txfrag) { - res = -ENOMEM; - goto err; - } + /* Since HTT 3.0 there is no separate mgmt tx command. However, in case + * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx + * fragment list the host driver specifies the frame pointer directly.
*/ + use_frags = htt->target_version_major < 3 || + !ieee80211_is_mgmt(hdr->frame_control); if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { ath10k_warn("htt alignment check failed. dropping packet.\n"); res = -EIO; - goto err; + goto err_free_txdesc; } - spin_lock_bh(&htt->tx_lock); - msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); - if (msdu_id < 0) { - spin_unlock_bh(&htt->tx_lock); - res = msdu_id; - goto err; + if (use_frags) { + skb_cb->htt.frag_len = sizeof(*tx_frags) * 2; + skb_cb->htt.pad_len = (unsigned long)msdu->data - + round_down((unsigned long)msdu->data, 4); + + skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); + } else { + skb_cb->htt.frag_len = 0; + skb_cb->htt.pad_len = 0; } - htt->pending_tx[msdu_id] = txdesc; - spin_unlock_bh(&htt->tx_lock); res = ath10k_skb_map(dev, msdu); if (res) - goto err; - - /* tx fragment list must be terminated with zero-entry */ - skb_put(txfrag, frag_len); - tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data; - tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); - tx_frags[0].len = __cpu_to_le32(msdu->len); - tx_frags[1].paddr = __cpu_to_le32(0); - tx_frags[1].len = __cpu_to_le32(0); - - res = ath10k_skb_map(dev, txfrag); - if (res) - goto err; + goto err_pull_txfrag; + + if (use_frags) { + dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len, + DMA_TO_DEVICE); + + /* tx fragment list must be terminated with zero-entry */ + tx_frags = (struct htt_data_tx_desc_frag *)msdu->data; + tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr + + skb_cb->htt.frag_len + + skb_cb->htt.pad_len); + tx_frags[0].len = __cpu_to_le32(msdu->len - + skb_cb->htt.frag_len - + skb_cb->htt.pad_len); + tx_frags[1].paddr = __cpu_to_le32(0); + tx_frags[1].len = __cpu_to_le32(0); + + dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len, + DMA_TO_DEVICE); + } - ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", - (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr, + ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n", (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ", - txfrag->data, frag_len); ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", msdu->data, msdu->len); skb_put(txdesc, desc_len); cmd = (struct htt_cmd *)txdesc->data; - memset(cmd, 0, desc_len); tid = ATH10K_SKB_CB(msdu)->htt.tid; @@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) if (!ieee80211_has_protected(hdr->frame_control)) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; - flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, - HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + + if (use_frags) + flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); + else + flags0 |= SM(ATH10K_HW_TXRX_MGMT, + HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags1 = 0; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); @@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; - frags_paddr = ATH10K_SKB_CB(txfrag)->paddr; - cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; cmd->data_tx.flags0 = flags0; cmd->data_tx.flags1 = __cpu_to_le16(flags1); - cmd->data_tx.len = __cpu_to_le16(msdu->len); + cmd->data_tx.len = __cpu_to_le16(msdu->len - + skb_cb->htt.frag_len - + skb_cb->htt.pad_len); cmd->data_tx.id = __cpu_to_le16(msdu_id); - cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr); + cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr); 
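One detail worth pinning down: the skb_push() of frag_len + pad_len earlier in this function keeps the inline fragment list 4-byte aligned, because frag_len (two descriptors, assumed 8 bytes each, matching the paddr/len pairs above) is a multiple of 4 and pad_len absorbs the payload address's remainder. A worked standalone example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t data = 0x1006;	/* example msdu->data address */
	size_t frag_len = 2 * 8;	/* two 8-byte frag descriptors */
	size_t pad_len = data - (data & ~(uintptr_t)3);	/* = 2 here */
	uintptr_t list = data - frag_len - pad_len;

	/* prints: frag list at 0xff4, aligned=1 */
	printf("frag list at 0x%lx, aligned=%d\n",
	       (unsigned long)list, (int)((list & 3) == 0));
	return 0;
}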
cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); - memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len); - - /* refcount is decremented by HTC and HTT completions until it reaches - * zero and is freed */ - skb_cb = ATH10K_SKB_CB(txdesc); - skb_cb->htt.msdu_id = msdu_id; - skb_cb->htt.refcount = 2; - skb_cb->htt.txfrag = txfrag; - skb_cb->htt.msdu = msdu; + memcpy(cmd->data_tx.prefetch, hdr, prefetch_len); res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) - goto err; + goto err_unmap_msdu; return 0; -err: - if (txfrag) - ath10k_skb_unmap(dev, txfrag); - if (txdesc) - dev_kfree_skb_any(txdesc); - if (txfrag) - dev_kfree_skb_any(txfrag); - if (msdu_id >= 0) { - spin_lock_bh(&htt->tx_lock); - htt->pending_tx[msdu_id] = NULL; - ath10k_htt_tx_free_msdu_id(htt, msdu_id); - spin_unlock_bh(&htt->tx_lock); - } - ath10k_htt_tx_dec_pending(htt); + +err_unmap_msdu: ath10k_skb_unmap(dev, msdu); +err_pull_txfrag: + skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); +err_free_txdesc: + dev_kfree_skb_any(txdesc); +err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); + htt->pending_tx[msdu_id] = NULL; + ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); +err_tx_dec: + ath10k_htt_tx_dec_pending(htt); +err: return res; } diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 44ed5af0a204..8aeb46d9b534 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -20,28 +20,37 @@ #include "targaddrs.h" -/* Supported FW version */ -#define SUPPORTED_FW_MAJOR 1 -#define SUPPORTED_FW_MINOR 0 -#define SUPPORTED_FW_RELEASE 0 -#define SUPPORTED_FW_BUILD 629 - -/* QCA988X 1.0 definitions */ -#define QCA988X_HW_1_0_VERSION 0x4000002c -#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0" -#define QCA988X_HW_1_0_FW_FILE "firmware.bin" -#define QCA988X_HW_1_0_OTP_FILE "otp.bin" -#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin" -#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234 +/* QCA988X 1.0 definitions (unsupported) */ +#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 /* QCA988X 2.0 definitions */ #define QCA988X_HW_2_0_VERSION 0x4100016c +#define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" #define QCA988X_HW_2_0_FW_FILE "firmware.bin" #define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 +#define ATH10K_FW_API2_FILE "firmware-2.bin" + +/* includes also the null byte */ +#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K" + +struct ath10k_fw_ie { + __le32 id; + __le32 len; + u8 data[0]; +}; + +enum ath10k_fw_ie_type { + ATH10K_FW_IE_FW_VERSION = 0, + ATH10K_FW_IE_TIMESTAMP = 1, + ATH10K_FW_IE_FEATURES = 2, + ATH10K_FW_IE_FW_IMAGE = 3, + ATH10K_FW_IE_OTP_IMAGE = 4, +}; + /* Known pecularities: * - current FW doesn't support raw rx mode (last tested v599) * - current FW dumps upon raw tx mode (last tested v599) @@ -53,6 +62,9 @@ enum ath10k_hw_txrx_mode { ATH10K_HW_TXRX_RAW = 0, ATH10K_HW_TXRX_NATIVE_WIFI = 1, ATH10K_HW_TXRX_ETHERNET = 2, + + /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. 
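Given the ath10k_fw_ie container format defined above (the "QCA-ATH10K" magic with its NUL byte, then id/len/data records), a reader reduces to a simple TLV walk. This sketch assumes little-endian id/len fields and 4-byte padding of the magic and of each record's data; it mirrors, but is not, the driver's loader:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

void walk_fw_ies(const uint8_t *buf, size_t len)
{
	size_t off = strlen("QCA-ATH10K") + 1;	/* magic incl. NUL */

	off = (off + 3) & ~(size_t)3;		/* assumed padding */

	while (off + 8 <= len) {
		uint32_t id = get_le32(buf + off);
		uint32_t ie_len = get_le32(buf + off + 4);

		off += 8;
		if (ie_len > len - off)
			break;			/* truncated record */

		/* dispatch on id: fw version, timestamp, feature
		 * bitmap, fw image, otp image */
		printf("IE id %u, %u bytes\n", id, ie_len);

		off += ((size_t)ie_len + 3) & ~(size_t)3;
	}
}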
*/ + ATH10K_HW_TXRX_MGMT = 3, }; enum ath10k_mcast2ucast_mode { @@ -60,6 +72,7 @@ enum ath10k_mcast2ucast_mode { ATH10K_MCAST2UCAST_ENABLED = 1, }; +/* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 #define TARGET_NUM_WDS_ENTRIES 32 @@ -75,7 +88,11 @@ enum ath10k_mcast2ucast_mode { #define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_RX_TIMEOUT_LO_PRI 100 #define TARGET_RX_TIMEOUT_HI_PRI 40 -#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET + +/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and + * avoid a very expensive re-alignment in mac80211. */ +#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI + #define TARGET_SCAN_MAX_PENDING_REQS 4 #define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 #define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 @@ -90,6 +107,36 @@ enum ath10k_mcast2ucast_mode { #define TARGET_NUM_MSDU_DESC (1024 + 400) #define TARGET_MAX_FRAG_ENTRIES 0 +/* Target specific defines for 10.X firmware */ +#define TARGET_10X_NUM_VDEVS 16 +#define TARGET_10X_NUM_PEER_AST 2 +#define TARGET_10X_NUM_WDS_ENTRIES 32 +#define TARGET_10X_DMA_BURST_SIZE 0 +#define TARGET_10X_MAC_AGGR_DELIM 0 +#define TARGET_10X_AST_SKID_LIMIT 16 +#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS)) +#define TARGET_10X_NUM_OFFLOAD_PEERS 0 +#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 +#define TARGET_10X_NUM_PEER_KEYS 2 +#define TARGET_10X_NUM_TIDS 256 +#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) +#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) +#define TARGET_10X_RX_TIMEOUT_LO_PRI 100 +#define TARGET_10X_RX_TIMEOUT_HI_PRI 40 +#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI +#define TARGET_10X_SCAN_MAX_PENDING_REQS 4 +#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2 +#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2 +#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8 +#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_10X_NUM_MCAST_GROUPS 0 +#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0 +#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED +#define TARGET_10X_TX_DBG_LOG_SIZE 1024 +#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_10X_VOW_CONFIG 0 +#define TARGET_10X_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10X_MAX_FRAG_ENTRIES 0 /* Number of Copy Engines supported */ #define CE_COUNT 8 @@ -169,6 +216,10 @@ enum ath10k_mcast2ucast_mode { #define SOC_LPO_CAL_ENABLE_LSB 20 #define SOC_LPO_CAL_ENABLE_MASK 0x00100000 +#define SOC_CHIP_ID_ADDRESS 0x000000ec +#define SOC_CHIP_ID_REV_LSB 8 +#define SOC_CHIP_ID_REV_MASK 0x00000f00 + #define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008 #define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004 #define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index cf2ba4d850c9..0b1cc516e778 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) { + struct ath10k *ar = arvif->ar; + u32 vdev_param; + if (value != 0xFFFFFFFF) value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold, ATH10K_RTS_MAX); - return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, - WMI_VDEV_PARAM_RTS_THRESHOLD, - value); + vdev_param = ar->wmi.vdev_param->rts_threshold; + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } static int 
ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value) { + struct ath10k *ar = arvif->ar; + u32 vdev_param; + if (value != 0xFFFFFFFF) value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, ATH10K_FRAGMT_THRESHOLD_MIN, ATH10K_FRAGMT_THRESHOLD_MAX); - return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, - WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, - value); + vdev_param = ar->wmi.vdev_param->fragmentation_threshold; + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) @@ -460,6 +464,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif) arg.ssid_len = arvif->vif->bss_conf.ssid_len; } + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d start center_freq %d phymode %s\n", + arg.vdev_id, arg.channel.freq, + ath10k_wmi_phymode_str(arg.channel.mode)); + ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { ath10k_warn("WMI vdev start failed: ret %d\n", ret); @@ -503,13 +512,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id) { struct ieee80211_channel *channel = ar->hw->conf.chandef.chan; struct wmi_vdev_start_request_arg arg = {}; - enum nl80211_channel_type type; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - type = cfg80211_get_chandef_type(&ar->hw->conf.chandef); - arg.vdev_id = vdev_id; arg.channel.freq = channel->center_freq; arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1; @@ -560,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - /* For some reasons, ath10k_wmi_vdev_down() here couse - * often ath10k_wmi_vdev_stop() to fail. Next we could - * not run monitor vdev and driver reload - * required. Don't see such problems we skip - * ath10k_wmi_vdev_down() here. 
- */ + ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); + if (ret) + ath10k_warn("Monitor vdev down failed: %d\n", ret); ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) @@ -607,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar) goto vdev_fail; } - ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ar->monitor_vdev_id); ar->monitor_present = true; @@ -639,7 +642,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar) ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); ar->monitor_present = false; - ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ar->monitor_vdev_id); return ret; } @@ -668,13 +671,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, arvif->vdev_id); return; } - ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id); + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } static void ath10k_control_ibss(struct ath10k_vif *arvif, struct ieee80211_bss_conf *info, const u8 self_peer[ETH_ALEN]) { + u32 vdev_param; int ret = 0; lockdep_assert_held(&arvif->ar->conf_mutex); @@ -708,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, return; } - ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, - WMI_VDEV_PARAM_ATIM_WINDOW, + vdev_param = arvif->ar->wmi.vdev_param->atim_window; + ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, ATH10K_DEFAULT_ATIM); if (ret) ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n", @@ -719,47 +723,45 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, /* * Review this when mac80211 gains per-interface powersave support. */ -static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) { - struct ath10k_generic_iter *ar_iter = data; - struct ieee80211_conf *conf = &ar_iter->ar->hw->conf; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ath10k *ar = arvif->ar; + struct ieee80211_conf *conf = &ar->hw->conf; enum wmi_sta_powersave_param param; enum wmi_sta_ps_mode psmode; int ret; lockdep_assert_held(&arvif->ar->conf_mutex); - if (vif->type != NL80211_IFTYPE_STATION) - return; + if (arvif->vif->type != NL80211_IFTYPE_STATION) + return 0; if (conf->flags & IEEE80211_CONF_PS) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; - ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar, - arvif->vdev_id, - param, + ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, conf->dynamic_ps_timeout); if (ret) { ath10k_warn("Failed to set inactivity time for VDEV: %d\n", arvif->vdev_id); - return; + return ret; } - - ar_iter->ret = ret; } else { psmode = WMI_STA_PS_MODE_DISABLED; } - ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id, - psmode); - if (ar_iter->ret) + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", + arvif->vdev_id, psmode ? 
"enable" : "disable"); + + ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); + if (ret) { ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n", psmode, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n", - psmode, arvif->vdev_id); + return ret; + } + + return 0; } /**********************/ @@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_ht_rates.num_rates = n; arg->peer_num_spatial_streams = max((n+7) / 8, 1); - ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", + arg->addr, arg->peer_ht_rates.num_rates, arg->peer_num_spatial_streams); } @@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar, arg->peer_flags |= WMI_PEER_QOS; if (sta->wme && sta->uapsd_queues) { - ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", sta->uapsd_queues, sta->max_sp); arg->peer_flags |= WMI_PEER_APSD; - arg->peer_flags |= WMI_RC_UAPSD_FLAG; + arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | @@ -1028,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, struct wmi_peer_assoc_complete_arg *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + u8 ampdu_factor; if (!vht_cap->vht_supported) return; arg->peer_flags |= WMI_PEER_VHT; - arg->peer_vht_caps = vht_cap->cap; + + ampdu_factor = (vht_cap->cap & + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + + /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to + * zero in VHT IE. Using it would result in degraded throughput. + * arg->peer_max_mpdu at this point contains HT max_mpdu so keep + * it if VHT max_mpdu is smaller. */ + arg->peer_max_mpdu = max(arg->peer_max_mpdu, + (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1); + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) arg->peer_flags |= WMI_PEER_80MHZ; @@ -1048,7 +1064,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, arg->peer_vht_rates.tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); - ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n"); + ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", + sta->addr, arg->peer_max_mpdu, arg->peer_flags); } static void ath10k_peer_assoc_h_qos(struct ath10k *ar, @@ -1076,8 +1093,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, { enum wmi_phy_mode phymode = MODE_UNKNOWN; - /* FIXME: add VHT */ - switch (ar->hw->conf.chandef.chan->band) { case IEEE80211_BAND_2GHZ: if (sta->ht_cap.ht_supported) { @@ -1091,7 +1106,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, break; case IEEE80211_BAND_5GHZ: - if (sta->ht_cap.ht_supported) { + /* + * Check VHT first. 
+ */ + if (sta->vht_cap.vht_supported) { + if (sta->bandwidth == IEEE80211_STA_RX_BW_80) + phymode = MODE_11AC_VHT80; + else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) + phymode = MODE_11AC_VHT40; + else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + phymode = MODE_11AC_VHT20; + } else if (sta->ht_cap.ht_supported) { if (sta->bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else @@ -1105,30 +1130,32 @@ break; } + ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", + sta->addr, ath10k_wmi_phymode_str(phymode)); + arg->peer_phymode = phymode; WARN_ON(phymode == MODE_UNKNOWN); } -static int ath10k_peer_assoc(struct ath10k *ar, - struct ath10k_vif *arvif, - struct ieee80211_sta *sta, - struct ieee80211_bss_conf *bss_conf) +static int ath10k_peer_assoc_prepare(struct ath10k *ar, + struct ath10k_vif *arvif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *bss_conf, + struct wmi_peer_assoc_complete_arg *arg) { - struct wmi_peer_assoc_complete_arg arg; - lockdep_assert_held(&ar->conf_mutex); - memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg)); + memset(arg, 0, sizeof(*arg)); - ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg); - ath10k_peer_assoc_h_crypto(ar, arvif, &arg); - ath10k_peer_assoc_h_rates(ar, sta, &arg); - ath10k_peer_assoc_h_ht(ar, sta, &arg); - ath10k_peer_assoc_h_vht(ar, sta, &arg); - ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg); - ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg); + ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg); + ath10k_peer_assoc_h_crypto(ar, arvif, arg); + ath10k_peer_assoc_h_rates(ar, sta, arg); + ath10k_peer_assoc_h_ht(ar, sta, arg); + ath10k_peer_assoc_h_vht(ar, sta, arg); + ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg); + ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg); - return ath10k_wmi_peer_assoc(ar, &arg); + return 0; } /* can be called only in mac80211 callbacks due to `key_count` usage */ @@ -1138,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct wmi_peer_assoc_complete_arg peer_arg; struct ieee80211_sta *ap_sta; int ret; @@ -1153,24 +1181,33 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, return; } - ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf); + ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, + bss_conf, &peer_arg); if (ret) { - ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid); + ath10k_warn("Peer assoc prepare failed for %pM: %d\n", + bss_conf->bssid, ret); rcu_read_unlock(); return; } rcu_read_unlock(); + ret = ath10k_wmi_peer_assoc(ar, &peer_arg); + if (ret) { + ath10k_warn("Peer assoc failed for %pM: %d\n", + bss_conf->bssid, ret); + return; + } + + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d up (associated) bssid %pM aid %d\n", + arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid, bss_conf->bssid); if (ret) ath10k_warn("VDEV: %d up failed: ret %d\n", arvif->vdev_id, ret); - else - ath10k_dbg(ATH10K_DBG_MAC, - "VDEV: %d associated, BSSID: %pM, AID: %d\n", - arvif->vdev_id, bss_conf->bssid, bss_conf->aid); } /* @@ -1191,10 +1228,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, * No idea why this happens, even though VDEV-DOWN is supposed * to be analogous to link down, so just stop the VDEV. */
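The split into ath10k_peer_assoc_prepare() plus an explicit ath10k_wmi_peer_assoc() call exists so that the association parameters can be gathered while the station entry is protected by RCU, while the sleeping WMI command runs outside the RCU read section. A distilled sketch of the calling pattern used in ath10k_bss_assoc() above:

	struct wmi_peer_assoc_complete_arg arg;
	int ret;

	rcu_read_lock();
	ret = ath10k_peer_assoc_prepare(ar, arvif, sta, bss_conf, &arg);
	rcu_read_unlock();
	if (ret)
		return ret;

	/* the WMI command may sleep, so issue it after rcu_read_unlock() */
	return ath10k_wmi_peer_assoc(ar, &arg);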
+ ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n", + arvif->vdev_id); + + /* FIXME: check return value */ ret = ath10k_vdev_stop(arvif); - if (!ret) - ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n", - arvif->vdev_id); /* * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and @@ -1203,26 +1241,33 @@ * interfaces as it expects there is no rx when no interface is * running. */ - ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); - if (ret) - ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n", - arvif->vdev_id, ret); + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); - ath10k_wmi_flush_tx(ar); + /* FIXME: why don't we print error if wmi call fails? */ + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); - arvif->def_wep_key_index = 0; + arvif->def_wep_key_idx = 0; } static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, struct ieee80211_sta *sta) { + struct wmi_peer_assoc_complete_arg peer_arg; int ret = 0; lockdep_assert_held(&ar->conf_mutex); - ret = ath10k_peer_assoc(ar, arvif, sta, NULL); + ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); + if (ret) { + ath10k_warn("WMI peer assoc prepare failed for %pM\n", + sta->addr); + return ret; + } + + ret = ath10k_wmi_peer_assoc(ar, &peer_arg); if (ret) { - ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr); + ath10k_warn("Peer assoc failed for STA %pM: %d\n", + sta->addr, ret); return ret; } @@ -1333,8 +1378,8 @@ static int ath10k_update_channel_list(struct ath10k *ar) continue; ath10k_dbg(ATH10K_DBG_WMI, - "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", - __func__, ch - arg.channels, arg.n_channels, + "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", + ch - arg.channels, arg.n_channels, ch->freq, ch->max_power, ch->max_reg_power, ch->max_antenna_gain, ch->mode); @@ -1391,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, /* TX handlers */ /***************/ +static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr) +{ + if (ieee80211_is_mgmt(hdr->frame_control)) + return HTT_DATA_TX_EXT_TID_MGMT; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + + if (!is_unicast_ether_addr(ieee80211_get_DA(hdr))) + return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; + + return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; +} + +static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, + struct ieee80211_tx_info *info) +{ + if (info->control.vif) + return ath10k_vif_to_arvif(info->control.vif)->vdev_id; + + if (ar->monitor_enabled) + return ar->monitor_vdev_id; + + ath10k_warn("could not resolve vdev id\n"); + return 0; +} + /* * Frames sent to the FW have to be in "Native Wifi" format. * Strip the QoS field from the 802.11 header. */
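The two helpers above centralize per-frame classification: management frames map to HTT_DATA_TX_EXT_TID_MGMT, non-QoS and group-addressed frames to HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST, and QoS unicast data to the TID bits of its QoS control field. A short usage sketch, mirroring what ath10k_tx() does later in this patch, given an ath10k instance ar and an skb:

	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u8 tid = ath10k_tx_h_get_tid(hdr);
	u8 vdev_id = ath10k_tx_h_get_vdev_id(ar, info);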
@@ -1411,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw, skb_pull(skb, IEEE80211_QOS_CTL_LEN); } +static void ath10k_tx_wep_key_work(struct work_struct *work) +{ + struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, + wep_key_work); + int ret, keyidx = arvif->def_wep_key_newidx; + + if (arvif->def_wep_key_idx == keyidx) + return; + + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", + arvif->vdev_id, keyidx); + + ret = ath10k_wmi_vdev_set_param(arvif->ar, + arvif->vdev_id, + arvif->ar->wmi.vdev_param->def_keyid, + keyidx); + if (ret) { + ath10k_warn("could not update wep keyidx (%d)\n", ret); + return; + } + + arvif->def_wep_key_idx = keyidx; +} + static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1419,11 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) struct ath10k *ar = arvif->ar; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; - int ret; - - /* TODO AP mode should be implemented */ - if (vif->type != NL80211_IFTYPE_STATION) - return; if (!ieee80211_has_protected(hdr->frame_control)) return; @@ -1435,20 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb) key->cipher != WLAN_CIPHER_SUITE_WEP104) return; - if (key->keyidx == arvif->def_wep_key_index) + if (key->keyidx == arvif->def_wep_key_idx) return; - ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx); - - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_DEF_KEYID, - key->keyidx); - if (ret) { - ath10k_warn("could not update wep keyidx (%d)\n", ret); - return; - } - - arvif->def_wep_key_index = key->keyidx; + /* FIXME: Most likely a few frames will be TXed with an old key. Simply + * queueing frames until key index is updated is not an option because + * sk_buff may need more processing to be done, e.g. offchannel */ + arvif->def_wep_key_newidx = key->keyidx; + ieee80211_queue_work(ar->hw, &arvif->wep_key_work); } static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) @@ -1478,19 +1563,42 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb) static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int ret; + int ret = 0; - if (ieee80211_is_mgmt(hdr->frame_control)) - ret = ath10k_htt_mgmt_tx(&ar->htt, skb); - else if (ieee80211_is_nullfunc(hdr->frame_control)) + if (ar->htt.target_version_major >= 3) { + /* Since HTT 3.0 there is no separate mgmt tx command */ + ret = ath10k_htt_tx(&ar->htt, skb); + goto exit; + } + + if (ieee80211_is_mgmt(hdr->frame_control)) { + if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->fw_features)) { + if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= + ATH10K_MAX_NUM_MGMT_PENDING) { + ath10k_warn("wmi mgmt_tx queue limit reached\n"); + ret = -EBUSY; + goto exit; + } + + skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb); + ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); + } else { + ret = ath10k_htt_mgmt_tx(&ar->htt, skb); + } + } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, + ar->fw_features) && + ieee80211_is_nullfunc(hdr->frame_control)) { /* FW does not report tx status properly for NullFunc frames * unless they are sent through mgmt tx path. mac80211 sends - * those frames when it detects link/beacon loss and depends on - * the tx status to be correct. 
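The WMI management-tx path added in the hunk above queues frames instead of sending them inline, and the producer side is bounded so a stalled firmware cannot grow the queue without limit. The pattern, extracted from ath10k_tx_htt():

	if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= ATH10K_MAX_NUM_MGMT_PENDING)
		return -EBUSY;	/* drop rather than queue unboundedly */

	skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);

ath10k_mgmt_over_wmi_tx_work(), shown below, then drains the queue with skb_dequeue() in process context, where the WMI command is allowed to sleep.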
*/ + * those frames when it detects link/beacon loss and depends + * on the tx status to be correct. */ ret = ath10k_htt_mgmt_tx(&ar->htt, skb); - else + } else { ret = ath10k_htt_tx(&ar->htt, skb); + } +exit: if (ret) { ath10k_warn("tx failed (%d). dropping packet.\n", ret); ieee80211_free_txskb(ar->hw, skb); @@ -1534,18 +1642,19 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n", skb); hdr = (struct ieee80211_hdr *)skb->data; peer_addr = ieee80211_get_DA(hdr); - vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id; + vdev_id = ATH10K_SKB_CB(skb)->vdev_id; spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find(ar, vdev_id, peer_addr); spin_unlock_bh(&ar->data_lock); if (peer) + /* FIXME: should this use ath10k_warn()? */ ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", peer_addr, vdev_id); @@ -1580,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work) } } +void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) +{ + struct sk_buff *skb; + + for (;;) { + skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); + if (!skb) + break; + + ieee80211_free_txskb(ar->hw, skb); + } +} + +void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); + struct sk_buff *skb; + int ret; + + for (;;) { + skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); + if (!skb) + break; + + ret = ath10k_wmi_mgmt_tx(ar, skb); + if (ret) + ath10k_warn("wmi mgmt_tx failed (%d)\n", ret); + } +} + /************/ /* Scanning */ /************/ @@ -1643,8 +1782,6 @@ static int ath10k_abort_scan(struct ath10k *ar) return -EIO; } - ath10k_wmi_flush_tx(ar); - ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); if (ret == 0) ath10k_warn("timed out while waiting for scan to stop\n"); @@ -1678,10 +1815,6 @@ static int ath10k_start_scan(struct ath10k *ar, if (ret) return ret; - /* make sure we submit the command so the completion - * timeout makes sense */ - ath10k_wmi_flush_tx(ar); - ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); if (ret == 0) { ath10k_abort_scan(ar); @@ -1709,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = NULL; - u32 vdev_id = 0; - u8 tid; - - if (info->control.vif) { - arvif = ath10k_vif_to_arvif(info->control.vif); - vdev_id = arvif->vdev_id; - } else if (ar->monitor_enabled) { - vdev_id = ar->monitor_vdev_id; - } + u8 tid, vdev_id; /* We should disable CCK RATE due to P2P */ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) @@ -1726,12 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw, /* we must calculate tid before we apply qos workaround * as we'd lose the qos control field */ - tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; - if (ieee80211_is_data_qos(hdr->frame_control) && - is_unicast_ether_addr(ieee80211_get_DA(hdr))) { - u8 *qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; - } + tid = ath10k_tx_h_get_tid(hdr); + vdev_id = ath10k_tx_h_get_vdev_id(ar, info); /* it makes no sense to process injected frames like that */ if (info->control.vif && @@ -1742,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw, ath10k_tx_h_seq_no(skb); } - memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb))); - ATH10K_SKB_CB(skb)->htt.vdev_id = 
vdev_id; + ATH10K_SKB_CB(skb)->vdev_id = vdev_id; + ATH10K_SKB_CB(skb)->htt.is_offchan = false; ATH10K_SKB_CB(skb)->htt.tid = tid; if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { spin_lock_bh(&ar->data_lock); ATH10K_SKB_CB(skb)->htt.is_offchan = true; - ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id; + ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; spin_unlock_bh(&ar->data_lock); ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); @@ -1771,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar) del_timer_sync(&ar->scan.timeout); ath10k_offchan_tx_purge(ar); + ath10k_mgmt_over_wmi_tx_purge(ar); ath10k_peer_cleanup_all(ar); ath10k_core_stop(ar); ath10k_hif_power_down(ar); @@ -1817,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw) else if (ar->state == ATH10K_STATE_RESTARTING) ar->state = ATH10K_STATE_RESTARTED; - ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1); + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); if (ret) ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n", ret); - ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0); + ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0); if (ret) ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n", ret); @@ -1847,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw) ar->state = ATH10K_STATE_OFF; mutex_unlock(&ar->conf_mutex); + ath10k_mgmt_over_wmi_tx_purge(ar); + cancel_work_sync(&ar->offchan_tx_work); + cancel_work_sync(&ar->wmi_mgmt_tx_work); cancel_work_sync(&ar->restart_work); } -static void ath10k_config_ps(struct ath10k *ar) +static int ath10k_config_ps(struct ath10k *ar) { - struct ath10k_generic_iter ar_iter; + struct ath10k_vif *arvif; + int ret = 0; lockdep_assert_held(&ar->conf_mutex); - /* During HW reconfiguration mac80211 reports all interfaces that were - * running until reconfiguration was started. Since FW doesn't have any - * vdevs at this point we must not iterate over this interface list. - * This setting will be updated upon add_interface(). 
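ath10k_config_ps() above now walks a driver-private list instead of using ieee80211_iterate_active_interfaces_atomic(). The list is only ever touched under conf_mutex, so the loop may issue sleeping WMI commands, which the atomic iterator forbade. A minimal sketch of the idiom:

	struct ath10k_vif *arvif;
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_mac_vif_setup_ps(arvif);	/* may sleep (WMI) */
		if (ret)
			break;
	}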
*/ - if (ar->state == ATH10K_STATE_RESTARTED) - return; - - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; - - ieee80211_iterate_active_interfaces_atomic( - ar->hw, IEEE80211_IFACE_ITER_NORMAL, - ath10k_ps_iter, &ar_iter); + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath10k_mac_vif_setup_ps(arvif); + if (ret) { + ath10k_warn("could not setup powersave (%d)\n", ret); + break; + } + } - if (ar_iter.ret) - ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret); + return ret; } static int ath10k_config(struct ieee80211_hw *hw, u32 changed) @@ -1884,7 +2002,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n", conf->chandef.chan->center_freq); spin_lock_bh(&ar->data_lock); ar->rx_channel = conf->chandef.chan; @@ -1901,7 +2019,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) ret = ath10k_monitor_destroy(ar); } - ath10k_wmi_flush_tx(ar); mutex_unlock(&ar->conf_mutex); return ret; } @@ -1922,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, int ret = 0; u32 value; int bit; + u32 vdev_param; mutex_lock(&ar->conf_mutex); @@ -1930,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, arvif->ar = ar; arvif->vif = vif; + INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); + if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) { ath10k_warn("Only one monitor interface allowed\n"); ret = -EBUSY; - goto exit; + goto err; } bit = ffs(ar->free_vdev_map); if (bit == 0) { ret = -EBUSY; - goto exit; + goto err; } arvif->vdev_id = bit - 1; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; - ar->free_vdev_map &= ~(1 << arvif->vdev_id); if (ar->p2p) arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; @@ -1973,32 +2092,41 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, break; } - ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n", + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); if (ret) { ath10k_warn("WMI vdev create failed: ret %d\n", ret); - goto exit; + goto err; } - ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID, - arvif->def_wep_key_index); - if (ret) + ar->free_vdev_map &= ~BIT(arvif->vdev_id); + list_add(&arvif->list, &ar->arvifs); + + vdev_param = ar->wmi.vdev_param->def_keyid; + ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, + arvif->def_wep_key_idx); + if (ret) { ath10k_warn("Failed to set default keyid: %d\n", ret); + goto err_vdev_delete; + } - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_TX_ENCAP_TYPE, + vdev_param = ar->wmi.vdev_param->tx_encap_type; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ATH10K_HW_TXRX_NATIVE_WIFI); - if (ret) + /* 10.X firmware does not support this VDEV parameter. 
Do not warn */ + if (ret && ret != -EOPNOTSUPP) { ath10k_warn("Failed to set TX encap: %d\n", ret); + goto err_vdev_delete; + } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); if (ret) { ath10k_warn("Failed to create peer for AP: %d\n", ret); - goto exit; + goto err_vdev_delete; } } @@ -2007,39 +2135,62 @@ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); - if (ret) + if (ret) { ath10k_warn("Failed to set RX wake policy: %d\n", ret); + goto err_peer_delete; + } param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); - if (ret) + if (ret) { ath10k_warn("Failed to set TX wake thresh: %d\n", ret); + goto err_peer_delete; + } param = WMI_STA_PS_PARAM_PSPOLL_COUNT; value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); - if (ret) + if (ret) { ath10k_warn("Failed to set PSPOLL count: %d\n", ret); + goto err_peer_delete; + } } ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); - if (ret) + if (ret) { ath10k_warn("failed to set rts threshold for vdev %d (%d)\n", arvif->vdev_id, ret); + goto err_peer_delete; + } ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); - if (ret) + if (ret) { ath10k_warn("failed to set frag threshold for vdev %d (%d)\n", arvif->vdev_id, ret); + goto err_peer_delete; + } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ar->monitor_present = true; -exit: mutex_unlock(&ar->conf_mutex); + return 0; + +err_peer_delete: + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) + ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); + +err_vdev_delete: + ath10k_wmi_vdev_delete(ar, arvif->vdev_id); + ar->free_vdev_map |= BIT(arvif->vdev_id); + list_del(&arvif->list); + +err: + mutex_unlock(&ar->conf_mutex); + return ret; } @@ -2052,9 +2203,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id); + cancel_work_sync(&arvif->wep_key_work); + + spin_lock_bh(&ar->data_lock); + if (arvif->beacon) { + dev_kfree_skb_any(arvif->beacon); + arvif->beacon = NULL; + } + spin_unlock_bh(&ar->data_lock); ar->free_vdev_map |= 1 << (arvif->vdev_id); + list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); @@ -2064,6 +2223,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, kfree(arvif->u.ap.noa_data); } + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n", + arvif->vdev_id); + ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); if (ret) ath10k_warn("WMI vdev delete failed: %d\n", ret); @@ -2105,18 +2267,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, if ((ar->filter_flags & FIF_PROMISC_IN_BSS) && !ar->monitor_enabled) { + ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n", + ar->monitor_vdev_id); + ret = ath10k_monitor_start(ar, ar->monitor_vdev_id); if (ret) ath10k_warn("Unable to start monitor mode\n"); - else - ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n"); } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->monitor_enabled) { + ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n", + ar->monitor_vdev_id); + ret = ath10k_monitor_stop(ar); if (ret) ath10k_warn("Unable to stop monitor mode\n"); - else - 
ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n"); } mutex_unlock(&ar->conf_mutex); @@ -2130,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); int ret = 0; + u32 vdev_param, pdev_param; mutex_lock(&ar->conf_mutex); @@ -2138,44 +2303,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON_INT) { arvif->beacon_interval = info->beacon_int; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_BEACON_INTERVAL, + vdev_param = ar->wmi.vdev_param->beacon_interval; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->beacon_interval); + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d beacon_interval %d\n", + arvif->vdev_id, arvif->beacon_interval); + if (ret) ath10k_warn("Failed to set beacon interval for VDEV: %d\n", arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Beacon interval: %d set for VDEV: %d\n", - arvif->beacon_interval, arvif->vdev_id); } if (changed & BSS_CHANGED_BEACON) { - ret = ath10k_wmi_pdev_set_param(ar, - WMI_PDEV_PARAM_BEACON_TX_MODE, + ath10k_dbg(ATH10K_DBG_MAC, + "vdev %d set beacon tx mode to staggered\n", + arvif->vdev_id); + + pdev_param = ar->wmi.pdev_param->beacon_tx_mode; + ret = ath10k_wmi_pdev_set_param(ar, pdev_param, WMI_BEACON_STAGGERED_MODE); if (ret) ath10k_warn("Failed to set beacon mode for VDEV: %d\n", arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set staggered beacon mode for VDEV: %d\n", - arvif->vdev_id); } if (changed & BSS_CHANGED_BEACON_INFO) { arvif->dtim_period = info->dtim_period; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_DTIM_PERIOD, + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d dtim_period %d\n", + arvif->vdev_id, arvif->dtim_period); + + vdev_param = ar->wmi.vdev_param->dtim_period; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->dtim_period); if (ret) ath10k_warn("Failed to set dtim period for VDEV: %d\n", arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set dtim period: %d for VDEV: %d\n", - arvif->dtim_period, arvif->vdev_id); } if (changed & BSS_CHANGED_SSID && @@ -2188,16 +2353,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID) { if (!is_zero_ether_addr(info->bssid)) { + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d create peer %pM\n", + arvif->vdev_id, info->bssid); + ret = ath10k_peer_create(ar, arvif->vdev_id, info->bssid); if (ret) ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", info->bssid, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Added peer: %pM for VDEV: %d\n", - info->bssid, arvif->vdev_id); - if (vif->type == NL80211_IFTYPE_STATION) { /* @@ -2207,11 +2371,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, memcpy(arvif->u.sta.bssid, info->bssid, ETH_ALEN); + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d start %pM\n", + arvif->vdev_id, info->bssid); + + /* FIXME: check return value */ ret = ath10k_vdev_start(arvif); - if (!ret) - ath10k_dbg(ATH10K_DBG_MAC, - "VDEV: %d started with BSSID: %pM\n", - arvif->vdev_id, info->bssid); } /* @@ -2235,16 +2400,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, else cts_prot = 0; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_ENABLE_RTSCTS, + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", + arvif->vdev_id, cts_prot); + + vdev_param = ar->wmi.vdev_param->enable_rtscts; + ret = ath10k_wmi_vdev_set_param(ar, 
arvif->vdev_id, vdev_param, cts_prot); if (ret) ath10k_warn("Failed to set CTS prot for VDEV: %d\n", arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set CTS prot: %d for VDEV: %d\n", - cts_prot, arvif->vdev_id); } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -2255,16 +2419,15 @@ else slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_SLOT_TIME, + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", + arvif->vdev_id, slottime); + + vdev_param = ar->wmi.vdev_param->slot_time; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, slottime); if (ret) ath10k_warn("Failed to set erp slot for VDEV: %d\n", arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set slottime: %d for VDEV: %d\n", - slottime, arvif->vdev_id); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { @@ -2274,16 +2437,16 @@ else preamble = WMI_VDEV_PREAMBLE_LONG; - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, - WMI_VDEV_PARAM_PREAMBLE, + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d preamble %d\n", + arvif->vdev_id, preamble); + + vdev_param = ar->wmi.vdev_param->preamble; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, preamble); if (ret) ath10k_warn("Failed to set preamble for VDEV: %d\n", arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set preamble: %d for VDEV: %d\n", - preamble, arvif->vdev_id); } if (changed & BSS_CHANGED_ASSOC) { @@ -2474,27 +2637,26 @@ /* * New station addition. */ + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d peer create %pM (new sta)\n", + arvif->vdev_id, sta->addr); + ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn("Failed to add peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Added peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* * Existing station deletion. */ + ath10k_dbg(ATH10K_DBG_MAC, + "mac vdev %d peer delete %pM (sta gone)\n", + arvif->vdev_id, sta->addr); ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Removed peer: %pM for VDEV: %d\n", - sta->addr, arvif->vdev_id); if (vif->type == NL80211_IFTYPE_STATION) ath10k_bss_disassoc(hw, vif); @@ -2505,14 +2667,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* * New association. */ + ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n", + sta->addr); + ret = ath10k_station_assoc(ar, arvif, sta); if (ret) ath10k_warn("Failed to associate station: %pM\n", sta->addr); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Station %pM moved to assoc state\n", - sta->addr); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && (vif->type == NL80211_IFTYPE_AP || @@ -2520,14 +2681,13 @@ /* * Disassociation. 
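Throughout this function the WMI_VDEV_PARAM_* constants are replaced by lookups through ar->wmi.vdev_param. This is an indirection table so the same mac80211 callbacks work against firmware ABIs that number the parameters differently; a sketch of the idea, assuming a map along the lines of ath10k's wmi_vdev_param_map (member set illustrative):

	struct wmi_vdev_param_map {
		u32 beacon_interval;
		u32 dtim_period;
		u32 slot_time;
		u32 preamble;
		/* ... one u32 id per abstract parameter ... */
	};

	/* ar->wmi.vdev_param is pointed at the right map at attach time */
	u32 vdev_param = ar->wmi.vdev_param->slot_time;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, slottime);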
*/ + ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n", + sta->addr); + ret = ath10k_station_disassoc(ar, arvif, sta); if (ret) ath10k_warn("Failed to disassociate station: %pM\n", sta->addr); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Station %pM moved to disassociated state\n", - sta->addr); } mutex_unlock(&ar->conf_mutex); @@ -2732,88 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) * Both RTS and Fragmentation threshold are interface-specific * in ath10k, but device-specific in mac80211. */ -static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ - struct ath10k_generic_iter *ar_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - u32 rts = ar_iter->ar->hw->wiphy->rts_threshold; - - lockdep_assert_held(&arvif->ar->conf_mutex); - - /* During HW reconfiguration mac80211 reports all interfaces that were - * running until reconfiguration was started. Since FW doesn't have any - * vdevs at this point we must not iterate over this interface list. - * This setting will be updated upon add_interface(). */ - if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) - return; - - ar_iter->ret = ath10k_mac_set_rts(arvif, rts); - if (ar_iter->ret) - ath10k_warn("Failed to set RTS threshold for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set RTS threshold: %d for VDEV: %d\n", - rts, arvif->vdev_id); -} static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { - struct ath10k_generic_iter ar_iter; struct ath10k *ar = hw->priv; - - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; + struct ath10k_vif *arvif; + int ret = 0; mutex_lock(&ar->conf_mutex); - ieee80211_iterate_active_interfaces_atomic( - hw, IEEE80211_IFACE_ITER_NORMAL, - ath10k_set_rts_iter, &ar_iter); - mutex_unlock(&ar->conf_mutex); - - return ar_iter.ret; -} + list_for_each_entry(arvif, &ar->arvifs, list) { + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", + arvif->vdev_id, value); -static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ - struct ath10k_generic_iter *ar_iter = data; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - u32 frag = ar_iter->ar->hw->wiphy->frag_threshold; - - lockdep_assert_held(&arvif->ar->conf_mutex); - - /* During HW reconfiguration mac80211 reports all interfaces that were - * running until reconfiguration was started. Since FW doesn't have any - * vdevs at this point we must not iterate over this interface list. - * This setting will be updated upon add_interface(). 
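Note why the removed RESTARTED-state guard is no longer needed here: membership in ar->arvifs itself encodes that the vdev exists in firmware, so iterating it during HW reconfiguration visits nothing stale. A sketch of the invariant, assuming the pairing shown elsewhere in this patch:

	/* add_interface():    vdev created, then list_add(&arvif->list, &ar->arvifs);
	 * remove_interface(): list_del(&arvif->list), then vdev deleted;
	 * => any entry on ar->arvifs is safe to target with WMI commands. */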
*/ - if (ar_iter->ar->state == ATH10K_STATE_RESTARTED) - return; + ret = ath10k_mac_set_rts(arvif, value); + if (ret) { + ath10k_warn("could not set rts threshold for vdev %d (%d)\n", + arvif->vdev_id, ret); + break; + } + } + mutex_unlock(&ar->conf_mutex); - ar_iter->ret = ath10k_mac_set_frag(arvif, frag); - if (ar_iter->ret) - ath10k_warn("Failed to set frag threshold for VDEV: %d\n", - arvif->vdev_id); - else - ath10k_dbg(ATH10K_DBG_MAC, - "Set frag threshold: %d for VDEV: %d\n", - frag, arvif->vdev_id); + return ret; } static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) { - struct ath10k_generic_iter ar_iter; struct ath10k *ar = hw->priv; - - memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter)); - ar_iter.ar = ar; + struct ath10k_vif *arvif; + int ret = 0; mutex_lock(&ar->conf_mutex); - ieee80211_iterate_active_interfaces_atomic( - hw, IEEE80211_IFACE_ITER_NORMAL, - ath10k_set_frag_iter, &ar_iter); + list_for_each_entry(arvif, &ar->arvifs, list) { + ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", + arvif->vdev_id, value); + + ret = ath10k_mac_set_frag(arvif, value); + if (ret) { + ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n", + arvif->vdev_id, ret); + break; + } + } mutex_unlock(&ar->conf_mutex); - return ar_iter.ret; + return ret; } static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) @@ -2836,8 +2959,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) bool empty; spin_lock_bh(&ar->htt.tx_lock); - empty = bitmap_empty(ar->htt.used_msdu_ids, - ar->htt.max_num_pending_tx); + empty = (ar->htt.num_pending_tx == 0); spin_unlock_bh(&ar->htt.tx_lock); skip = (ar->state == ATH10K_STATE_WEDGED); @@ -3326,6 +3448,10 @@ int ath10k_mac_register(struct ath10k *ar) IEEE80211_HW_WANT_MONITOR_VIF | IEEE80211_HW_AP_LINK_PS; + /* MSDU can have HTT TX fragment pushed in front. The additional 4 + * bytes are used for padding/alignment if necessary. 
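The extra_tx_headroom arithmetic at the end of this hunk is worth spelling out. A sketch, assuming the HTT fragment descriptor is two 32-bit words as in ath10k's htt.h:

	struct htt_data_tx_desc_frag {
		__le32 paddr;
		__le32 len;
	} __packed;

	/* two descriptors (2 * 8 bytes) plus 4 bytes of padding = 20 bytes */
	ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag) * 2 + 4;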
*/ + ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4; + if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 6fce9bfb19a5..ba1021997b8f 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); void ath10k_reset_scan(unsigned long ptr); void ath10k_offchan_tx_purge(struct ath10k *ar); void ath10k_offchan_tx_work(struct work_struct *work); +void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar); +void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work); void ath10k_halt(struct ath10k *ar); static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index e2f9ef50b1bd..f8d59c7b9082 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps; module_param(ath10k_target_ps, uint, 0644); MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); -#define QCA988X_1_0_DEVICE_ID (0xabcd) #define QCA988X_2_0_DEVICE_ID (0x003c) static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { - { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ {0} }; @@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, static void ath10k_pci_process_ce(struct ath10k *ar); static int ath10k_pci_post_rx(struct ath10k *ar); -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, +static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, int num); -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); static void ath10k_pci_stop_ce(struct ath10k *ar); static void ath10k_pci_device_reset(struct ath10k *ar); static int ath10k_pci_reset_target(struct ath10k *ar); @@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar); static void ath10k_pci_stop_intr(struct ath10k *ar); static const struct ce_attr host_ce_config_wlan[] = { - /* host->target HTC control and raw streams */ - { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, - /* could be moved to share CE3 */ - /* target->host HTT + HTC control */ - { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, - /* target->host WMI */ - { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, - /* host->target WMI */ - { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, - /* host->target HTT */ - { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, - CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, - /* unused */ - { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, - /* Target autonomous hif_memcpy */ - { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, - /* ce_diag, the Diagnostic Window */ - { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + 
.src_sz_max = 2048, + .dest_nentries = 32, + }, + + /* CE3: host->target WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE5: unused */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 2, + .src_sz_max = DIAG_TRANSFER_LIMIT, + .dest_nentries = 2, + }, }; /* Target firmware's Copy Engine configuration. */ static const struct ce_pipe_config target_ce_config_wlan[] = { - /* host->target HTC control and raw streams */ - { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, - /* target->host HTT + HTC control */ - { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, - /* target->host WMI */ - { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* host->target WMI */ - { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* host->target HTT */ - { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = 0, + .pipedir = PIPEDIR_OUT, + .nentries = 32, + .nbytes_max = 256, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = 1, + .pipedir = PIPEDIR_IN, + .nentries = 32, + .nbytes_max = 512, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE2: target->host WMI */ + { + .pipenum = 2, + .pipedir = PIPEDIR_IN, + .nentries = 32, + .nbytes_max = 2048, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE3: host->target WMI */ + { + .pipenum = 3, + .pipedir = PIPEDIR_OUT, + .nentries = 32, + .nbytes_max = 2048, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE4: host->target HTT */ + { + .pipenum = 4, + .pipedir = PIPEDIR_OUT, + .nentries = 256, + .nbytes_max = 256, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + /* NB: 50% of src nentries, since tx has 2 frags */ - /* unused */ - { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* Reserved for target autonomous hif_memcpy */ - { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, + + /* CE5: unused */ + { + .pipenum = 5, + .pipedir = PIPEDIR_OUT, + .nentries = 32, + .nbytes_max = 2048, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = 6, + .pipedir = PIPEDIR_INOUT, + .nentries = 32, + .nbytes_max = 4096, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + /* CE7 used only by Host */ }; @@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, unsigned int completed_nbytes, orig_nbytes, remaining_bytes; unsigned int id; unsigned int flags; - struct ce_state *ce_diag; + struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; dma_addr_t ce_data_base = 0; @@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, unsigned int completed_nbytes, orig_nbytes, remaining_bytes; unsigned int id; unsigned int flags; - struct ce_state *ce_diag; + struct ath10k_ce_pipe *ce_diag; void *data_buf = NULL; u32 ce_data; /* Host buffer address in CE space */ dma_addr_t ce_data_base = 0; @@ -437,7 +537,7 @@ 
static void ath10k_pci_wait(struct ath10k *ar) ath10k_warn("Unable to wakeup target\n"); } -void ath10k_do_pci_wake(struct ath10k *ar) +int ath10k_do_pci_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); void __iomem *pci_addr = ar_pci->mem; @@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar) atomic_inc(&ar_pci->keep_awake_count); if (ar_pci->verified_awake) - return; + return 0; for (;;) { if (ath10k_pci_target_is_awake(ar)) { ar_pci->verified_awake = true; - break; + return 0; } if (tot_delay > PCIE_WAKE_TIMEOUT) { - ath10k_warn("target takes too long to wake up (awake count %d)\n", + ath10k_warn("target took longer than %d us to wake up (awake count %d)\n", + PCIE_WAKE_TIMEOUT, atomic_read(&ar_pci->keep_awake_count)); - break; + return -ETIMEDOUT; } udelay(curr_delay); @@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar) * FIXME: Handle OOM properly. */ static inline -struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info) +struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info) { struct ath10k_pci_compl *compl = NULL; @@ -511,39 +612,28 @@ exit: } /* Called by lower (CE) layer when a send to Target completes. */ -static void ath10k_pci_ce_send_done(struct ce_state *ce_state, - void *transfer_context, - u32 ce_data, - unsigned int nbytes, - unsigned int transfer_id) +static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; struct ath10k_pci_compl *compl; - bool process = false; - - do { - /* - * For the send completion of an item in sendlist, just - * increment num_sends_allowed. The upper layer callback will - * be triggered when last fragment is done with send. - */ - if (transfer_context == CE_SENDLIST_ITEM_CTXT) { - spin_lock_bh(&pipe_info->pipe_lock); - pipe_info->num_sends_allowed++; - spin_unlock_bh(&pipe_info->pipe_lock); - continue; - } + void *transfer_context; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + while (ath10k_ce_completed_send_next(ce_state, &transfer_context, + &ce_data, &nbytes, + &transfer_id) == 0) { compl = get_free_compl(pipe_info); if (!compl) break; - compl->send_or_recv = HIF_CE_COMPLETE_SEND; + compl->state = ATH10K_PCI_COMPL_SEND; compl->ce_state = ce_state; compl->pipe_info = pipe_info; - compl->transfer_context = transfer_context; + compl->skb = transfer_context; compl->nbytes = nbytes; compl->transfer_id = transfer_id; compl->flags = 0; @@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state, spin_lock_bh(&ar_pci->compl_lock); list_add_tail(&compl->list, &ar_pci->compl_process); spin_unlock_bh(&ar_pci->compl_lock); - - process = true; - } while (ath10k_ce_completed_send_next(ce_state, - &transfer_context, - &ce_data, &nbytes, - &transfer_id) == 0); - - /* - * If only some of the items within a sendlist have completed, - * don't invoke completion processing until the entire sendlist - * has been sent. - */ - if (!process) - return; + } ath10k_pci_process_ce(ar); } /* Called by lower (CE) layer when data is received from the Target. 
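The CE completion callbacks now take only the pipe and pull completed descriptors themselves, instead of being invoked once per descriptor. A minimal sketch of the drain idiom (consume_completion() is a hypothetical consumer, not a driver function):

	static void example_ce_send_done(struct ath10k_ce_pipe *ce_state)
	{
		void *ctx;
		u32 ce_data;
		unsigned int nbytes, id;

		/* drain everything that has completed before returning */
		while (ath10k_ce_completed_send_next(ce_state, &ctx, &ce_data,
						     &nbytes, &id) == 0)
			consume_completion(ctx, nbytes, id);
	}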
*/ -static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, - void *transfer_context, u32 ce_data, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; struct ath10k_pci_compl *compl; struct sk_buff *skb; + void *transfer_context; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + unsigned int flags; - do { + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &ce_data, &nbytes, &transfer_id, + &flags) == 0) { compl = get_free_compl(pipe_info); if (!compl) break; - compl->send_or_recv = HIF_CE_COMPLETE_RECV; + compl->state = ATH10K_PCI_COMPL_RECV; compl->ce_state = ce_state; compl->pipe_info = pipe_info; - compl->transfer_context = transfer_context; + compl->skb = transfer_context; compl->nbytes = nbytes; compl->transfer_id = transfer_id; compl->flags = flags; @@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, spin_lock_bh(&ar_pci->compl_lock); list_add_tail(&compl->list, &ar_pci->compl_process); spin_unlock_bh(&ar_pci->compl_lock); - - } while (ath10k_ce_completed_recv_next(ce_state, - &transfer_context, - &ce_data, &nbytes, - &transfer_id, - &flags) == 0); + } ath10k_pci_process_ce(ar); } @@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); - struct ce_state *ce_hdl = pipe_info->ce_hdl; - struct ce_sendlist sendlist; + struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]); + struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl; unsigned int len; u32 flags = 0; int ret; - memset(&sendlist, 0, sizeof(struct ce_sendlist)); - len = min(bytes, nbuf->len); bytes -= len; @@ -648,19 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, "ath10k tx: data: ", nbuf->data, nbuf->len); - ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags); - - /* Make sure we have resources to handle this request */ - spin_lock_bh(&pipe_info->pipe_lock); - if (!pipe_info->num_sends_allowed) { - ath10k_warn("Pipe: %d is full\n", pipe_id); - spin_unlock_bh(&pipe_info->pipe_lock); - return -ENOSR; - } - pipe_info->num_sends_allowed--; - spin_unlock_bh(&pipe_info->pipe_lock); - - ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); + ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id, + flags); if (ret) ath10k_warn("CE send failed: %p\n", nbuf); @@ -670,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); - int ret; - - spin_lock_bh(&pipe_info->pipe_lock); - ret = pipe_info->num_sends_allowed; - spin_unlock_bh(&pipe_info->pipe_lock); - - return ret; + return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); } static void ath10k_pci_hif_dump_area(struct ath10k *ar) @@ -764,9 +818,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, static int ath10k_pci_start_ce(struct ath10k *ar) { struct ath10k_pci *ar_pci = 
ath10k_pci_priv(ar); - struct ce_state *ce_diag = ar_pci->ce_diag; + struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag; const struct ce_attr *attr; - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; struct ath10k_pci_compl *compl; int i, pipe_num, completions, disable_interrupts; @@ -792,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar) ath10k_pci_ce_send_done, disable_interrupts); completions += attr->src_nentries; - pipe_info->num_sends_allowed = attr->src_nentries - 1; } if (attr->dest_nentries) { @@ -805,15 +858,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar) continue; for (i = 0; i < completions; i++) { - compl = kmalloc(sizeof(struct ath10k_pci_compl), - GFP_KERNEL); + compl = kmalloc(sizeof(*compl), GFP_KERNEL); if (!compl) { ath10k_warn("No memory for completion state\n"); ath10k_pci_stop_ce(ar); return -ENOMEM; } - compl->send_or_recv = HIF_CE_COMPLETE_FREE; + compl->state = ATH10K_PCI_COMPL_FREE; list_add_tail(&compl->list, &pipe_info->compl_free); } } @@ -840,7 +892,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar) * their associated resources */ spin_lock_bh(&ar_pci->compl_lock); list_for_each_entry(compl, &ar_pci->compl_process, list) { - skb = (struct sk_buff *)compl->transfer_context; + skb = compl->skb; ATH10K_SKB_CB(skb)->is_aborted = true; } spin_unlock_bh(&ar_pci->compl_lock); @@ -850,7 +902,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_compl *compl, *tmp; - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; struct sk_buff *netbuf; int pipe_num; @@ -861,7 +913,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar) list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { list_del(&compl->list); - netbuf = (struct sk_buff *)compl->transfer_context; + netbuf = compl->skb; dev_kfree_skb_any(netbuf); kfree(compl); } @@ -912,12 +964,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar) list_del(&compl->list); spin_unlock_bh(&ar_pci->compl_lock); - if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { + switch (compl->state) { + case ATH10K_PCI_COMPL_SEND: cb->tx_completion(ar, - compl->transfer_context, + compl->skb, compl->transfer_id); send_done = 1; - } else { + break; + case ATH10K_PCI_COMPL_RECV: ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); if (ret) { ath10k_warn("Unable to post recv buffer for pipe: %d\n", @@ -925,7 +979,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar) break; } - skb = (struct sk_buff *)compl->transfer_context; + skb = compl->skb; nbytes = compl->nbytes; ath10k_dbg(ATH10K_DBG_PCI, @@ -944,16 +998,23 @@ static void ath10k_pci_process_ce(struct ath10k *ar) nbytes, skb->len + skb_tailroom(skb)); } + break; + case ATH10K_PCI_COMPL_FREE: + ath10k_warn("free completion cannot be processed\n"); + break; + default: + ath10k_warn("invalid completion state (%d)\n", + compl->state); + break; } - compl->send_or_recv = HIF_CE_COMPLETE_FREE; + compl->state = ATH10K_PCI_COMPL_FREE; /* * Add completion back to the pipe's free list. 
*/ spin_lock_bh(&compl->pipe_info->pipe_lock); list_add_tail(&compl->list, &compl->pipe_info->compl_free); - compl->pipe_info->num_sends_allowed += send_done; spin_unlock_bh(&compl->pipe_info->pipe_lock); } @@ -1037,12 +1098,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, &dl_is_polled); } -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, +static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, int num) { struct ath10k *ar = pipe_info->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_state = pipe_info->ce_hdl; + struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl; struct sk_buff *skb; dma_addr_t ce_data; int i, ret = 0; @@ -1097,7 +1158,7 @@ err: static int ath10k_pci_post_rx(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; const struct ce_attr *attr; int pipe_num, ret = 0; @@ -1147,11 +1208,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar) return 0; } -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) { struct ath10k *ar; struct ath10k_pci *ar_pci; - struct ce_state *ce_hdl; + struct ath10k_ce_pipe *ce_hdl; u32 buf_sz; struct sk_buff *netbuf; u32 ce_data; @@ -1179,11 +1240,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) } } -static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) { struct ath10k *ar; struct ath10k_pci *ar_pci; - struct ce_state *ce_hdl; + struct ath10k_ce_pipe *ce_hdl; struct sk_buff *netbuf; u32 ce_data; unsigned int nbytes; @@ -1206,15 +1267,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, &ce_data, &nbytes, &id) == 0) { - if (netbuf != CE_SENDLIST_ITEM_CTXT) - /* - * Indicate the completion to higer layer to free - * the buffer - */ - ATH10K_SKB_CB(netbuf)->is_aborted = true; - ar_pci->msg_callbacks_current.tx_completion(ar, - netbuf, - id); + /* + * Indicate the completion to higher layer to free + * the buffer + */ + ATH10K_SKB_CB(netbuf)->is_aborted = true; + ar_pci->msg_callbacks_current.tx_completion(ar, + netbuf, + id); } } @@ -1232,7 +1292,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) int pipe_num; for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; pipe_info = &ar_pci->pipe_info[pipe_num]; ath10k_pci_rx_pipe_cleanup(pipe_info); @@ -1243,7 +1303,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) static void ath10k_pci_ce_deinit(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; int pipe_num; for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { @@ -1293,8 +1353,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *resp, u32 *resp_len) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl; - struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl; + struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; + struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; + struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; + 
struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; dma_addr_t req_paddr = 0; dma_addr_t resp_paddr = 0; struct bmi_xfer xfer = {}; @@ -1378,13 +1440,16 @@ err_dma: return ret; } -static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, - void *transfer_context, - u32 data, - unsigned int nbytes, - unsigned int transfer_id) +static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) { - struct bmi_xfer *xfer = transfer_context; + struct bmi_xfer *xfer; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + + if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data, + &nbytes, &transfer_id)) + return; if (xfer->wait_for_resp) return; @@ -1392,14 +1457,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, complete(&xfer->done); } -static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, - void *transfer_context, - u32 data, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { - struct bmi_xfer *xfer = transfer_context; + struct bmi_xfer *xfer; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + unsigned int flags; + + if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, + &nbytes, &transfer_id, &flags)) + return; if (!xfer->wait_for_resp) { ath10k_warn("unexpected: BMI data received; ignoring\n"); @@ -1679,7 +1747,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) static int ath10k_pci_ce_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; const struct ce_attr *attr; int pipe_num; @@ -1895,7 +1963,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { static void ath10k_pci_ce_tasklet(unsigned long ptr) { - struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr; + struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; struct ath10k_pci *ar_pci = pipe->ar_pci; ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); @@ -2212,18 +2280,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar) static void ath10k_pci_device_reset(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *mem = ar_pci->mem; int i; u32 val; if (!SOC_GLOBAL_RESET_ADDRESS) return; - if (!mem) - return; - - ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, + ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK); for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { if (ath10k_pci_target_is_awake(ar)) @@ -2232,12 +2295,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar) } /* Put Target, including PCIe, into RESET. */ - val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS); + val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); val |= 1; - ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); + ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & + if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & RTC_STATE_COLD_RESET_MASK) break; msleep(1); @@ -2245,16 +2308,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar) /* Pull Target, including PCIe, out of RESET. 
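The BMI callbacks follow the same pull model but expect at most one outstanding transfer, so they fetch a single completion and treat "nothing completed" as a spurious invocation. Distilled from ath10k_pci_bmi_send_done() above:

	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes, transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;		/* nothing completed yet */

	if (!xfer->wait_for_resp)
		complete(&xfer->done);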
*/ val &= ~1; - ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val); + ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) & + if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & RTC_STATE_COLD_RESET_MASK)) break; msleep(1); } - ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); + ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET); } static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) @@ -2267,13 +2330,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) switch (i) { case ATH10K_PCI_FEATURE_MSI_X: - ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n"); - break; - case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND: - ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n"); + ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n"); break; case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: - ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n"); + ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n"); break; } } @@ -2286,7 +2346,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, int ret = 0; struct ath10k *ar; struct ath10k_pci *ar_pci; - u32 lcr_val; + u32 lcr_val, chip_id; ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); @@ -2298,9 +2358,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->dev = &pdev->dev; switch (pci_dev->device) { - case QCA988X_1_0_DEVICE_ID: - set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features); - break; case QCA988X_2_0_DEVICE_ID: set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); break; @@ -2322,10 +2379,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_ar_pci; } - /* Enable QCA988X_1.0 HW workarounds */ - if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) - spin_lock_init(&ar_pci->hw_v1_workaround_lock); - ar_pci->ar = ar; ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS; atomic_set(&ar_pci->keep_awake_count, 0); @@ -2395,9 +2448,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev, spin_lock_init(&ar_pci->ce_lock); - ar_pci->cacheline_sz = dma_get_cache_alignment(); + ret = ath10k_do_pci_wake(ar); + if (ret) { + ath10k_err("Failed to get chip id: %d\n", ret); + return ret; + } + + chip_id = ath10k_pci_read32(ar, + RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS); + + ath10k_do_pci_sleep(ar); + + ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); - ret = ath10k_core_register(ar); + ret = ath10k_core_register(ar, chip_id); if (ret) { ath10k_err("could not register driver core (%d)\n", ret); goto err_iomap; @@ -2414,7 +2478,6 @@ err_region: err_device: pci_disable_device(pdev); err_ar: - pci_set_drvdata(pdev, NULL); ath10k_core_destroy(ar); err_ar_pci: /* call HIF PCI free here */ @@ -2442,7 +2505,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_core_unregister(ar); - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ar_pci->mem); pci_release_region(pdev, BAR_NUM); pci_clear_master(pdev); @@ -2483,9 +2545,6 @@ module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE); -MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" 
QCA988X_HW_2_0_OTP_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 871bb339d56d..52fb7b973571 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -43,22 +43,23 @@ struct bmi_xfer { u32 resp_len; }; +enum ath10k_pci_compl_state { + ATH10K_PCI_COMPL_FREE = 0, + ATH10K_PCI_COMPL_SEND, + ATH10K_PCI_COMPL_RECV, +}; + struct ath10k_pci_compl { struct list_head list; - int send_or_recv; - struct ce_state *ce_state; - struct hif_ce_pipe_info *pipe_info; - void *transfer_context; + enum ath10k_pci_compl_state state; + struct ath10k_ce_pipe *ce_state; + struct ath10k_pci_pipe *pipe_info; + struct sk_buff *skb; unsigned int nbytes; unsigned int transfer_id; unsigned int flags; }; -/* compl_state.send_or_recv */ -#define HIF_CE_COMPLETE_FREE 0 -#define HIF_CE_COMPLETE_SEND 1 -#define HIF_CE_COMPLETE_RECV 2 - /* * PCI-specific Target state * @@ -152,17 +153,16 @@ struct service_to_pipe { enum ath10k_pci_features { ATH10K_PCI_FEATURE_MSI_X = 0, - ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1, - ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2, + ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1, /* keep last */ ATH10K_PCI_FEATURE_COUNT }; /* Per-pipe state. */ -struct hif_ce_pipe_info { +struct ath10k_pci_pipe { /* Handle of underlying Copy Engine */ - struct ce_state *ce_hdl; + struct ath10k_ce_pipe *ce_hdl; /* Our pipe number; facilitates use of pipe_info ptrs. */ u8 pipe_num; @@ -178,9 +178,6 @@ struct hif_ce_pipe_info { /* List of free CE completion slots */ struct list_head compl_free; - /* Limit the number of outstanding send requests. */ - int num_sends_allowed; - struct ath10k_pci *ar_pci; struct tasklet_struct intr; }; @@ -190,7 +187,6 @@ struct ath10k_pci { struct device *dev; struct ath10k *ar; void __iomem *mem; - int cacheline_sz; DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); @@ -219,7 +215,7 @@ struct ath10k_pci { bool compl_processing; - struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX]; + struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; struct ath10k_hif_cb msg_callbacks_current; @@ -227,16 +223,13 @@ struct ath10k_pci { u32 fw_indicator_address; /* Copy Engine used for Diagnostic Accesses */ - struct ce_state *ce_diag; + struct ath10k_ce_pipe *ce_diag; /* FIXME: document what this really protects */ spinlock_t ce_lock; /* Map CE id to ce_state */ - struct ce_state *ce_id_to_state[CE_COUNT_MAX]; - - /* makes sure that dummy reads are atomic */ - spinlock_t hw_v1_workaround_lock; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) @@ -244,14 +237,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) return ar->hif.priv; } -static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr) +static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) { - return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr); + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); } -static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val) +static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) { - iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr); + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); } #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
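The completion slots queued onto compl_free above now carry an explicit tri-state enum instead of the old HIF_CE_COMPLETE_* #defines, so a dispatcher can switch exhaustively on the slot state. A sketch of such a dispatch built on the new enum ath10k_pci_compl_state; handle_send() and handle_recv() are hypothetical stand-ins, not driver symbols.

#include <linux/skbuff.h>
#include <linux/bug.h>

/* stand-ins for the driver's send/recv completion handlers */
static void handle_send(struct sk_buff *skb, unsigned int transfer_id);
static void handle_recv(struct sk_buff *skb, unsigned int nbytes,
                        unsigned int flags);

static void reap_compl_sketch(struct ath10k_pci_compl *compl)
{
        switch (compl->state) {
        case ATH10K_PCI_COMPL_SEND:
                handle_send(compl->skb, compl->transfer_id);
                break;
        case ATH10K_PCI_COMPL_RECV:
                handle_recv(compl->skb, compl->nbytes, compl->flags);
                break;
        case ATH10K_PCI_COMPL_FREE:
                WARN_ON_ONCE(1);        /* free slots are never queued */
                break;
        }

        compl->state = ATH10K_PCI_COMPL_FREE;   /* slot may be recycled */
}

@@ -310,23 +307,8 @@ static inline void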
ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *addr = ar_pci->mem; - - if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) { - unsigned long irq_flags; - spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags); - - ioread32(addr+offset+4); /* 3rd read prior to write */ - ioread32(addr+offset+4); /* 2nd read prior to write */ - ioread32(addr+offset+4); /* 1st read prior to write */ - iowrite32(value, addr+offset); - - spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock, - irq_flags); - } else { - iowrite32(value, addr+offset); - } + iowrite32(value, ar_pci->mem + offset); } static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) @@ -336,15 +318,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) return ioread32(ar_pci->mem + offset); } -void ath10k_do_pci_wake(struct ath10k *ar); +int ath10k_do_pci_wake(struct ath10k *ar); void ath10k_do_pci_sleep(struct ath10k *ar); -static inline void ath10k_pci_wake(struct ath10k *ar) +static inline int ath10k_pci_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_wake(ar); + return ath10k_do_pci_wake(ar); + + return 0; } static inline void ath10k_pci_sleep(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index bfec6c8f2ecb..1c584c4b019c 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -422,10 +422,30 @@ struct rx_mpdu_end { #define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) #define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) +/* The decapped header (rx_hdr_status) contains the following: + * a) 802.11 header + * [padding to 4 bytes] + * b) HW crypto parameter + * - 0 bytes for no security + * - 4 bytes for WEP + * - 8 bytes for TKIP, AES + * [padding to 4 bytes] + * c) A-MSDU subframe header (14 bytes) if applicable + * d) LLC/SNAP (RFC1042, 8 bytes) + * + * In case of A-MSDU, only the first frame in the sequence contains (a) and (b). */ enum rx_msdu_decap_format { - RX_MSDU_DECAP_RAW = 0, - RX_MSDU_DECAP_NATIVE_WIFI = 1, + RX_MSDU_DECAP_RAW = 0, + + /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in + * htt_rx_desc contains the original decapped 802.11 header. */ + RX_MSDU_DECAP_NATIVE_WIFI = 1, + + /* Payload contains an ethernet header (struct ethhdr). */ RX_MSDU_DECAP_ETHERNET2_DIX = 2, + + /* Payload contains two 48-bit addresses and 2-byte length (14 bytes + * total), followed by an RFC1042 header (8 bytes).
*/ RX_MSDU_DECAP_8023_SNAP_LLC = 3 };
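As a worked example of the rx_hdr_status layout documented above, here is a sketch (not driver code) of where the RFC1042 LLC/SNAP header would start for a plain, non-A-MSDU TKIP frame; round4() and the 8-byte crypto parameter follow the comment, and llc_snap_offset_sketch is a hypothetical name.

#include <linux/ieee80211.h>

#define round4(x) (((x) + 3) & ~3)

static unsigned int llc_snap_offset_sketch(const struct ieee80211_hdr *hdr)
{
        unsigned int off = round4(ieee80211_hdrlen(hdr->frame_control));

        off += round4(8);       /* (b) TKIP/AES crypto parameter */
        /* no (c): plain MSDU, so no A-MSDU subframe header */

        return off;             /* (d) RFC1042 LLC/SNAP starts here */
}

diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 85e806bf7257..90817ddc92ba 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump, ); TRACE_EVENT(ath10k_wmi_cmd, - TP_PROTO(int id, void *buf, size_t buf_len), + TP_PROTO(int id, void *buf, size_t buf_len, int ret), - TP_ARGS(id, buf, buf_len), + TP_ARGS(id, buf, buf_len, ret), TP_STRUCT__entry( __field(unsigned int, id) __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) + __field(int, ret) ), TP_fast_assign( __entry->id = id; __entry->buf_len = buf_len; + __entry->ret = ret; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "id %d len %zu", + "id %d len %zu ret %d", __entry->id, - __entry->buf_len + __entry->buf_len, + __entry->ret ) ); @@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event, ) ); +TRACE_EVENT(ath10k_htt_stats, + TP_PROTO(void *buf, size_t buf_len), + + TP_ARGS(buf, buf_len), + + TP_STRUCT__entry( + __field(size_t, buf_len) + __dynamic_array(u8, buf, buf_len) + ), + + TP_fast_assign( + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(buf), buf, buf_len); + ), + + TP_printk( + "len %zu", + __entry->buf_len + ) +); + #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ /* we don't want to use include/trace/events */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 68b6faefd1d8..5ae373a1e294 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -44,40 +44,39 @@ out: spin_unlock_bh(&ar->data_lock); } -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) +void ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done) { struct device *dev = htt->ar->dev; struct ieee80211_tx_info *info; - struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag; - struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu; + struct ath10k_skb_cb *skb_cb; + struct sk_buff *msdu; int ret; - if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0) - return; - - ATH10K_SKB_CB(txdesc)->htt.refcount--; + ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", + tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); - if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) + if (tx_done->msdu_id >= htt->max_num_pending_tx) { + ath10k_warn("warning: msdu_id %d too big, ignoring\n", + tx_done->msdu_id); return; - - if (txfrag) { - ret = ath10k_skb_unmap(dev, txfrag); - if (ret) - ath10k_warn("txfrag unmap failed (%d)\n", ret); - - dev_kfree_skb_any(txfrag); } + msdu = htt->pending_tx[tx_done->msdu_id]; + skb_cb = ATH10K_SKB_CB(msdu); + ret = ath10k_skb_unmap(dev, msdu); if (ret) ath10k_warn("data skb unmap failed (%d)\n", ret); + if (skb_cb->htt.frag_len) + skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); + ath10k_report_offchan_tx(htt->ar, msdu); info = IEEE80211_SKB_CB(msdu); - memset(&info->status, 0, sizeof(info->status)); - if (ATH10K_SKB_CB(txdesc)->htt.discard) { + if (tx_done->discard) { ieee80211_free_txskb(htt->ar->hw, msdu); goto exit; } @@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_ACK; - if (ATH10K_SKB_CB(txdesc)->htt.no_ack) + if (tx_done->no_ack) info->flags &= ~IEEE80211_TX_STAT_ACK; ieee80211_tx_status(htt->ar->hw, msdu); @@ -93,36 +92,12 @@ void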
ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc) exit: spin_lock_bh(&htt->tx_lock); - htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL; - ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id); + htt->pending_tx[tx_done->msdu_id] = NULL; + ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); __ath10k_htt_tx_dec_pending(htt); - if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx)) + if (htt->num_pending_tx == 0) wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); - - dev_kfree_skb_any(txdesc); -} - -void ath10k_txrx_tx_completed(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done) -{ - struct sk_buff *txdesc; - - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", - tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); - - if (tx_done->msdu_id >= htt->max_num_pending_tx) { - ath10k_warn("warning: msdu_id %d too big, ignoring\n", - tx_done->msdu_id); - return; - } - - txdesc = htt->pending_tx[tx_done->msdu_id]; - - ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard; - ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack; - - ath10k_txrx_tx_unref(htt, txdesc); } static const u8 rx_legacy_rate_idx[] = { @@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info) status->vht_nss, status->freq, status->band); + ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", + info->skb->data, info->skb->len); ieee80211_rx(ar->hw, info->skb); } diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h index e78632a76df7..356dc9c04c9e 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -19,9 +19,8 @@ #include "htt.h" -void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc); -void ath10k_txrx_tx_completed(struct ath10k_htt *htt, - const struct htt_tx_done *tx_done); +void ath10k_txrx_tx_unref(struct ath10k_htt *htt, + const struct htt_tx_done *tx_done); void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info); struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 55f90c761868..ccf3597fd9e2 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -23,29 +23,470 @@ #include "wmi.h" #include "mac.h" -void ath10k_wmi_flush_tx(struct ath10k *ar) -{ - int ret; - - lockdep_assert_held(&ar->conf_mutex); - - if (ar->state == ATH10K_STATE_WEDGED) { - ath10k_warn("wmi flush skipped - device is wedged anyway\n"); - return; - } - - ret = wait_event_timeout(ar->wmi.wq, - atomic_read(&ar->wmi.pending_tx_count) == 0, - 5*HZ); - if (atomic_read(&ar->wmi.pending_tx_count) == 0) - return; - - if (ret == 0) - ret = -ETIMEDOUT; - - if (ret < 0) - ath10k_warn("wmi flush failed (%d)\n", ret); -} +/* MAIN WMI cmd track */ +static struct wmi_cmd_map wmi_cmd_map = { + .init_cmdid = WMI_INIT_CMDID, + .start_scan_cmdid = WMI_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID, + .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID, + 
.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID, + .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID, + .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID, + .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID, + .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = 
WMI_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID, + .network_list_offload_config_cmdid = + WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID, + .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID, + .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID, + .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID, + .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID, + .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID, + .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID, + .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID, + .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID, + .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD, + .echo_cmdid = WMI_ECHO_CMDID, + .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID, + .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID, + .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID, + .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID, + .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID, +}; + +/* 10.X WMI cmd track */ +static struct wmi_cmd_map wmi_10x_cmd_map = { + .init_cmdid = WMI_10X_INIT_CMDID, + .start_scan_cmdid = WMI_10X_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID, 
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, + .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid 
= + WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10X_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, + .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, +}; + +/* MAIN WMI VDEV param map */ +static struct wmi_vdev_param_map wmi_vdev_param_map = { + .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_VDEV_PARAM_WDS, + .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT, + .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT, + .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_VDEV_PARAM_SGI, + .ldpc = WMI_VDEV_PARAM_LDPC, + .tx_stbc = WMI_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_VDEV_PARAM_DEF_KEYID, + .nss = WMI_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED, + .enable_rtscts = 
WMI_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_VDEV_PARAM_TXBF, + .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE, + .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY, + .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_VDEV_PARAM_UNSUPPORTED, +}; + +/* 10.X WMI VDEV param map */ +static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { + .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_10X_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_10X_VDEV_PARAM_WDS, + .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, + .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED, + .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_10X_VDEV_PARAM_SGI, + .ldpc = WMI_10X_VDEV_PARAM_LDPC, + .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID, + .nss = WMI_10X_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, + .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_VDEV_PARAM_UNSUPPORTED, + .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED, + .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED, + .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, +}; + +static struct wmi_pdev_param_map wmi_pdev_param_map = { + .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE, + 
.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE, + .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .dcs = WMI_PDEV_PARAM_DCS, + .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_PDEV_PARAM_PROXY_STA, + .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG, + .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP, + .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED, + .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { + .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO, + 
.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED, + .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE, + .dcs = WMI_10X_PDEV_PARAM_DCS, + .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED, + .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED, + .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED, + .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, + .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, +}; int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { @@ -85,18 +526,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { dev_kfree_skb(skb); - - if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0) - wake_up(&ar->wmi.wq); } -/* WMI command API */ -static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, - enum wmi_cmd_id cmd_id) +static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb, + u32 cmd_id) { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct wmi_cmd_hdr *cmd_hdr; - int status; + int ret; u32 cmd = 0; if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) @@ -107,25 +544,146 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, cmd_hdr = (struct wmi_cmd_hdr *)skb->data; cmd_hdr->cmd_id = __cpu_to_le32(cmd); - if (atomic_add_return(1, &ar->wmi.pending_tx_count) > - WMI_MAX_PENDING_TX_COUNT) { - /* avoid using up memory when FW hangs */ - atomic_dec(&ar->wmi.pending_tx_count); - return -EBUSY; + memset(skb_cb, 0, sizeof(*skb_cb)); + ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); + trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret); + + if (ret) + goto err_pull; + + return 0; + +err_pull: + skb_pull(skb, sizeof(struct wmi_cmd_hdr)); + return ret; +} + +static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif) +{ + struct wmi_bcn_tx_arg arg = {0}; + int ret; + + lockdep_assert_held(&arvif->ar->data_lock); + + if (arvif->beacon == NULL) + return; + + arg.vdev_id = arvif->vdev_id; + arg.tx_rate = 0; + arg.tx_power = 0; + arg.bcn = arvif->beacon->data; + arg.bcn_len = arvif->beacon->len; + + ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg); + if (ret) + return; + + dev_kfree_skb_any(arvif->beacon); + arvif->beacon = NULL; +} + +static void 
ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + ath10k_wmi_tx_beacon_nowait(arvif); +} + +static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath10k_wmi_tx_beacons_iter, + NULL); + spin_unlock_bh(&ar->data_lock); +} + +static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar) +{ + /* try to send pending beacons first. they take priority */ + ath10k_wmi_tx_beacons_nowait(ar); + + wake_up(&ar->wmi.tx_credits_wq); +} + +static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, + u32 cmd_id) +{ + int ret = -EOPNOTSUPP; + + might_sleep(); + + if (cmd_id == WMI_CMD_UNSUPPORTED) { + ath10k_warn("wmi command %d is not supported by firmware\n", + cmd_id); + return ret; } - memset(skb_cb, 0, sizeof(*skb_cb)); + wait_event_timeout(ar->wmi.tx_credits_wq, ({ + /* try to send pending beacons first. they take priority */ + ath10k_wmi_tx_beacons_nowait(ar); - trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len); + ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id); + (ret != -EAGAIN); + }), 3*HZ); - status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb); - if (status) { + if (ret) dev_kfree_skb_any(skb); - atomic_dec(&ar->wmi.pending_tx_count); - return status; + + return ret; +} + +int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) +{ + int ret = 0; + struct wmi_mgmt_tx_cmd *cmd; + struct ieee80211_hdr *hdr; + struct sk_buff *wmi_skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int len; + u16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control); + + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) + return -EINVAL; + + len = sizeof(cmd->hdr) + skb->len; + len = round_up(len, 4); + + wmi_skb = ath10k_wmi_alloc_skb(len); + if (!wmi_skb) + return -ENOMEM; + + cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data; + + cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id); + cmd->hdr.tx_rate = 0; + cmd->hdr.tx_power = 0; + cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len)); + + memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); + memcpy(cmd->buf, skb->data, skb->len); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", + wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, + fc & IEEE80211_FCTL_STYPE); + + /* Send the management frame buffer to the target */ + ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid); + if (ret) { + dev_kfree_skb_any(skb); + return ret; } - return 0; + /* TODO: report tx status to mac80211 - temporarily just ACK */ + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(ar->hw, skb); + + return ret; } static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
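A condensed sketch of the credit-retry idiom ath10k_wmi_cmd_send() adopts above: keep retrying the non-blocking submit until it stops failing with -EAGAIN, waking whenever the HTC credit callback kicks the wait queue. This is illustrative only; try_send() and send_when_credited_sketch() are hypothetical stand-ins for ath10k_wmi_cmd_send_nowait() and its caller.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int send_when_credited_sketch(wait_queue_head_t *wq,
                                     int (*try_send)(void *arg), void *arg)
{
        int ret = -EAGAIN;

        wait_event_timeout(*wq, ({
                ret = try_send(arg);
                ret != -EAGAIN; /* done unless starved of tx credits */
        }), 3 * HZ);

        return ret;     /* still -EAGAIN here means the wait timed out */
}

@@ -315,7 +873,9 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { - struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data; + struct wmi_mgmt_rx_event_v1 *ev_v1; + struct wmi_mgmt_rx_event_v2 *ev_v2; + struct wmi_mgmt_rx_hdr_v1 *ev_hdr; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr; u32 rx_status; @@ -325,13 +885,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) u32 rate; u32 buf_len; u16 fc; + int pull_len; + + if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,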
ar->fw_features)) { + ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data; + ev_hdr = &ev_v2->hdr.v1; + pull_len = sizeof(*ev_v2); + } else { + ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data; + ev_hdr = &ev_v1->hdr; + pull_len = sizeof(*ev_v1); + } - channel = __le32_to_cpu(event->hdr.channel); - buf_len = __le32_to_cpu(event->hdr.buf_len); - rx_status = __le32_to_cpu(event->hdr.status); - snr = __le32_to_cpu(event->hdr.snr); - phy_mode = __le32_to_cpu(event->hdr.phy_mode); - rate = __le32_to_cpu(event->hdr.rate); + channel = __le32_to_cpu(ev_hdr->channel); + buf_len = __le32_to_cpu(ev_hdr->buf_len); + rx_status = __le32_to_cpu(ev_hdr->status); + snr = __le32_to_cpu(ev_hdr->snr); + phy_mode = __le32_to_cpu(ev_hdr->phy_mode); + rate = __le32_to_cpu(ev_hdr->rate); memset(status, 0, sizeof(*status)); @@ -358,7 +929,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR; status->rate_idx = get_rate_idx(rate, status->band); - skb_pull(skb, sizeof(event->hdr)); + skb_pull(skb, pull_len); hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); @@ -734,10 +1305,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) int i = -1; struct wmi_bcn_info *bcn_info; struct ath10k_vif *arvif; - struct wmi_bcn_tx_arg arg; struct sk_buff *bcn; int vdev_id = 0; - int ret; ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n"); @@ -794,17 +1363,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info); ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info); - arg.vdev_id = arvif->vdev_id; - arg.tx_rate = 0; - arg.tx_power = 0; - arg.bcn = bcn->data; - arg.bcn_len = bcn->len; + spin_lock_bh(&ar->data_lock); + if (arvif->beacon) { + ath10k_warn("SWBA overrun on vdev %d\n", + arvif->vdev_id); + dev_kfree_skb_any(arvif->beacon); + } - ret = ath10k_wmi_beacon_send(ar, &arg); - if (ret) - ath10k_warn("could not send beacon (%d)\n", ret); + arvif->beacon = bcn; - dev_kfree_skb_any(bcn); + ath10k_wmi_tx_beacon_nowait(arvif); + spin_unlock_bh(&ar->data_lock); } } @@ -919,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); } +static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); +} + +static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); +} + +static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, + struct sk_buff *skb) +{ + ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); +} + +static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, + u32 num_units, u32 unit_len) +{ + dma_addr_t paddr; + u32 pool_size; + int idx = ar->wmi.num_mem_chunks; + + pool_size = num_units * round_up(unit_len, 4); + + if (!pool_size) + return -EINVAL; + + ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, + pool_size, + &paddr, + GFP_ATOMIC); + if (!ar->wmi.mem_chunks[idx].vaddr) { + ath10k_warn("failed to allocate memory chunk\n"); + return -ENOMEM; + } + + memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size); + + ar->wmi.mem_chunks[idx].paddr = paddr; + ar->wmi.mem_chunks[idx].len = pool_size; + ar->wmi.mem_chunks[idx].req_id = req_id; + ar->wmi.num_mem_chunks++; + + return 0; +} +
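A sketch of the bookkeeping ath10k_wmi_alloc_host_mem() performs above, with pared-down hypothetical types: one DMA-coherent pool per firmware request, recorded so the INIT command can later hand the addresses back to the target. Illustrative only; the _sketch names are assumptions.

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

struct mem_chunk_sketch {
        void *vaddr;
        dma_addr_t paddr;
        u32 len;
        u32 req_id;
};

static int alloc_chunk_sketch(struct device *dev, struct mem_chunk_sketch *c,
                              u32 req_id, u32 num_units, u32 unit_len)
{
        c->len = num_units * round_up(unit_len, 4); /* keep units 4-byte aligned */

        c->vaddr = dma_alloc_coherent(dev, c->len, &c->paddr, GFP_ATOMIC);
        if (!c->vaddr)
                return -ENOMEM;

        c->req_id = req_id;     /* echoed back in the INIT command */
        return 0;
}

static void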
ath10k_wmi_service_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) { @@ -943,6 +1561,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->phy_capability = __le32_to_cpu(ev->phy_capability); ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); + /* only manually set fw features when not using FW IE format */ + if (ar->fw_api == 1 && ar->fw_version_build > 636) + set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); + if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); @@ -987,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, complete(&ar->wmi.service_ready); } +static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, + struct sk_buff *skb) +{ + u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; + int ret; + struct wmi_service_ready_event_10x *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) { + ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", + skb->len, sizeof(*ev)); + return; + } + + ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power); + ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power); + ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info); + ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info); + ar->fw_version_major = + (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24; + ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff); + ar->phy_capability = __le32_to_cpu(ev->phy_capability); + ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); + + if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { + ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", + ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); + ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; + } + + ar->ath_common.regulatory.current_rd = + __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); + + ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, + sizeof(ev->wmi_service_bitmap)); + + if (strlen(ar->hw->wiphy->fw_version) == 0) { + snprintf(ar->hw->wiphy->fw_version, + sizeof(ar->hw->wiphy->fw_version), + "%u.%u", + ar->fw_version_major, + ar->fw_version_minor); + } + + num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); + + if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { + ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", + num_mem_reqs); + return; + } + + if (!num_mem_reqs) + goto exit; + + ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", + num_mem_reqs); + + for (i = 0; i < num_mem_reqs; ++i) { + req_id = __le32_to_cpu(ev->mem_reqs[i].req_id); + num_units = __le32_to_cpu(ev->mem_reqs[i].num_units); + unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size); + num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info); + + if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) + /* number of units to allocate is number of + * peers, 1 extra for self peer on target */ + /* this needs to be tied, host and target + * can get out of sync */ + num_units = TARGET_10X_NUM_PEERS + 1; + else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) + num_units = TARGET_10X_NUM_VDEVS + 1; + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", + req_id, + __le32_to_cpu(ev->mem_reqs[i].num_units), + num_unit_info, + unit_size, + num_units); + + ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units, + unit_size); + if (ret) + return; + } + +exit: 
+ ath10k_dbg(ATH10K_DBG_WMI, + "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", + __le32_to_cpu(ev->sw_version), + __le32_to_cpu(ev->abi_version), + __le32_to_cpu(ev->phy_capability), + __le32_to_cpu(ev->ht_cap_info), + __le32_to_cpu(ev->vht_cap_info), + __le32_to_cpu(ev->vht_supp_mcs), + __le32_to_cpu(ev->sys_cap_info), + __le32_to_cpu(ev->num_mem_reqs), + __le32_to_cpu(ev->num_rf_chains)); + + complete(&ar->wmi.service_ready); +} + static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data; @@ -1007,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) return 0; } -static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_event_id id; @@ -1126,64 +1850,158 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb) dev_kfree_skb(skb); } -static void ath10k_wmi_event_work(struct work_struct *work) +static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) { - struct ath10k *ar = container_of(work, struct ath10k, - wmi.wmi_event_work); - struct sk_buff *skb; + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10x_event_id id; + u16 len; - for (;;) { - skb = skb_dequeue(&ar->wmi.wmi_event_list); - if (!skb) - break; + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); - ath10k_wmi_event_process(ar, skb); - } -} + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + return; -static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) -{ - struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data; - enum wmi_event_id event_id; + len = skb->len; - event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + trace_ath10k_wmi_event(id, skb->data, skb->len); - /* some events require to be handled ASAP - * thus can't be defered to a worker thread */ - switch (event_id) { - case WMI_HOST_SWBA_EVENTID: - case WMI_MGMT_RX_EVENTID: - ath10k_wmi_event_process(ar, skb); + switch (id) { + case WMI_10X_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! 
*/ return; + case WMI_10X_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_10X_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10X_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10X_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_10X_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_10X_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_10X_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_10X_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10X_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10X_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10X_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10X_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; + case WMI_10X_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_10X_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_10X_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_10X_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_10X_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_10X_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_10X_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_10X_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_10X_INST_RSSI_STATS_EVENTID: + ath10k_wmi_event_inst_rssi_stats(ar, skb); + break; + case WMI_10X_VDEV_STANDBY_REQ_EVENTID: + ath10k_wmi_event_vdev_standby_req(ar, skb); + break; + case WMI_10X_VDEV_RESUME_REQ_EVENTID: + ath10k_wmi_event_vdev_resume_req(ar, skb); + break; + case WMI_10X_SERVICE_READY_EVENTID: + ath10k_wmi_10x_service_ready_event_rx(ar, skb); + break; + case WMI_10X_READY_EVENTID: + ath10k_wmi_ready_event_rx(ar, skb); + break; default: + ath10k_warn("Unknown eventid: %d\n", id); break; } - skb_queue_tail(&ar->wmi.wmi_event_list, skb); - queue_work(ar->workqueue, &ar->wmi.wmi_event_work); + dev_kfree_skb(skb); +} + + +static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) +{ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + ath10k_wmi_10x_process_rx(ar, skb); + else + ath10k_wmi_main_process_rx(ar, skb); } /* WMI Initialization functions */ int ath10k_wmi_attach(struct ath10k *ar) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ar->wmi.cmd = &wmi_10x_cmd_map; + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; + ar->wmi.pdev_param = &wmi_10x_pdev_param_map; + } else { + ar->wmi.cmd = &wmi_cmd_map; + ar->wmi.vdev_param = &wmi_vdev_param_map; + ar->wmi.pdev_param = &wmi_pdev_param_map; + } + init_completion(&ar->wmi.service_ready); init_completion(&ar->wmi.unified_ready); - init_waitqueue_head(&ar->wmi.wq); - - skb_queue_head_init(&ar->wmi.wmi_event_list); - INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work); + init_waitqueue_head(&ar->wmi.tx_credits_wq); return 0; }
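A sketch of the indirection ath10k_wmi_attach() sets up above: generic code never hard-codes a wire ID, it reads it from the per-firmware map and rejects commands the firmware build lacks. Minimal hypothetical types for illustration; in the driver the actual guard lives in ath10k_wmi_cmd_send(), shown earlier.

#include <linux/types.h>
#include <linux/errno.h>

#define WMI_CMD_UNSUPPORTED_SKETCH 0

struct wmi_cmd_map_sketch {
        u32 mgmt_tx_cmdid;
        /* ...one member per abstract command... */
};

static int wmi_wire_id_sketch(const struct wmi_cmd_map_sketch *map)
{
        if (map->mgmt_tx_cmdid == WMI_CMD_UNSUPPORTED_SKETCH)
                return -EOPNOTSUPP;     /* e.g. a main-only cmd on 10.x fw */

        return map->mgmt_tx_cmdid;      /* wire ID for this firmware track */
}

void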
ath10k_wmi_detach(struct ath10k *ar) { - /* HTC should've drained the packets already */ - if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0)) - ath10k_warn("there are still pending packets\n"); + int i; + + /* free the host memory chunks requested by firmware */ + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + dma_free_coherent(ar->dev, + ar->wmi.mem_chunks[i].len, + ar->wmi.mem_chunks[i].vaddr, + ar->wmi.mem_chunks[i].paddr); + } - cancel_work_sync(&ar->wmi.wmi_event_work); - skb_queue_purge(&ar->wmi.wmi_event_list); + ar->wmi.num_mem_chunks = 0; } int ath10k_wmi_connect_htc_service(struct ath10k *ar) @@ -1198,6 +2016,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar) /* these fields are the same for all service endpoints */ conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete; conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx; + conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits; /* connect to control service */ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL; @@ -1234,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", rd, rd2g, rd5g, ctl2g, ctl5g); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_regdomain_cmdid); } int ath10k_wmi_pdev_set_channel(struct ath10k *ar, @@ -1264,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, "wmi set channel mode %d freq %d\n", arg->mode, arg->freq); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_channel_cmdid); } int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) @@ -1279,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar) cmd = (struct wmi_pdev_suspend_cmd *)skb->data; cmd->suspend_opt = WMI_PDEV_SUSPEND; - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); } int ath10k_wmi_pdev_resume_target(struct ath10k *ar) @@ -1290,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar) if (skb == NULL) return -ENOMEM; - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); } -int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, - u32 value) +int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) { struct wmi_pdev_set_param_cmd *cmd; struct sk_buff *skb; + if (id == WMI_PDEV_PARAM_UNSUPPORTED) { + ath10k_warn("pdev param %d not supported by firmware\n", id); + return -EOPNOTSUPP; + } + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -1309,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", id, value); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); } -int ath10k_wmi_cmd_init(struct ath10k *ar) +static int ath10k_wmi_main_cmd_init(struct ath10k *ar) { struct wmi_init_cmd *cmd; struct sk_buff *buf; struct wmi_resource_config config = {}; - u32 val; + u32 len, val; + int i; config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS); config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS); @@ -1370,23 +2196,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC); 
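+ /* Each wmi_resource_config field is stored little-endian; the target + * parses this struct directly, hence the __cpu_to_le32() on every + * assignment before it is copied into the command buffer below. */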
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES); - buf = ath10k_wmi_alloc_skb(sizeof(*cmd)); + len = sizeof(*cmd) + + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + + buf = ath10k_wmi_alloc_skb(len); if (!buf) return -ENOMEM; cmd = (struct wmi_init_cmd *)buf->data; - cmd->num_host_mem_chunks = 0; + + if (ar->wmi.num_mem_chunks == 0) { + cmd->num_host_mem_chunks = 0; + goto out; + } + + ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ar->wmi.num_mem_chunks); + + cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + cmd->host_mem_chunks[i].ptr = + __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + cmd->host_mem_chunks[i].size = + __cpu_to_le32(ar->wmi.mem_chunks[i].len); + cmd->host_mem_chunks[i].req_id = + __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%x\n", + i, + ar->wmi.mem_chunks[i].len, + ar->wmi.mem_chunks[i].paddr); + } +out: memcpy(&cmd->resource_config, &config, sizeof(config)); ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); - return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID); + return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); } -static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg) +static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10x *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10x config = {}; + u32 len, val; + int i; + + config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); + config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); + config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); + + config.scan_max_pending_reqs = + __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); + + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); + + config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); + + val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + + config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); + + config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); + + len =
sizeof(*cmd) + + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + + buf = ath10k_wmi_alloc_skb(len); + if (!buf) + return -ENOMEM; + + cmd = (struct wmi_init_cmd_10x *)buf->data; + + if (ar->wmi.num_mem_chunks == 0) { + cmd->num_host_mem_chunks = 0; + goto out; + } + + ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ar->wmi.num_mem_chunks); + + cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + cmd->host_mem_chunks[i].ptr = + __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + cmd->host_mem_chunks[i].size = + __cpu_to_le32(ar->wmi.mem_chunks[i].len); + cmd->host_mem_chunks[i].req_id = + __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%x\n", + i, + ar->wmi.mem_chunks[i].len, + ar->wmi.mem_chunks[i].paddr); + } +out: + memcpy(&cmd->resource_config, &config, sizeof(config)); + + ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); + return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); +} + +int ath10k_wmi_cmd_init(struct ath10k *ar) +{ + int ret; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + ret = ath10k_wmi_10x_cmd_init(ar); + else + ret = ath10k_wmi_main_cmd_init(ar); + + return ret; +} + +static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar, + const struct wmi_start_scan_arg *arg) { int len; - len = sizeof(struct wmi_start_scan_cmd); + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + len = sizeof(struct wmi_start_scan_cmd_10x); + else + len = sizeof(struct wmi_start_scan_cmd); if (arg->ie_len) { if (!arg->ie) @@ -1446,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar, int len = 0; int i; - len = ath10k_wmi_start_scan_calc_len(arg); + len = ath10k_wmi_start_scan_calc_len(ar, arg); if (len < 0) return len; /* len contains error code here */ @@ -1478,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar, cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags); /* TLV list starts after fields included in the struct */ - off = sizeof(*cmd); + /* There's just one field that differs between the two start_scan + * structures - burst_duration, which we do not use anyway - so + * there is no point in splitting the command here; just size the + * offset to match the firmware in use. */ + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + off = sizeof(struct wmi_start_scan_cmd_10x); + else + off = sizeof(struct wmi_start_scan_cmd); if (arg->n_channels) { channels = (void *)skb->data + off; @@ -1540,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar, } ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); - return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); } void ath10k_wmi_start_scan_init(struct ath10k *ar, @@ -1556,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar, arg->repeat_probe_time = 0; arg->probe_spacing_time = 0; arg->idle_time = 0; - arg->max_scan_time = 5000; + arg->max_scan_time = 20000; arg->probe_delay = 5; arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | WMI_SCAN_EVENT_COMPLETED @@ -1600,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) ath10k_dbg(ATH10K_DBG_WMI, "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", arg->req_id, arg->req_type, arg->u.scan_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); } int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", vdev_id, type, subtype, macaddr); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); } int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) @@ -1643,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) ath10k_dbg(ATH10K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); } static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, const struct wmi_vdev_start_request_arg *arg, - enum wmi_cmd_id cmd_id) + u32 cmd_id) { struct wmi_vdev_start_request_cmd *cmd; struct sk_buff *skb; const char *cmdname; u32 flags = 0; - if (cmd_id != WMI_VDEV_START_REQUEST_CMDID && - cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID) + if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid && + cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid) return -EINVAL; if (WARN_ON(arg->ssid && arg->ssid_len == 0)) return -EINVAL; @@ -1665,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) return -EINVAL; - if (cmd_id == WMI_VDEV_START_REQUEST_CMDID) + if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid) cmdname = "start"; - else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID) + else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid) cmdname = "restart"; else return -EINVAL; /* should not happen, we already check cmd_id */ @@ -1718,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, int ath10k_wmi_vdev_start(struct ath10k *ar, const struct wmi_vdev_start_request_arg *arg) { - return ath10k_wmi_vdev_start_restart(ar, arg, - WMI_VDEV_START_REQUEST_CMDID); + u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid; + + return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id); } int ath10k_wmi_vdev_restart(struct ath10k *ar, const struct wmi_vdev_start_request_arg *arg) { - return ath10k_wmi_vdev_start_restart(ar, arg, - WMI_VDEV_RESTART_REQUEST_CMDID); + u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid; + + return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id); } int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) @@ -1743,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); } int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) @@ -1758,13 +2728,13 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) cmd = (struct wmi_vdev_up_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->vdev_assoc_id = __cpu_to_le32(aid); - memcpy(&cmd->vdev_bssid.addr, bssid, 6); + memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); } int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) @@ -1782,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt vdev down id 0x%x\n", vdev_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID); + 
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); } int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, - enum wmi_vdev_param param_id, u32 param_value) + u32 param_id, u32 param_value) { struct wmi_vdev_set_param_cmd *cmd; struct sk_buff *skb; + if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { + ath10k_dbg(ATH10K_DBG_WMI, + "vdev param %d not supported by firmware\n", + param_id); + return -EOPNOTSUPP; + } + skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -1804,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, "wmi vdev id 0x%x set param %d value %d\n", vdev_id, param_id, param_value); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); } int ath10k_wmi_vdev_install_key(struct ath10k *ar, @@ -1839,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev install key idx %d cipher %d len %d\n", arg->key_idx, arg->key_cipher, arg->key_len); - return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->vdev_install_key_cmdid); } int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, @@ -1859,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, ath10k_dbg(ATH10K_DBG_WMI, "wmi peer create vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); } int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, @@ -1879,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, ath10k_dbg(ATH10K_DBG_WMI, "wmi peer delete vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); } int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, @@ -1900,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, ath10k_dbg(ATH10K_DBG_WMI, "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", vdev_id, peer_addr, tid_bitmap); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); } int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, @@ -1918,13 +2896,13 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - memcpy(&cmd->peer_macaddr.addr, peer_addr, 6); + memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_value); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); } int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, @@ -1945,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, "wmi set powersave id 0x%x mode %d\n", vdev_id, psmode); - return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->sta_powersave_mode_cmdid); } int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, @@ -1967,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, ath10k_dbg(ATH10K_DBG_WMI, "wmi sta ps param vdev_id 0x%x param %d value %d\n", 
vdev_id, param_id, value); - return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->sta_powersave_param_cmdid); } int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, @@ -1993,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", vdev_id, param_id, value, mac); - return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->ap_ps_peer_param_cmdid); } int ath10k_wmi_scan_chan_list(struct ath10k *ar, @@ -2046,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, ci->flags |= __cpu_to_le32(flags); } - return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); } int ath10k_wmi_peer_assoc(struct ath10k *ar, @@ -2105,10 +3086,11 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, ath10k_dbg(ATH10K_DBG_WMI, "wmi peer assoc vdev %d addr %pM\n", arg->vdev_id, arg->addr); - return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); } -int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) +int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, + const struct wmi_bcn_tx_arg *arg) { struct wmi_bcn_tx_cmd *cmd; struct sk_buff *skb; @@ -2124,7 +3106,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg) cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len); memcpy(cmd->bcn, arg->bcn, arg->bcn_len); - return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID); + return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid); } static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params, @@ -2155,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); - return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID); + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_wmm_params_cmdid); } int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) @@ -2171,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) cmd->stats_id = __cpu_to_le32(stats_id); ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); - return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); } int ath10k_wmi_force_fw_hang(struct ath10k *ar, @@ -2190,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", type, delay_ms); - return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID); + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2c5a4f8daf2e..78c991aec7f9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -208,6 +208,118 @@ struct wmi_mac_addr { (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \ } while (0) +struct wmi_cmd_map { + u32 init_cmdid; + u32 start_scan_cmdid; + u32 stop_scan_cmdid; + u32 scan_chan_list_cmdid; + u32 scan_sch_prio_tbl_cmdid; + u32 pdev_set_regdomain_cmdid; + u32 pdev_set_channel_cmdid; + u32 pdev_set_param_cmdid; + u32 
pdev_pktlog_enable_cmdid; + u32 pdev_pktlog_disable_cmdid; + u32 pdev_set_wmm_params_cmdid; + u32 pdev_set_ht_cap_ie_cmdid; + u32 pdev_set_vht_cap_ie_cmdid; + u32 pdev_set_dscp_tid_map_cmdid; + u32 pdev_set_quiet_mode_cmdid; + u32 pdev_green_ap_ps_enable_cmdid; + u32 pdev_get_tpc_config_cmdid; + u32 pdev_set_base_macaddr_cmdid; + u32 vdev_create_cmdid; + u32 vdev_delete_cmdid; + u32 vdev_start_request_cmdid; + u32 vdev_restart_request_cmdid; + u32 vdev_up_cmdid; + u32 vdev_stop_cmdid; + u32 vdev_down_cmdid; + u32 vdev_set_param_cmdid; + u32 vdev_install_key_cmdid; + u32 peer_create_cmdid; + u32 peer_delete_cmdid; + u32 peer_flush_tids_cmdid; + u32 peer_set_param_cmdid; + u32 peer_assoc_cmdid; + u32 peer_add_wds_entry_cmdid; + u32 peer_remove_wds_entry_cmdid; + u32 peer_mcast_group_cmdid; + u32 bcn_tx_cmdid; + u32 pdev_send_bcn_cmdid; + u32 bcn_tmpl_cmdid; + u32 bcn_filter_rx_cmdid; + u32 prb_req_filter_rx_cmdid; + u32 mgmt_tx_cmdid; + u32 prb_tmpl_cmdid; + u32 addba_clear_resp_cmdid; + u32 addba_send_cmdid; + u32 addba_status_cmdid; + u32 delba_send_cmdid; + u32 addba_set_resp_cmdid; + u32 send_singleamsdu_cmdid; + u32 sta_powersave_mode_cmdid; + u32 sta_powersave_param_cmdid; + u32 sta_mimo_ps_mode_cmdid; + u32 pdev_dfs_enable_cmdid; + u32 pdev_dfs_disable_cmdid; + u32 roam_scan_mode; + u32 roam_scan_rssi_threshold; + u32 roam_scan_period; + u32 roam_scan_rssi_change_threshold; + u32 roam_ap_profile; + u32 ofl_scan_add_ap_profile; + u32 ofl_scan_remove_ap_profile; + u32 ofl_scan_period; + u32 p2p_dev_set_device_info; + u32 p2p_dev_set_discoverability; + u32 p2p_go_set_beacon_ie; + u32 p2p_go_set_probe_resp_ie; + u32 p2p_set_vendor_ie_data_cmdid; + u32 ap_ps_peer_param_cmdid; + u32 ap_ps_peer_uapsd_coex_cmdid; + u32 peer_rate_retry_sched_cmdid; + u32 wlan_profile_trigger_cmdid; + u32 wlan_profile_set_hist_intvl_cmdid; + u32 wlan_profile_get_profile_data_cmdid; + u32 wlan_profile_enable_profile_id_cmdid; + u32 wlan_profile_list_profile_id_cmdid; + u32 pdev_suspend_cmdid; + u32 pdev_resume_cmdid; + u32 add_bcn_filter_cmdid; + u32 rmv_bcn_filter_cmdid; + u32 wow_add_wake_pattern_cmdid; + u32 wow_del_wake_pattern_cmdid; + u32 wow_enable_disable_wake_event_cmdid; + u32 wow_enable_cmdid; + u32 wow_hostwakeup_from_sleep_cmdid; + u32 rtt_measreq_cmdid; + u32 rtt_tsf_cmdid; + u32 vdev_spectral_scan_configure_cmdid; + u32 vdev_spectral_scan_enable_cmdid; + u32 request_stats_cmdid; + u32 set_arp_ns_offload_cmdid; + u32 network_list_offload_config_cmdid; + u32 gtk_offload_cmdid; + u32 csa_offload_enable_cmdid; + u32 csa_offload_chanswitch_cmdid; + u32 chatter_set_mode_cmdid; + u32 peer_tid_addba_cmdid; + u32 peer_tid_delba_cmdid; + u32 sta_dtim_ps_method_cmdid; + u32 sta_uapsd_auto_trig_cmdid; + u32 sta_keepalive_cmd; + u32 echo_cmdid; + u32 pdev_utf_cmdid; + u32 dbglog_cfg_cmdid; + u32 pdev_qvit_cmdid; + u32 pdev_ftm_intg_cmdid; + u32 vdev_set_keepalive_cmdid; + u32 vdev_get_keepalive_cmdid; + u32 force_fw_hang_cmdid; + u32 gpio_config_cmdid; + u32 gpio_output_cmdid; +}; + /* * wmi command groups. */ @@ -247,7 +359,9 @@ enum wmi_cmd_group { #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) #define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1) -/* Command IDs and commande events. */ +#define WMI_CMD_UNSUPPORTED 0 + +/* Command IDs and command events for MAIN FW. 
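+ * 10.X firmware numbers its commands differently (see enum
+ * wmi_10x_cmd_id below); struct wmi_cmd_map is what translates the
+ * abstract ids used by the driver into whichever set the running
+ * firmware expects.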
*/ enum wmi_cmd_id { WMI_INIT_CMDID = 0x1, @@ -488,6 +602,217 @@ enum wmi_event_id { WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO), }; +/* Command IDs and command events for 10.X firmware */ +enum wmi_10x_cmd_id { + WMI_10X_START_CMDID = 0x9000, + WMI_10X_END_CMDID = 0x9FFF, + + /* initialize the wlan subsystem */ + WMI_10X_INIT_CMDID, + + /* Scan specific commands */ + + WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID, + WMI_10X_STOP_SCAN_CMDID, + WMI_10X_SCAN_CHAN_LIST_CMDID, + WMI_10X_ECHO_CMDID, + + /* PDEV(physical device) specific commands */ + WMI_10X_PDEV_SET_REGDOMAIN_CMDID, + WMI_10X_PDEV_SET_CHANNEL_CMDID, + WMI_10X_PDEV_SET_PARAM_CMDID, + WMI_10X_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10X_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10X_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10X_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10X_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10X_PDEV_SET_QUIET_MODE_CMDID, + WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10X_PDEV_GET_TPC_CONFIG_CMDID, + + /* VDEV(virtual device) specific commands */ + WMI_10X_VDEV_CREATE_CMDID, + WMI_10X_VDEV_DELETE_CMDID, + WMI_10X_VDEV_START_REQUEST_CMDID, + WMI_10X_VDEV_RESTART_REQUEST_CMDID, + WMI_10X_VDEV_UP_CMDID, + WMI_10X_VDEV_STOP_CMDID, + WMI_10X_VDEV_DOWN_CMDID, + WMI_10X_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10X_VDEV_RESUME_RESPONSE_CMDID, + WMI_10X_VDEV_SET_PARAM_CMDID, + WMI_10X_VDEV_INSTALL_KEY_CMDID, + + /* peer specific commands */ + WMI_10X_PEER_CREATE_CMDID, + WMI_10X_PEER_DELETE_CMDID, + WMI_10X_PEER_FLUSH_TIDS_CMDID, + WMI_10X_PEER_SET_PARAM_CMDID, + WMI_10X_PEER_ASSOC_CMDID, + WMI_10X_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10X_PEER_MCAST_GROUP_CMDID, + + /* beacon/management specific commands */ + + WMI_10X_BCN_TX_CMDID, + WMI_10X_BCN_PRB_TMPL_CMDID, + WMI_10X_BCN_FILTER_RX_CMDID, + WMI_10X_PRB_REQ_FILTER_RX_CMDID, + WMI_10X_MGMT_TX_CMDID, + + /* commands to directly control ba negotiation from host. */ + WMI_10X_ADDBA_CLEAR_RESP_CMDID, + WMI_10X_ADDBA_SEND_CMDID, + WMI_10X_ADDBA_STATUS_CMDID, + WMI_10X_DELBA_SEND_CMDID, + WMI_10X_ADDBA_SET_RESP_CMDID, + WMI_10X_SEND_SINGLEAMSDU_CMDID, + + /* Station power save specific config */ + WMI_10X_STA_POWERSAVE_MODE_CMDID, + WMI_10X_STA_POWERSAVE_PARAM_CMDID, + WMI_10X_STA_MIMO_PS_MODE_CMDID, + + /* set debug log config */ + WMI_10X_DBGLOG_CFG_CMDID, + + /* DFS-specific commands */ + WMI_10X_PDEV_DFS_ENABLE_CMDID, + WMI_10X_PDEV_DFS_DISABLE_CMDID, + + /* QVIT specific command id */ + WMI_10X_PDEV_QVIT_CMDID, + + /* Offload Scan and Roaming related commands */ + WMI_10X_ROAM_SCAN_MODE, + WMI_10X_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10X_ROAM_SCAN_PERIOD, + WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10X_ROAM_AP_PROFILE, + WMI_10X_OFL_SCAN_ADD_AP_PROFILE, + WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10X_OFL_SCAN_PERIOD, + + /* P2P specific commands */ + WMI_10X_P2P_DEV_SET_DEVICE_INFO, + WMI_10X_P2P_DEV_SET_DISCOVERABILITY, + WMI_10X_P2P_GO_SET_BEACON_IE, + WMI_10X_P2P_GO_SET_PROBE_RESP_IE, + + /* AP power save specific config */ + WMI_10X_AP_PS_PEER_PARAM_CMDID, + WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID, + + /* Rate-control specific commands */ + WMI_10X_PEER_RATE_RETRY_SCHED_CMDID, + + /* WLAN Profiling commands.
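+ * These map to the wlan_profile_*_cmdid slots of struct wmi_cmd_map.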
*/ + WMI_10X_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + + /* Suspend resume command Ids */ + WMI_10X_PDEV_SUSPEND_CMDID, + WMI_10X_PDEV_RESUME_CMDID, + + /* Beacon filter commands */ + WMI_10X_ADD_BCN_FILTER_CMDID, + WMI_10X_RMV_BCN_FILTER_CMDID, + + /* WOW Specific WMI commands */ + WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10X_WOW_ENABLE_CMDID, + WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + + /* RTT measurement related cmd */ + WMI_10X_RTT_MEASREQ_CMDID, + WMI_10X_RTT_TSF_CMDID, + + /* transmit beacon by value */ + WMI_10X_PDEV_SEND_BCN_CMDID, + + /* F/W stats */ + WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10X_REQUEST_STATS_CMDID, + + /* GPIO Configuration */ + WMI_10X_GPIO_CONFIG_CMDID, + WMI_10X_GPIO_OUTPUT_CMDID, + + WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1, +}; + +enum wmi_10x_event_id { + WMI_10X_SERVICE_READY_EVENTID = 0x8000, + WMI_10X_READY_EVENTID, + WMI_10X_START_EVENTID = 0x9000, + WMI_10X_END_EVENTID = 0x9FFF, + + /* Scan specific events */ + WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID, + WMI_10X_ECHO_EVENTID, + WMI_10X_DEBUG_MESG_EVENTID, + WMI_10X_UPDATE_STATS_EVENTID, + + /* Instantaneous RSSI event */ + WMI_10X_INST_RSSI_STATS_EVENTID, + + /* VDEV specific events */ + WMI_10X_VDEV_START_RESP_EVENTID, + WMI_10X_VDEV_STANDBY_REQ_EVENTID, + WMI_10X_VDEV_RESUME_REQ_EVENTID, + WMI_10X_VDEV_STOPPED_EVENTID, + + /* peer specific events */ + WMI_10X_PEER_STA_KICKOUT_EVENTID, + + /* beacon/mgmt specific events */ + WMI_10X_HOST_SWBA_EVENTID, + WMI_10X_TBTTOFFSET_UPDATE_EVENTID, + WMI_10X_MGMT_RX_EVENTID, + + /* Channel stats event */ + WMI_10X_CHAN_INFO_EVENTID, + + /* PHY Error specific WMI event */ + WMI_10X_PHYERR_EVENTID, + + /* Roam event to trigger roaming on host */ + WMI_10X_ROAM_EVENTID, + + /* matching AP found from list of profiles */ + WMI_10X_PROFILE_MATCH, + + /* debug print message used for tracing FW code while debugging */ + WMI_10X_DEBUG_PRINT_EVENTID, + /* VI specific event */ + WMI_10X_PDEV_QVIT_EVENTID, + /* FW code profile data in response to profile request */ + WMI_10X_WLAN_PROFILE_DATA_EVENTID, + + /* RTT related event ID */ + WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10X_RTT_ERROR_REPORT_EVENTID, + + WMI_10X_WOW_WAKEUP_HOST_EVENTID, + WMI_10X_DCS_INTERFERENCE_EVENTID, + + /* TPC config for the current operating channel */ + WMI_10X_PDEV_TPC_CONFIG_EVENTID, + + WMI_10X_GPIO_INPUT_EVENTID, + WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1, +}; + enum wmi_phy_mode { MODE_11A = 0, /* 11a Mode */ MODE_11G = 1, /* 11b/g Mode */ @@ -508,6 +833,48 @@ enum wmi_phy_mode { MODE_MAX = 14 }; +static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) +{ + switch (mode) { + case MODE_11A: + return "11a"; + case MODE_11G: + return "11g"; + case MODE_11B: + return "11b"; + case MODE_11GONLY: + return "11gonly"; + case MODE_11NA_HT20: + return "11na-ht20"; + case MODE_11NG_HT20: + return "11ng-ht20"; + case MODE_11NA_HT40: + return "11na-ht40"; + case MODE_11NG_HT40: + return "11ng-ht40"; + case MODE_11AC_VHT20: + return "11ac-vht20"; + case MODE_11AC_VHT40: + return "11ac-vht40"; + case MODE_11AC_VHT80: + return "11ac-vht80"; + case MODE_11AC_VHT20_2G: + return "11ac-vht20-2g"; + case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g"; + case MODE_11AC_VHT80_2G: + return "11ac-vht80-2g"; + case MODE_UNKNOWN: + /* skip */ + break; + + /* no default handler to allow compiler to check that the + * enum is fully handled */ + }; + + return "<unknown>"; +} + #define WMI_CHAN_LIST_TAG 0x1 #define WMI_SSID_LIST_TAG 0x2 #define WMI_BSSID_LIST_TAG 0x3 @@ -763,13 +1130,45 @@ struct wmi_service_ready_event { struct wlan_host_mem_req mem_reqs[1]; } __packed; -/* - * status consists of upper 16 bits fo int status and lower 16 bits of - * module ID that retuned status - */ -#define WLAN_INIT_STATUS_SUCCESS 0x0 -#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff) -#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff) +/* This is the definition from 10.X firmware branch */ +struct wmi_service_ready_event_10x { + __le32 sw_version; + __le32 abi_version; + + /* WMI_PHY_CAPABILITY */ + __le32 phy_capability; + + /* Maximum number of frag table entries that SW will populate less 1 */ + __le32 max_frag_entry; + __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + __le32 num_rf_chains; + + /* + * The following field is only valid for service type + * WMI_SERVICE_11AC + */ + __le32 ht_cap_info; /* WMI HT Capability */ + __le32 vht_cap_info; /* VHT capability info field of 802.11ac */ + __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */ + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + + struct hal_reg_capabilities hal_reg_capabilities; + + __le32 sys_cap_info; + __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */ + + /* + * request to host to allocate a chuck of memory and pss it down to FW + * via WM_INIT. FW uses this as FW extesnsion memory for saving its + * data structures. Only valid for low latency interfaces like PCIE + * where FW can access this memory directly (or) by DMA. + */ + __le32 num_mem_reqs; + + struct wlan_host_mem_req mem_reqs[1]; +} __packed; + #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ) #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ) @@ -978,6 +1377,192 @@ struct wmi_resource_config { __le32 max_frag_entries; } __packed; +struct wmi_resource_config_10x { + /* number of virtual devices (VAPs) to support */ + __le32 num_vdevs; + + /* number of peer nodes to support */ + __le32 num_peers; + + /* number of keys per peer */ + __le32 num_peer_keys; + + /* total number of TX/RX data TIDs */ + __le32 num_tids; + + /* + * max skid for resolving hash collisions + * + * The address search table is sparse, so that if two MAC addresses + * result in the same hash value, the second of these conflicting + * entries can slide to the next index in the address search table, + * and use it, if it is unoccupied. This ast_skid_limit parameter + * specifies the upper bound on how many subsequent indices to search + * over to find an unoccupied space. + */ + __le32 ast_skid_limit; + + /* + * the nominal chain mask for transmit + * + * The chain mask may be modified dynamically, e.g. to operate AP + * tx with a reduced number of chains if no clients are associated. + * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of tx chains. + */ + __le32 tx_chain_mask; + + /* + * the nominal chain mask for receive + * + * The chain mask may be modified dynamically, e.g. for a client + * to use a reduced number of chains for receive if the traffic to + * the client is low enough that it doesn't require downlink MIMO + * or antenna diversity. 
+ * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of rx chains. + */ + __le32 rx_chain_mask; + + /* + * what rx reorder timeout (ms) to use for the AC + * + * Each WMM access class (voice, video, best-effort, background) will + * have its own timeout value to dictate how long to wait for missing + * rx MPDUs to arrive before flushing subsequent MPDUs that have + * already been received. + * This parameter specifies the timeout in milliseconds for each + * class. + */ + __le32 rx_timeout_pri_vi; + __le32 rx_timeout_pri_vo; + __le32 rx_timeout_pri_be; + __le32 rx_timeout_pri_bk; + + /* + * what mode the rx should decap packets to + * + * MAC can decap to RAW (no decap), native wifi or Ethernet types + * This setting also determines the default TX behavior, however TX + * behavior can be modified on a per VAP basis during VAP init + */ + __le32 rx_decap_mode; + + /* the maximum number of scan requests that can be queued */ + __le32 scan_max_pending_reqs; + + /* maximum VDEV that could use BMISS offload */ + __le32 bmiss_offload_max_vdev; + + /* maximum VDEV that could use offload roaming */ + __le32 roam_offload_max_vdev; + + /* maximum AP profiles that would push to offload roaming */ + __le32 roam_offload_max_ap_profiles; + + /* + * how many groups to use for mcast->ucast conversion + * + * The target's WAL maintains a table to hold information regarding + * which peers belong to a given multicast group, so that if + * multicast->unicast conversion is enabled, the target can convert + * multicast tx frames to a series of unicast tx frames, to each + * peer within the multicast group. + * This num_mcast_groups configuration parameter tells the target how + * many multicast groups to provide storage for within its multicast + * group membership table. + */ + __le32 num_mcast_groups; + + /* + * size to alloc for the mcast membership table + * + * This num_mcast_table_elems configuration parameter tells the + * target how many peer elements it needs to provide storage for in + * its multicast group membership table. + * These multicast group membership table elements are shared by the + * multicast groups stored within the table. + */ + __le32 num_mcast_table_elems; + + /* + * whether/how to do multicast->unicast conversion + * + * This configuration parameter specifies whether the target should + * perform multicast --> unicast conversion on transmit, and if so, + * what to do if it finds no entries in its multicast group + * membership table for the multicast IP address in the tx frame. + * Configuration value: + * 0 -> Do not perform multicast to unicast conversion. + * 1 -> Convert multicast frames to unicast, if the IP multicast + * address from the tx frame is found in the multicast group + * membership table. If the IP multicast address is not found, + * drop the frame. + * 2 -> Convert multicast frames to unicast, if the IP multicast + * address from the tx frame is found in the multicast group + * membership table. If the IP multicast address is not found, + * transmit the frame as multicast. + */ + __le32 mcast2ucast_mode; + + /* + * how much memory to allocate for a tx PPDU dbg log + * + * This parameter controls how much memory the target will allocate + * to store a log of tx PPDU meta-information (how large the PPDU + * was, when it was sent, whether it was successful, etc.)
+ */ + __le32 tx_dbg_log_size; + + /* how many AST entries to be allocated for WDS */ + __le32 num_wds_entries; + + /* + * MAC DMA burst size, e.g. for target PCI the limit can be + * 0 - default, 1 - 256B + */ + __le32 dma_burst_size; + + /* + * Fixed delimiters to be inserted after every MPDU to + * account for interface latency to avoid underrun. + */ + __le32 mac_aggr_delim; + + /* + * determine whether target is responsible for detecting duplicate + * non-aggregate MPDU and timing out stale fragments. + * + * A-MPDU reordering is always performed on the target. + * + * 0: target responsible for frag timeout and dup checking + * 1: host responsible for frag timeout and dup checking + */ + __le32 rx_skip_defrag_timeout_dup_detection_check; + + /* + * Configuration for VoW : + * No of Video Nodes to be supported + * and Max no of descriptors for each Video link (node). + */ + __le32 vow_config; + + /* Number of msdu descriptors target should use */ + __le32 num_msdu_desc; + + /* + * Max. number of Tx fragments per MSDU + * This parameter controls the max number of Tx fragments per MSDU. + * This is sent by the target as part of the WMI_SERVICE_READY event + * and is overridden by the OS shim as required. + */ + __le32 max_frag_entries; +} __packed; + + +#define NUM_UNITS_IS_NUM_VDEVS 0x1 +#define NUM_UNITS_IS_NUM_PEERS 0x2 + /* strucutre describing host memory chunk. */ struct host_memory_chunk { /* id of the request that is passed up in service ready */ @@ -999,6 +1584,18 @@ struct wmi_init_cmd { struct host_memory_chunk host_mem_chunks[1]; } __packed; +/* _10x structure is from 10.X FW API */ +struct wmi_init_cmd_10x { + struct wmi_resource_config_10x resource_config; + __le32 num_host_mem_chunks; + + /* + * variable number of host memory chunks. + * This should be the last element in the structure + */ + struct host_memory_chunk host_mem_chunks[1]; +} __packed; + /* TLV for channel list */ struct wmi_chan_list { __le32 tag; /* WMI_CHAN_LIST_TAG */ @@ -1118,6 +1715,88 @@ struct wmi_start_scan_cmd { */ } __packed; +/* This is the definition from 10.X firmware branch */ +struct wmi_start_scan_cmd_10x { + /* Scan ID */ + __le32 scan_id; + + /* Scan requestor ID */ + __le32 scan_req_id; + + /* VDEV id(interface) that is requesting scan */ + __le32 vdev_id; + + /* Scan Priority, input to scan scheduler */ + __le32 scan_priority; + + /* Scan events subscription */ + __le32 notify_scan_events; + + /* dwell time in msec on active channels */ + __le32 dwell_time_active; + + /* dwell time in msec on passive channels */ + __le32 dwell_time_passive; + + /* + * min time in msec on the BSS channel, only valid if at least one + * VDEV is active + */ + __le32 min_rest_time; + + /* + * max rest time in msec on the BSS channel, only valid if at least + * one VDEV is active + */ + /* + * the scanner will rest on the bss channel at least min_rest_time. + * After min_rest_time the scanner will start checking for tx/rx + * activity on all VDEVs. if there is no activity the scanner will + * switch to off channel. if there is activity the scanner will leave + * the radio on the bss channel until max_rest_time expires. At + * max_rest_time the scanner will switch to off channel irrespective of + * activity. Activity is determined by the idle_time parameter. + */ + __le32 max_rest_time; + + /* + * time before sending next set of probe requests. + * The scanner keeps repeating probe requests transmission with + * period specified by repeat_probe_time.
+ * The number of probe requests specified depends on the ssid_list + * and bssid_list + */ + __le32 repeat_probe_time; + + /* time in msec between 2 consecutive probe requests within a set. */ + __le32 probe_spacing_time; + + /* + * data inactivity time in msec on bss channel that will be used by + * scanner for measuring the inactivity. + */ + __le32 idle_time; + + /* maximum time in msec allowed for scan */ + __le32 max_scan_time; + + /* + * delay in msec before sending first probe request after switching + * to a channel + */ + __le32 probe_delay; + + /* Scan control flags */ + __le32 scan_ctrl_flags; + + /* + * TLV (tag length value) parameters follow the scan_cmd structure. + * The TLV can contain a channel list, bssid list, ssid list and + * IEs. The TLV tags are defined above; + */ +} __packed; + + struct wmi_ssid_arg { int len; const u8 *ssid; @@ -1268,7 +1947,7 @@ struct wmi_scan_event { * good idea to pass all the fields in the RX status * descriptor up to the host. */ -struct wmi_mgmt_rx_hdr { +struct wmi_mgmt_rx_hdr_v1 { __le32 channel; __le32 snr; __le32 rate; @@ -1277,8 +1956,18 @@ struct wmi_mgmt_rx_hdr { __le32 status; /* %WMI_RX_STATUS_ */ } __packed; -struct wmi_mgmt_rx_event { - struct wmi_mgmt_rx_hdr hdr; +struct wmi_mgmt_rx_hdr_v2 { + struct wmi_mgmt_rx_hdr_v1 v1; + __le32 rssi_ctl[4]; +} __packed; + +struct wmi_mgmt_rx_event_v1 { + struct wmi_mgmt_rx_hdr_v1 hdr; + u8 buf[0]; +} __packed; + +struct wmi_mgmt_rx_event_v2 { + struct wmi_mgmt_rx_hdr_v2 hdr; u8 buf[0]; } __packed; @@ -1465,6 +2154,60 @@ struct wmi_csa_event { #define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500 #define PEER_DEFAULT_STATS_UPDATE_PERIOD 500 +struct wmi_pdev_param_map { + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 txpower_limit2g; + u32 txpower_limit5g; + u32 txpower_scale; + u32 beacon_gen_mode; + u32 beacon_tx_mode; + u32 resmgr_offchan_mode; + u32 protection_mode; + u32 dynamic_bw; + u32 non_agg_sw_retry_th; + u32 agg_sw_retry_th; + u32 sta_kickout_th; + u32 ac_aggrsize_scaling; + u32 ltr_enable; + u32 ltr_ac_latency_be; + u32 ltr_ac_latency_bk; + u32 ltr_ac_latency_vi; + u32 ltr_ac_latency_vo; + u32 ltr_ac_latency_timeout; + u32 ltr_sleep_override; + u32 ltr_rx_override; + u32 ltr_tx_activity_timeout; + u32 l1ss_enable; + u32 dsleep_enable; + u32 pcielp_txbuf_flush; + u32 pcielp_txbuf_watermark; + u32 pcielp_txbuf_tmo_en; + u32 pcielp_txbuf_tmo_value; + u32 pdev_stats_update_period; + u32 vdev_stats_update_period; + u32 peer_stats_update_period; + u32 bcnflt_stats_update_period; + u32 pmf_qos; + u32 arp_ac_override; + u32 arpdhcp_ac_override; + u32 dcs; + u32 ani_enable; + u32 ani_poll_period; + u32 ani_listen_period; + u32 ani_ofdm_level; + u32 ani_cck_level; + u32 dyntxchain; + u32 proxy_sta; + u32 idle_ps_config; + u32 power_gating_sleep; + u32 fast_channel_reset; + u32 burst_dur; + u32 burst_enable; +}; + +#define WMI_PDEV_PARAM_UNSUPPORTED 0 + enum wmi_pdev_param { /* TX chian mask */ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1, @@ -1564,6 +2307,97 @@ enum wmi_pdev_param { WMI_PDEV_PARAM_POWER_GATING_SLEEP, }; +enum wmi_10x_pdev_param { + /* TX chain mask */ + WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1, + /* RX chain mask */ + WMI_10X_PDEV_PARAM_RX_CHAIN_MASK, + /* TX power limit for 2G Radio */ + WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G, + /* TX power limit for 5G Radio */ + WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G, + /* TX power scale */ + WMI_10X_PDEV_PARAM_TXPOWER_SCALE, + /* Beacon generation mode. 0: host, 1: target */ + WMI_10X_PDEV_PARAM_BEACON_GEN_MODE, + /* Beacon generation mode.
0: staggered 1: bursted */ + WMI_10X_PDEV_PARAM_BEACON_TX_MODE, + /* + * Resource manager off chan mode. + * 0: turn off off chan mode. 1: turn on offchan mode + */ + WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + /* + * Protection mode: + * 0: no protection 1: use CTS-to-self 2: use RTS/CTS + */ + WMI_10X_PDEV_PARAM_PROTECTION_MODE, + /* Dynamic bandwidth 0: disable 1: enable */ + WMI_10X_PDEV_PARAM_DYNAMIC_BW, + /* Non-aggregate / 11g sw retry threshold. 0 - disable */ + WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + /* aggregate sw retry threshold. 0 - disable */ + WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH, + /* Station kickout threshold (number of consecutive failures). 0 - disable */ + WMI_10X_PDEV_PARAM_STA_KICKOUT_TH, + /* Aggregate size scaling configuration per AC */ + WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING, + /* LTR enable */ + WMI_10X_PDEV_PARAM_LTR_ENABLE, + /* LTR latency for BE, in us */ + WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE, + /* LTR latency for BK, in us */ + WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK, + /* LTR latency for VI, in us */ + WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI, + /* LTR latency for VO, in us */ + WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO, + /* LTR AC latency timeout, in ms */ + WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + /* LTR platform latency override, in us */ + WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + /* LTR-RX override, in us */ + WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE, + /* Tx activity timeout for LTR, in us */ + WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + /* L1SS state machine enable */ + WMI_10X_PDEV_PARAM_L1SS_ENABLE, + /* Deep sleep state machine enable */ + WMI_10X_PDEV_PARAM_DSLEEP_ENABLE, + /* pdev level stats update period in ms */ + WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + /* vdev level stats update period in ms */ + WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + /* peer level stats update period in ms */ + WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + /* beacon filter status update period */ + WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */ + WMI_10X_PDEV_PARAM_PMF_QOS, + /* Access category on which ARP and DHCP frames are sent */ + WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE, + /* DCS configuration */ + WMI_10X_PDEV_PARAM_DCS, + /* Enable/Disable ANI on target */ + WMI_10X_PDEV_PARAM_ANI_ENABLE, + /* configure the ANI polling period */ + WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD, + /* configure the ANI listening period */ + WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD, + /* configure OFDM immunity level */ + WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL, + /* configure CCK immunity level */ + WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL, + /* Enable/Disable CDD for 1x1 STAs in rate control module */ + WMI_10X_PDEV_PARAM_DYNTXCHAIN, + /* Enable/Disable Fast channel reset */ + WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET, + /* Set Bursting DUR */ + WMI_10X_PDEV_PARAM_BURST_DUR, + /* Set Bursting Enable */ + WMI_10X_PDEV_PARAM_BURST_ENABLE, +}; + struct wmi_pdev_set_param_cmd { __le32 param_id; __le32 param_value; @@ -2088,6 +2922,61 @@ enum wmi_rate_preamble { /* Value to disable fixed rate setting */ #define WMI_FIXED_RATE_NONE (0xff) +struct wmi_vdev_param_map { + u32 rts_threshold; + u32 fragmentation_threshold; + u32 beacon_interval; + u32 listen_interval; + u32 multicast_rate; + u32 mgmt_tx_rate; + u32 slot_time; + u32 preamble; + u32 swba_time; + u32 wmi_vdev_stats_update_period; + u32 wmi_vdev_pwrsave_ageout_time; + u32 wmi_vdev_host_swba_interval; + u32 dtim_period; + u32 wmi_vdev_oc_scheduler_air_time_limit; + u32 wds; + u32 atim_window; + u32
bmiss_count_max; + u32 bmiss_first_bcnt; + u32 bmiss_final_bcnt; + u32 feature_wmm; + u32 chwidth; + u32 chextoffset; + u32 disable_htprotection; + u32 sta_quickkickout; + u32 mgmt_rate; + u32 protection_mode; + u32 fixed_rate; + u32 sgi; + u32 ldpc; + u32 tx_stbc; + u32 rx_stbc; + u32 intra_bss_fwd; + u32 def_keyid; + u32 nss; + u32 bcast_data_rate; + u32 mcast_data_rate; + u32 mcast_indicate; + u32 dhcp_indicate; + u32 unknown_dest_indicate; + u32 ap_keepalive_min_idle_inactive_time_secs; + u32 ap_keepalive_max_idle_inactive_time_secs; + u32 ap_keepalive_max_unresponsive_time_secs; + u32 ap_enable_nawds; + u32 mcast2ucast_set; + u32 enable_rtscts; + u32 txbf; + u32 packet_powersave; + u32 drop_unencry; + u32 tx_encap_type; + u32 ap_detect_out_of_sync_sleeping_sta_time_secs; +}; + +#define WMI_VDEV_PARAM_UNSUPPORTED 0 + /* the definition of different VDEV parameters */ enum wmi_vdev_param { /* RTS Threshold */ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1, @@ -2219,6 +3108,121 @@ enum wmi_vdev_param { WMI_VDEV_PARAM_TX_ENCAP_TYPE, }; +/* the definition of different VDEV parameters */ +enum wmi_10x_vdev_param { + /* RTS Threshold */ + WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1, + /* Fragmentation threshold */ + WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + /* beacon interval in TUs */ + WMI_10X_VDEV_PARAM_BEACON_INTERVAL, + /* Listen interval in TUs */ + WMI_10X_VDEV_PARAM_LISTEN_INTERVAL, + /* multicast rate in Mbps */ + WMI_10X_VDEV_PARAM_MULTICAST_RATE, + /* management frame rate in Mbps */ + WMI_10X_VDEV_PARAM_MGMT_TX_RATE, + /* slot time (long vs short) */ + WMI_10X_VDEV_PARAM_SLOT_TIME, + /* preamble (long vs short) */ + WMI_10X_VDEV_PARAM_PREAMBLE, + /* SWBA time (time before tbtt in msec) */ + WMI_10X_VDEV_PARAM_SWBA_TIME, + /* time period for updating VDEV stats */ + WMI_10X_VDEV_STATS_UPDATE_PERIOD, + /* age out time in msec for frames queued for station in power save */ + WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME, + /* + * Host SWBA interval (time in msec before tbtt for SWBA event + * generation). + */ + WMI_10X_VDEV_HOST_SWBA_INTERVAL, + /* DTIM period (specified in units of num beacon intervals) */ + WMI_10X_VDEV_PARAM_DTIM_PERIOD, + /* + * scheduler air time limit for this VDEV. used by off chan + * scheduler.
*/ + WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + /* enable/disable WDS for this VDEV */ + WMI_10X_VDEV_PARAM_WDS, + /* ATIM Window */ + WMI_10X_VDEV_PARAM_ATIM_WINDOW, + /* BMISS max */ + WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX, + /* WMM enable/disable */ + WMI_10X_VDEV_PARAM_FEATURE_WMM, + /* Channel width */ + WMI_10X_VDEV_PARAM_CHWIDTH, + /* Channel Offset */ + WMI_10X_VDEV_PARAM_CHEXTOFFSET, + /* Disable HT Protection */ + WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION, + /* Quick STA Kickout */ + WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT, + /* Rate to be used with Management frames */ + WMI_10X_VDEV_PARAM_MGMT_RATE, + /* Protection Mode */ + WMI_10X_VDEV_PARAM_PROTECTION_MODE, + /* Fixed rate setting */ + WMI_10X_VDEV_PARAM_FIXED_RATE, + /* Short GI Enable/Disable */ + WMI_10X_VDEV_PARAM_SGI, + /* Enable LDPC */ + WMI_10X_VDEV_PARAM_LDPC, + /* Enable Tx STBC */ + WMI_10X_VDEV_PARAM_TX_STBC, + /* Enable Rx STBC */ + WMI_10X_VDEV_PARAM_RX_STBC, + /* Intra BSS forwarding */ + WMI_10X_VDEV_PARAM_INTRA_BSS_FWD, + /* Setting Default xmit key for Vdev */ + WMI_10X_VDEV_PARAM_DEF_KEYID, + /* NSS width */ + WMI_10X_VDEV_PARAM_NSS, + /* Set the custom rate for the broadcast data frames */ + WMI_10X_VDEV_PARAM_BCAST_DATA_RATE, + /* Set the custom rate (rate-code) for multicast data frames */ + WMI_10X_VDEV_PARAM_MCAST_DATA_RATE, + /* Tx multicast packet indicate Enable/Disable */ + WMI_10X_VDEV_PARAM_MCAST_INDICATE, + /* Tx DHCP packet indicate Enable/Disable */ + WMI_10X_VDEV_PARAM_DHCP_INDICATE, + /* Enable host inspection of Tx unicast packet to unknown destination */ + WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + + /* The minimum amount of time AP begins to consider STA inactive */ + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + + /* + * An associated STA is considered inactive when there is no recent + * TX/RX activity and no downlink frames are buffered for it. Once a + * STA exceeds the maximum idle inactive time, the AP will send an + * 802.11 data-null as a keep alive to verify the STA is still + * associated. If the STA does not ACK the data-null, or if the + * data-null is buffered and the STA does not retrieve it, the STA + * will be considered unresponsive + * (see WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS). + */ + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + + /* + * An associated STA is considered unresponsive if there is no recent + * TX/RX activity and downlink frames are buffered for it. Once a STA + * exceeds the maximum unresponsive time, the AP will send a + * WMI_10X_PEER_STA_KICKOUT event to the host so the STA can be deleted.
*/ + WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + + /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */ + WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS, + + WMI_10X_VDEV_PARAM_MCAST2UCAST_SET, + /* Enable/Disable RTS-CTS */ + WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, + + WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, +}; + /* slot time long */ #define WMI_VDEV_SLOT_TIME_LONG 0x1 /* slot time short */ @@ -3000,7 +4004,6 @@ struct wmi_force_fw_hang_cmd { #define WMI_MAX_EVENT 0x1000 /* Maximum number of pending TXed WMI packets */ -#define WMI_MAX_PENDING_TX_COUNT 128 #define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr) /* By default disable power save for IBSS */ @@ -3013,7 +4016,6 @@ int ath10k_wmi_attach(struct ath10k *ar); void ath10k_wmi_detach(struct ath10k *ar); int ath10k_wmi_wait_for_service_ready(struct ath10k *ar); int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar); -void ath10k_wmi_flush_tx(struct ath10k *ar); int ath10k_wmi_connect_htc_service(struct ath10k *ar); int ath10k_wmi_pdev_set_channel(struct ath10k *ar, @@ -3022,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar); int ath10k_wmi_pdev_resume_target(struct ath10k *ar); int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, u16 ctl2g, u16 ctl5g); -int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id, - u32 value); +int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value); int ath10k_wmi_cmd_init(struct ath10k *ar); int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *); void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *); @@ -3043,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid); int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id); int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, - enum wmi_vdev_param param_id, u32 param_value); + u32 param_id, u32 param_value); int ath10k_wmi_vdev_install_key(struct ath10k *ar, const struct wmi_vdev_install_key_arg *arg); int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, @@ -3066,11 +4067,13 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, enum wmi_ap_ps_peer_param param_id, u32 value); int ath10k_wmi_scan_chan_list(struct ath10k *ar, const struct wmi_scan_chan_list_arg *arg); -int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg); +int ath10k_wmi_beacon_send_nowait(struct ath10k *ar, + const struct wmi_bcn_tx_arg *arg); int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, const struct wmi_pdev_set_wmm_params_arg *arg); int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id); int ath10k_wmi_force_fw_hang(struct ath10k *ar, enum wmi_force_fw_hang_type type, u32 delay_ms); +int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb); #endif /* _WMI_H_ */
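The per-firmware maps introduced by this patch are what keep callers firmware-agnostic: a caller never hard-codes a raw WMI id, it goes through ar->wmi.cmd / ar->wmi.vdev_param / ar->wmi.pdev_param, and slots a firmware lacks hold the *_UNSUPPORTED value (0) and are rejected with -EOPNOTSUPP. A minimal caller sketch (illustration only, not part of the patch; ath10k_example_set_frag() is a hypothetical helper):

static int ath10k_example_set_frag(struct ath10k *ar, u32 vdev_id,
				   u32 frag_thr)
{
	/* resolve the id the running firmware uses for this parameter:
	 * wmi_10x_vdev_param_map for 10.X, wmi_vdev_param_map otherwise */
	u32 param = ar->wmi.vdev_param->fragmentation_threshold;

	/* ath10k_wmi_vdev_set_param() bails out with -EOPNOTSUPP when
	 * the map slot is WMI_VDEV_PARAM_UNSUPPORTED (0) */
	return ath10k_wmi_vdev_set_param(ar, vdev_id, param, frag_thr);
}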