Diffstat (limited to 'drivers/net/wireless/ath/ath6kl')
-rw-r--r--  drivers/net/wireless/ath/ath6kl/Kconfig        17
-rw-r--r--  drivers/net/wireless/ath/ath6kl/Makefile       35
-rw-r--r--  drivers/net/wireless/ath/ath6kl/bmi.c         692
-rw-r--r--  drivers/net/wireless/ath/ath6kl/bmi.h         250
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c   1538
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.h     39
-rw-r--r--  drivers/net/wireless/ath/ath6kl/common.h      183
-rw-r--r--  drivers/net/wireless/ath/ath6kl/core.h        546
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.c       150
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.h       104
-rw-r--r--  drivers/net/wireless/ath/ath6kl/hif-ops.h      67
-rw-r--r--  drivers/net/wireless/ath/ath6kl/hif.h         216
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc.c        2466
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc.h         596
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc_hif.c     811
-rw-r--r--  drivers/net/wireless/ath/ath6kl/htc_hif.h     113
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c       1293
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c       1337
-rw-r--r--  drivers/net/wireless/ath/ath6kl/node.c        238
-rw-r--r--  drivers/net/wireless/ath/ath6kl/sdio.c        853
-rw-r--r--  drivers/net/wireless/ath/ath6kl/target.h      331
-rw-r--r--  drivers/net/wireless/ath/ath6kl/txrx.c       1452
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c        2762
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.h        2024
24 files changed, 18113 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
new file mode 100644
index 000000000000..fc9f69c1f945
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -0,0 +1,17 @@
+config ATH6KL
+ tristate "Atheros ath6kl support"
+ depends on MMC
+ depends on CFG80211
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ ---help---
+ This module adds support for wireless adapters based on the
+ Atheros AR6003 chipset running over SDIO. If you choose to
+ build it as a module, it will be called ath6kl. Please note
+ that AR6002 and AR6001 are not supported by this driver.
+
+config ATH6KL_DEBUG
+ bool "Atheros ath6kl debugging"
+ depends on ATH6KL
+ ---help---
+ Enables debug support
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
new file mode 100644
index 000000000000..e1bb07ea8e80
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -0,0 +1,35 @@
+#------------------------------------------------------------------------------
+# Copyright (c) 2004-2010 Atheros Communications Inc.
+# All rights reserved.
+#
+#
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+#
+#
+# Author(s): ="Atheros"
+#------------------------------------------------------------------------------
+
+obj-$(CONFIG_ATH6KL) := ath6kl.o
+ath6kl-y += debug.o
+ath6kl-y += htc_hif.o
+ath6kl-y += htc.o
+ath6kl-y += bmi.o
+ath6kl-y += cfg80211.o
+ath6kl-y += init.o
+ath6kl-y += main.o
+ath6kl-y += txrx.o
+ath6kl-y += wmi.o
+ath6kl-y += node.o
+ath6kl-y += sdio.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
new file mode 100644
index 000000000000..84676697d7eb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+
+static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
+{
+ u32 addr;
+ unsigned long timeout;
+ int ret;
+
+ ar->bmi.cmd_credits = 0;
+
+ /* Read the counter register to get the command credits */
+ addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+ /*
+ * Hit the credit counter with a 4-byte access; the first byte
+ * read hits the counter and causes a decrement, while the
+ * remaining 3 bytes have no effect. The rationale behind this
+ * is to keep all HIF accesses 4-byte aligned.
+ */
+ ret = hif_read_write_sync(ar, addr,
+ (u8 *)&ar->bmi.cmd_credits, 4,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to decrement the command credit count register: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ ar->bmi.cmd_credits &= 0xFF;
+ }
+
+ if (!ar->bmi.cmd_credits) {
+ ath6kl_err("bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar, bool need_timeout)
+{
+ unsigned long timeout;
+ u32 rx_word = 0;
+ int ret = 0;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while ((!need_timeout || time_before(jiffies, timeout)) && !rx_word) {
+ ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
+ (u8 *)&rx_word, sizeof(rx_word),
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= (1 << ENDPOINT1);
+ }
+
+ if (!rx_word) {
+ ath6kl_err("bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ ret = ath6kl_get_bmi_cmd_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar->mbox_info.htc_addr;
+
+ ret = hif_read_write_sync(ar, addr, buf, len,
+ HIF_WR_SYNC_BYTE_INC);
+ if (ret)
+ ath6kl_err("unable to send the bmi data to the device\n");
+
+ return ret;
+}
+
+static int ath6kl_bmi_recv_buf(struct ath6kl *ar,
+ u8 *buf, u32 len, bool want_timeout)
+{
+ int ret;
+ u32 addr;
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (len >= 4) { /* NB: Currently, always true */
+ ret = ath6kl_bmi_get_rx_lkahd(ar, want_timeout);
+ if (ret)
+ return ret;
+ }
+
+ addr = ar->mbox_info.htc_addr;
+ ret = hif_read_write_sync(ar, addr, buf, len,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_done(struct ath6kl *ar)
+{
+ int ret;
+ u32 cid = BMI_DONE;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+
+ ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ if (ret) {
+ ath6kl_err("Unable to send bmi done: %d\n", ret);
+ return ret;
+ }
+
+ ath6kl_bmi_cleanup(ar);
+
+ return 0;
+}
+
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ struct ath6kl_bmi_target_info *targ_info)
+{
+ int ret;
+ u32 cid = BMI_GET_TARGET_INFO;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ if (ret) {
+ ath6kl_err("Unable to send get target info: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+ sizeof(targ_info->version), true);
+ if (ret) {
+ ath6kl_err("Unable to recv target info: %d\n", ret);
+ return ret;
+ }
+
+ if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
+ /* Determine how many bytes are in the Target's targ_info */
+ ret = ath6kl_bmi_recv_buf(ar,
+ (u8 *)&targ_info->byte_count,
+ sizeof(targ_info->byte_count),
+ true);
+ if (ret) {
+ ath6kl_err("unable to read target info byte count: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * The byte count reported by the target must match the size of
+ * the host's targ_info structure; bail out if the two disagree.
+ */
+ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Read the remainder of the targ_info */
+ ret = ath6kl_bmi_recv_buf(ar,
+ ((u8 *)targ_info) +
+ sizeof(targ_info->byte_count),
+ sizeof(*targ_info) -
+ sizeof(targ_info->byte_count),
+ true);
+
+ if (ret) {
+ ath6kl_err("Unable to read target info (%d bytes): %d\n",
+ targ_info->byte_count, ret);
+ return ret;
+ }
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
+ targ_info->version, targ_info->type);
+
+ return 0;
+}
+
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ u32 cid = BMI_READ_MEMORY;
+ int ret;
+ u32 offset;
+ u32 len_remain, rx_len;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi read memory: device: addr: 0x%x, len: %d\n",
+ addr, len);
+
+ len_remain = len;
+
+ while (len_remain) {
+ rx_len = (len_remain < BMI_DATASZ_MAX) ?
+ len_remain : BMI_DATASZ_MAX;
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
+ offset += sizeof(rx_len);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len, true);
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
+ len_remain -= rx_len; addr += rx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ u32 cid = BMI_WRITE_MEMORY;
+ int ret;
+ u32 offset;
+ u32 len_remain, tx_len;
+ const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
+ u8 aligned_buf[BMI_DATASZ_MAX];
+ u8 *src;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
+
+ len_remain = len;
+ while (len_remain) {
+ src = &buf[len - len_remain];
+
+ if (len_remain < (BMI_DATASZ_MAX - header)) {
+ if (len_remain & 3) {
+ /* align it with 4 bytes */
+ len_remain = len_remain +
+ (4 - (len_remain & 3));
+ memcpy(aligned_buf, src, len_remain);
+ src = aligned_buf;
+ }
+ tx_len = len_remain;
+ } else {
+ tx_len = (BMI_DATASZ_MAX - header);
+ }
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+ offset += sizeof(tx_len);
+ memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
+ offset += tx_len;
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+ len_remain -= tx_len; addr += tx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
+{
+ u32 cid = BMI_EXECUTE;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr) + sizeof(*param);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
+ addr, *param);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
+ offset += sizeof(*param);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), false);
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+ return 0;
+}
+
+int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
+{
+ u32 cid = BMI_SET_APP_START;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
+{
+ u32 cid = BMI_READ_SOC_REGISTER;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), true);
+ if (ret) {
+ ath6kl_err("Unable to read from the device: %d\n", ret);
+ return ret;
+ }
+ memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+ return 0;
+}
+
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
+{
+ u32 cid = BMI_WRITE_SOC_REGISTER;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr) + sizeof(param);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi write SOC reg: addr: 0x%x, param: %d\n",
+ addr, param);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
+ offset += sizeof(param);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ u32 cid = BMI_LZ_DATA;
+ int ret;
+ u32 offset;
+ u32 len_remain, tx_len;
+ const u32 header = sizeof(cid) + sizeof(len);
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = BMI_DATASZ_MAX + header;
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n",
+ len);
+
+ len_remain = len;
+ while (len_remain) {
+ tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
+ len_remain : (BMI_DATASZ_MAX - header);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+ offset += sizeof(tx_len);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
+ tx_len);
+ offset += tx_len;
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to write to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ len_remain -= tx_len;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
+{
+ u32 cid = BMI_LZ_STREAM_START;
+ int ret;
+ u32 offset;
+ u16 size;
+
+ if (ar->bmi.done_sent) {
+ ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+ return -EACCES;
+ }
+
+ size = sizeof(cid) + sizeof(addr);
+ if (size > MAX_BMI_CMDBUF_SZ) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ memset(ar->bmi.cmd_buf, 0, size);
+
+ ath6kl_dbg(ATH6KL_DBG_BMI,
+ "bmi LZ stream start: addr: 0x%x\n",
+ addr);
+
+ offset = 0;
+ memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+ offset += sizeof(cid);
+ memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+ offset += sizeof(addr);
+
+ ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ if (ret) {
+ ath6kl_err("Unable to start LZ stream to the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+ int ret;
+ u32 last_word = 0;
+ u32 last_word_offset = len & ~0x3;
+ u32 unaligned_bytes = len & 0x3;
+
+ ret = ath6kl_bmi_lz_stream_start(ar, addr);
+ if (ret)
+ return ret;
+
+ if (unaligned_bytes) {
+ /* copy the last word into a zero padded buffer */
+ memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
+ }
+
+ ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
+ if (ret)
+ return ret;
+
+ if (unaligned_bytes)
+ ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
+
+ if (!ret) {
+ /* Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches. */
+ ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
+ }
+ return ret;
+}
+
+int ath6kl_bmi_init(struct ath6kl *ar)
+{
+ ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+
+ if (!ar->bmi.cmd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ath6kl_bmi_cleanup(struct ath6kl *ar)
+{
+ kfree(ar->bmi.cmd_buf);
+ ar->bmi.cmd_buf = NULL;
+}
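The register read/write commands above compose naturally. Below is a minimal, hypothetical sketch (not part of this patch) of a read-modify-write helper built on ath6kl_bmi_reg_read() and ath6kl_bmi_reg_write(); the register address and bit mask are caller-supplied placeholders.

static int example_bmi_reg_set_bits(struct ath6kl *ar, u32 addr, u32 bits)
{
	u32 val;
	int ret;

	/* read the current SoC register value over BMI */
	ret = ath6kl_bmi_reg_read(ar, addr, &val);
	if (ret)
		return ret;

	/* write it back with the requested bits set */
	return ath6kl_bmi_reg_write(ar, addr, val | bits);
}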
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
new file mode 100644
index 000000000000..83546d76d979
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BMI_H
+#define BMI_H
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to ATH6KL, to provide
+ * patches to code that is already resident on ATH6KL, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using ATH6KL Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
+ (sizeof(u32) * 3 /* cmd + addr + len */))
+
+/* Maximum data size used for BMI transfers */
+#define BMI_DATASZ_MAX 256
+
+/* BMI Commands */
+
+#define BMI_NO_COMMAND 0
+
+#define BMI_DONE 1
+/*
+ * Semantics: Host is done using BMI
+ * Request format:
+ * u32 command (BMI_DONE)
+ * Response format: none
+ */
+
+#define BMI_READ_MEMORY 2
+/*
+ * Semantics: Host reads ATH6KL memory
+ * Request format:
+ * u32 command (BMI_READ_MEMORY)
+ * u32 address
+ * u32 length, at most BMI_DATASZ_MAX
+ * Response format:
+ * u8 data[length]
+ */
+
+#define BMI_WRITE_MEMORY 3
+/*
+ * Semantics: Host writes ATH6KL memory
+ * Request format:
+ * u32 command (BMI_WRITE_MEMORY)
+ * u32 address
+ * u32 length, at most BMI_DATASZ_MAX
+ * u8 data[length]
+ * Response format: none
+ */
+
+#define BMI_EXECUTE 4
+/*
+ * Semantics: Causes ATH6KL to execute code
+ * Request format:
+ * u32 command (BMI_EXECUTE)
+ * u32 address
+ * u32 parameter
+ * Response format:
+ * u32 return value
+ */
+
+#define BMI_SET_APP_START 5
+/*
+ * Semantics: Set Target application starting address
+ * Request format:
+ * u32 command (BMI_SET_APP_START)
+ * u32 address
+ * Response format: none
+ */
+
+#define BMI_READ_SOC_REGISTER 6
+/*
+ * Semantics: Read a 32-bit Target SOC register.
+ * Request format:
+ * u32 command (BMI_READ_REGISTER)
+ * u32 address
+ * Response format:
+ * u32 value
+ */
+
+#define BMI_WRITE_SOC_REGISTER 7
+/*
+ * Semantics: Write a 32-bit Target SOC register.
+ * Request format:
+ * u32 command (BMI_WRITE_REGISTER)
+ * u32 address
+ * u32 value
+ *
+ * Response format: none
+ */
+
+#define BMI_GET_TARGET_ID 8
+#define BMI_GET_TARGET_INFO 8
+/*
+ * Semantics: Fetch the 4-byte Target information
+ * Request format:
+ * u32 command (BMI_GET_TARGET_ID/INFO)
+ * Response format1 (old firmware):
+ * u32 TargetVersionID
+ * Response format2 (newer firmware):
+ * u32 TARGET_VERSION_SENTINAL
+ * struct bmi_target_info;
+ */
+
+#define TARGET_VERSION_SENTINAL 0xffffffff
+#define TARGET_TYPE_AR6003 3
+
+#define BMI_ROMPATCH_INSTALL 9
+/*
+ * Semantics: Install a ROM Patch.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_INSTALL)
+ * u32 Target ROM Address
+ * u32 Target RAM Address or Value (depending on Target Type)
+ * u32 Size, in bytes
+ * u32 Activate? 1-->activate;
+ * 0-->install but do not activate
+ * Response format:
+ * u32 PatchID
+ */
+
+#define BMI_ROMPATCH_UNINSTALL 10
+/*
+ * Semantics: Uninstall a previously-installed ROM Patch,
+ * automatically deactivating, if necessary.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_UNINSTALL)
+ * u32 PatchID
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_ACTIVATE 11
+/*
+ * Semantics: Activate a list of previously-installed ROM Patches.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_ACTIVATE)
+ * u32 rompatch_count
+ * u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_DEACTIVATE 12
+/*
+ * Semantics: Deactivate a list of active ROM Patches.
+ * Request format:
+ * u32 command (BMI_ROMPATCH_DEACTIVATE)
+ * u32 rompatch_count
+ * u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+
+#define BMI_LZ_STREAM_START 13
+/*
+ * Semantics: Begin an LZ-compressed stream of input
+ * which is to be uncompressed by the Target to an
+ * output buffer at address. The output buffer must
+ * be sufficiently large to hold the uncompressed
+ * output from the compressed input stream. This BMI
+ * command should be followed by a series of 1 or more
+ * BMI_LZ_DATA commands.
+ * u32 command (BMI_LZ_STREAM_START)
+ * u32 address
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_LZ_DATA 14
+/*
+ * Semantics: Host writes ATH6KL memory with LZ-compressed
+ * data which is uncompressed by the Target. This command
+ * must be preceded by a BMI_LZ_STREAM_START command. A series
+ * of BMI_LZ_DATA commands are considered part of a single
+ * input stream until another BMI_LZ_STREAM_START is issued.
+ * Request format:
+ * u32 command (BMI_LZ_DATA)
+ * u32 length (of compressed data),
+ * at most BMI_DATASZ_MAX
+ * u8 CompressedData[length]
+ * Response format: none
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_COMMUNICATION_TIMEOUT 1000 /* in msec */
+
+struct ath6kl;
+struct ath6kl_bmi_target_info {
+ __le32 byte_count; /* size of this structure */
+ __le32 version; /* target version id */
+ __le32 type; /* target type */
+} __packed;
+
+int ath6kl_bmi_init(struct ath6kl *ar);
+void ath6kl_bmi_cleanup(struct ath6kl *ar);
+int ath6kl_bmi_done(struct ath6kl *ar);
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+ struct ath6kl_bmi_target_info *targ_info);
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_execute(struct ath6kl *ar,
+ u32 addr, u32 *param);
+int ath6kl_bmi_set_app_start(struct ath6kl *ar,
+ u32 addr);
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
+int ath6kl_bmi_lz_data(struct ath6kl *ar,
+ u8 *buf, u32 len);
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
+ u32 addr);
+int ath6kl_bmi_fast_download(struct ath6kl *ar,
+ u32 addr, u8 *buf, u32 len);
+#endif
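Taken together, the API above describes the one-shot boot path: init, query target info, download the firmware, set its entry point, then send BMI_DONE. The sketch below is illustrative only; the addresses, image pointer and error policy are placeholders, not values from this patch, and ath6kl_bmi_done() already calls ath6kl_bmi_cleanup() internally.

static int example_bmi_boot(struct ath6kl *ar, u32 load_addr, u32 start_addr,
			    u8 *fw_image, u32 fw_len)
{
	struct ath6kl_bmi_target_info targ_info;
	int ret;

	ret = ath6kl_bmi_init(ar);	/* allocate the BMI command buffer */
	if (ret)
		return ret;

	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
	if (ret)
		goto err;

	/* push the (LZ-compressed) firmware image and set its entry point */
	ret = ath6kl_bmi_fast_download(ar, load_addr, fw_image, fw_len);
	if (ret)
		goto err;

	ret = ath6kl_bmi_set_app_start(ar, start_addr);
	if (ret)
		goto err;

	/* end the BMI phase; this also frees the command buffer */
	return ath6kl_bmi_done(ar);

err:
	ath6kl_bmi_cleanup(ar);
	return ret;
}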
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
new file mode 100644
index 000000000000..4284a41ff775
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -0,0 +1,1538 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_rate ath6kl_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ath6kl_a_rates (ath6kl_rates + 4)
+#define ath6kl_a_rates_size 8
+#define ath6kl_g_rates (ath6kl_rates + 0)
+#define ath6kl_g_rates_size 12
+
+static struct ieee80211_channel ath6kl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band ath6kl_band_2ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
+ .channels = ath6kl_2ghz_channels,
+ .n_bitrates = ath6kl_g_rates_size,
+ .bitrates = ath6kl_g_rates,
+};
+
+static struct ieee80211_supported_band ath6kl_band_5ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
+ .channels = ath6kl_5ghz_a_channels,
+ .n_bitrates = ath6kl_a_rates_size,
+ .bitrates = ath6kl_a_rates,
+};
+
+static int ath6kl_set_wpa_version(struct ath6kl *ar,
+ enum nl80211_wpa_versions wpa_version)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ ar->auth_mode = NONE_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_2) {
+ ar->auth_mode = WPA2_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_1) {
+ ar->auth_mode = WPA_AUTH;
+ } else {
+ ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_auth_type(struct ath6kl *ar,
+ enum nl80211_auth_type auth_type)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ ar->dot11_auth_mode = OPEN_AUTH;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ ar->dot11_auth_mode = SHARED_AUTH;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ ar->dot11_auth_mode = LEAP_AUTH;
+ break;
+
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ ar->dot11_auth_mode = OPEN_AUTH;
+ ar->auto_auth_stage = AUTH_OPEN_IN_PROGRESS;
+ break;
+
+ default:
+ ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+{
+ u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len : &ar->grp_crpto_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
+ __func__, cipher, ucast);
+
+ switch (cipher) {
+ case 0:
+ /* our own hack to use value 0 as no crypto used */
+ *ar_cipher = NONE_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 5;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 13;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *ar_cipher = TKIP_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *ar_cipher = AES_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ default:
+ ath6kl_err("cipher 0x%x not supported\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
+
+ if (key_mgmt == WLAN_AKM_SUITE_PSK) {
+ if (ar->auth_mode == WPA_AUTH)
+ ar->auth_mode = WPA_PSK_AUTH;
+ else if (ar->auth_mode == WPA2_AUTH)
+ ar->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
+ ar->auth_mode = NONE_AUTH;
+ }
+}
+
+static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+{
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("wmi is not ready\n");
+ return false;
+ }
+
+ if (ar->wlan_state == WLAN_DISABLED) {
+ ath6kl_err("wlan disabled\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ ar->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(SKIP_SCAN, &ar->flag) &&
+ ((sme->channel && sme->channel->center_freq == 0) ||
+ (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
+ ath6kl_err("SkipScan: channel or bssid invalid\n");
+ return -EINVAL;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ up(&ar->sem);
+ return -EBUSY;
+ }
+
+ if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
+ WMI_TIMEOUT);
+ if (signal_pending(current)) {
+ ath6kl_err("cmd queue drain timeout\n");
+ up(&ar->sem);
+ return -EINTR;
+ }
+ }
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ar->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
+ ar->ch_hint);
+
+ up(&ar->sem);
+ if (status) {
+ ath6kl_err("wmi_reconnect_cmd failed\n");
+ return -EIO;
+ }
+ return 0;
+ } else if (ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ath6kl_disconnect(ar);
+ }
+
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = sme->ssid_len;
+ memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+
+ if (sme->channel)
+ ar->ch_hint = sme->channel->center_freq;
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
+ memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+
+ status = ath6kl_set_auth_type(ar, sme->auth_type);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->crypto.n_ciphers_pairwise)
+ ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ else
+ ath6kl_set_cipher(ar, 0, true);
+
+ ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+
+ if (sme->crypto.n_akm_suites)
+ ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+
+ if ((sme->key_len) &&
+ (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ struct ath6kl_key *key = NULL;
+
+ if (sme->key_idx < WMI_MIN_KEY_INDEX ||
+ sme->key_idx > WMI_MAX_KEY_INDEX) {
+ ath6kl_err("key index %d out of bounds\n",
+ sme->key_idx);
+ up(&ar->sem);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[sme->key_idx];
+ key->key_len = sme->key_len;
+ memcpy(key->key, sme->key, key->key_len);
+ key->cipher = ar->prwise_crypto;
+ ar->def_txkey_index = sme->key_idx;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
+ ar->prwise_crypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len,
+ NULL,
+ key->key, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+
+ if (!ar->usr_bss_filter) {
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ ath6kl_err("couldn't set bss filtering\n");
+ up(&ar->sem);
+ return -EIO;
+ }
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crpto_len, ar->ch_hint);
+
+ ar->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crpto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+
+ up(&ar->sem);
+
+ if (status == -EINVAL) {
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+ ath6kl_err("invalid request\n");
+ return -ENOENT;
+ } else if (status) {
+ ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
+ return -EIO;
+ }
+
+ if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&ar->disconnect_timer,
+ jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
+ }
+
+ ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info)
+{
+ u16 size = 0;
+ u16 capability = 0;
+ struct cfg80211_bss *bss = NULL;
+ struct ieee80211_mgmt *mgmt = NULL;
+ struct ieee80211_channel *ibss_ch = NULL;
+ s32 signal = 50 * 100;
+ u8 ie_buf_len = 0;
+ unsigned char ie_buf[256];
+ unsigned char *ptr_ie_buf = ie_buf;
+ unsigned char *ieeemgmtbuf = NULL;
+ u8 source_mac[ETH_ALEN];
+ u16 capa_mask;
+ u16 capa_val;
+
+ /* capinfo + listen interval */
+ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
+
+ /* capinfo + status code + associd */
+ u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
+
+ u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
+ u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
+ assoc_resp_ie_offset;
+
+ assoc_req_len -= assoc_req_ie_offset;
+ assoc_resp_len -= assoc_resp_ie_offset;
+
+ ar->auto_auth_stage = AUTH_IDLE;
+
+ if (nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ }
+
+ if (nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ if (nw_type & ADHOC_NETWORK) {
+ capa_mask = WLAN_CAPABILITY_IBSS;
+ capa_val = WLAN_CAPABILITY_IBSS;
+ } else {
+ capa_mask = WLAN_CAPABILITY_ESS;
+ capa_val = WLAN_CAPABILITY_ESS;
+ }
+
+ /* Before informing cfg80211 of the join/connect event, make sure
+ * a bss entry is present in the scan list; if it is not, construct
+ * one and insert it. Otherwise cfg80211 drops the event on the way
+ * up, keys are not plumbed in the WEP case and the application
+ * never learns the join/connect status. */
+ bss = cfg80211_get_bss(ar->wdev->wiphy, NULL, bssid,
+ ar->wdev->ssid, ar->wdev->ssid_len,
+ capa_mask, capa_val);
+
+ /*
+ * Earlier we informed cfg80211 about the bss (by building a beacon
+ * frame) only if no entry for it was already present. That can cause
+ * problems when a ROAM event is generated under heavy traffic: the
+ * ROAM event is handled through a work queue, and by the time it
+ * actually runs the BSS may already have been aged out. So it is
+ * better to update cfg80211 about the BSS regardless of whether an
+ * entry is currently present.
+ */
+
+ if (nw_type & ADHOC_NETWORK) {
+ /* construct 802.11 mgmt beacon */
+ if (ptr_ie_buf) {
+ *ptr_ie_buf++ = WLAN_EID_SSID;
+ *ptr_ie_buf++ = ar->ssid_len;
+ memcpy(ptr_ie_buf, ar->ssid, ar->ssid_len);
+ ptr_ie_buf += ar->ssid_len;
+
+ *ptr_ie_buf++ = WLAN_EID_IBSS_PARAMS;
+ *ptr_ie_buf++ = 2; /* length */
+ *ptr_ie_buf++ = 0; /* ATIM window */
+ *ptr_ie_buf++ = 0; /* ATIM window */
+
+ /* TODO: update ibss params and include supported rates,
+ * DS param set, extended supported rates, wmm. */
+
+ ie_buf_len = ptr_ie_buf - ie_buf;
+ }
+
+ capability |= WLAN_CAPABILITY_IBSS;
+
+ if (ar->prwise_crypto == WEP_CRYPT)
+ capability |= WLAN_CAPABILITY_PRIVACY;
+
+ memcpy(source_mac, ar->net_dev->dev_addr, ETH_ALEN);
+ ptr_ie_buf = ie_buf;
+ } else {
+ capability = *(u16 *) (&assoc_info[beacon_ie_len]);
+ memcpy(source_mac, bssid, ETH_ALEN);
+ ptr_ie_buf = assoc_req_ie;
+ ie_buf_len = assoc_req_len;
+ }
+
+ size = offsetof(struct ieee80211_mgmt, u)
+ + sizeof(mgmt->u.beacon)
+ + ie_buf_len;
+
+ ieeemgmtbuf = kzalloc(size, GFP_ATOMIC);
+ if (!ieeemgmtbuf) {
+ ath6kl_err("ieee mgmt buf alloc error\n");
+ cfg80211_put_bss(bss);
+ return;
+ }
+
+ mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_BEACON);
+ memset(mgmt->da, 0xff, ETH_ALEN); /* broadcast addr */
+ memcpy(mgmt->sa, source_mac, ETH_ALEN);
+ memcpy(mgmt->bssid, bssid, ETH_ALEN);
+ mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_intvl);
+ mgmt->u.beacon.capab_info = cpu_to_le16(capability);
+ memcpy(mgmt->u.beacon.variable, ptr_ie_buf, ie_buf_len);
+
+ ibss_ch = ieee80211_get_channel(ar->wdev->wiphy, (int)channel);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: inform bss with bssid %pM channel %d beacon_intvl %d capability 0x%x\n",
+ __func__, mgmt->bssid, ibss_ch->hw_value,
+ beacon_intvl, capability);
+
+ bss = cfg80211_inform_bss_frame(ar->wdev->wiphy,
+ ibss_ch, mgmt,
+ size, signal, GFP_KERNEL);
+ kfree(ieeemgmtbuf);
+ cfg80211_put_bss(bss);
+
+ if (nw_type & ADHOC_NETWORK) {
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (!test_bit(CONNECTED, &ar->flag)) {
+ /* inform connect result to cfg80211 */
+ ar->sme_state = SME_DISCONNECTED;
+ cfg80211_connect_result(ar->net_dev, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ } else {
+ /* inform roam event to cfg80211 */
+ cfg80211_roamed(ar->net_dev, ibss_ch, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ }
+}
+
+static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *dev, u16 reason_code)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
+ reason_code);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ ar->reconnect_flag = 0;
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ if (!test_bit(SKIP_SCAN, &ar->flag))
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+
+ up(&ar->sem);
+
+ return 0;
+}
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason)
+{
+ struct ath6kl_key *key = NULL;
+ u16 status;
+
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (ar->nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ memset(bssid, 0, ETH_ALEN);
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ar->nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ if (!test_bit(CONNECT_PEND, &ar->flag)) {
+ if (reason != DISCONNECT_CMD)
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+
+ return;
+ }
+
+ if (reason == NO_NETWORK_AVAIL) {
+ /* connect cmd failed */
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ return;
+ }
+
+ if (reason != DISCONNECT_CMD)
+ return;
+
+ if (!ar->auto_auth_stage) {
+ clear_bit(CONNECT_PEND, &ar->flag);
+
+ if (ar->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(ar->net_dev,
+ bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ } else {
+ cfg80211_disconnected(ar->net_dev, reason,
+ NULL, 0, GFP_KERNEL);
+ }
+
+ ar->sme_state = SME_DISCONNECTED;
+ return;
+ }
+
+ if (ar->dot11_auth_mode != OPEN_AUTH)
+ return;
+
+ /*
+ * If the current auth algorithm is open, try shared key and set
+ * the auto-auth stage back to idle. We do not try LEAP for the
+ * time being.
+ */
+ key = &ar->keys[ar->def_txkey_index];
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return;
+ }
+
+ ar->dot11_auth_mode = SHARED_AUTH;
+ ar->auto_auth_stage = AUTH_IDLE;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi,
+ ar->def_txkey_index,
+ ar->prwise_crypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len, NULL,
+ key->key,
+ KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi,
+ ar->nw_type,
+ ar->dot11_auth_mode,
+ ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto,
+ ar->grp_crpto_len,
+ ar->ssid_len,
+ ar->ssid,
+ ar->req_bssid,
+ ar->ch_hint,
+ ar->connect_ctrl_flags);
+ up(&ar->sem);
+}
+
+static inline bool is_ch_11a(u16 ch)
+{
+ return (!((ch >= 2412) && (ch <= 2484)));
+}
+
+static void ath6kl_cfg80211_scan_node(void *arg, struct bss *ni)
+{
+ struct wiphy *wiphy = (struct wiphy *)arg;
+ u16 size;
+ unsigned char *ieeemgmtbuf = NULL;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *band;
+ struct ath6kl_common_ie *cie;
+ s32 signal;
+ int freq;
+
+ cie = &ni->ni_cie;
+
+ if (is_ch_11a(cie->ie_chan))
+ band = wiphy->bands[IEEE80211_BAND_5GHZ]; /* 11a */
+ else if ((cie->ie_erp) || (cie->ie_xrates))
+ band = wiphy->bands[IEEE80211_BAND_2GHZ]; /* 11g */
+ else
+ band = wiphy->bands[IEEE80211_BAND_2GHZ]; /* 11b */
+
+ size = ni->ni_framelen + offsetof(struct ieee80211_mgmt, u);
+ ieeemgmtbuf = kmalloc(size, GFP_ATOMIC);
+ if (!ieeemgmtbuf) {
+ ath6kl_err("ieee mgmt buf alloc error\n");
+ return;
+ }
+
+ /*
+ * TODO: Update target to include 802.11 mac header while sending
+ * bss info. Target removes 802.11 mac header while sending the bss
+ * info to the host, but cfg80211 needs it; for the time being just
+ * fill in the da, sa and bssid fields.
+ */
+ mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
+ memset(mgmt->da, 0xff, ETH_ALEN); /*broadcast addr */
+ memcpy(mgmt->sa, ni->ni_macaddr, ETH_ALEN);
+ memcpy(mgmt->bssid, ni->ni_macaddr, ETH_ALEN);
+ memcpy(ieeemgmtbuf + offsetof(struct ieee80211_mgmt, u),
+ ni->ni_buf, ni->ni_framelen);
+
+ freq = cie->ie_chan;
+ channel = ieee80211_get_channel(wiphy, freq);
+ signal = ni->ni_snr * 100;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: bssid %pM ch %d freq %d size %d\n", __func__,
+ mgmt->bssid, channel->hw_value, freq, size);
+ cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+ size, signal, GFP_KERNEL);
+
+ kfree(ieeemgmtbuf);
+}
+
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ int ret = 0;
+ u32 force_fg_scan = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (!ar->usr_bss_filter) {
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi,
+ (test_bit(CONNECTED, &ar->flag) ?
+ ALL_BUT_BSS_FILTER :
+ ALL_BSS_FILTER), 0) != 0) {
+ ath6kl_err("couldn't set bss filtering\n");
+ return -EIO;
+ }
+ }
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ u8 i;
+
+ if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
+ request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
+
+ for (i = 0; i < request->n_ssids; i++)
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+
+ if (test_bit(CONNECTED, &ar->flag))
+ force_fg_scan = 1;
+
+ if (ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, 0, NULL) != 0) {
+ ath6kl_err("wmi_startscan_cmd failed\n");
+ ret = -EIO;
+ }
+
+ ar->scan_req = request;
+
+ return ret;
+}
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+
+ if (ar->scan_req) {
+ /* Translate data to cfg80211 mgmt format */
+ ath6kl_wmi_iterate_nodes(ar->wmi, ath6kl_cfg80211_scan_node,
+ ar->wdev->wiphy);
+
+ cfg80211_scan_done(ar->scan_req,
+ (status == -ECANCELED) ||
+ (status == -EBUSY));
+
+ if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
+ u8 i;
+
+ for (i = 0; i < ar->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ DISABLE_SSID_FLAG,
+ 0, NULL);
+ }
+ }
+ ar->scan_req = NULL;
+ }
+}
+
+static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ u8 key_usage;
+ u8 key_type;
+ int status = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(key, 0, sizeof(struct ath6kl_key));
+
+ if (pairwise)
+ key_usage = PAIRWISE_USAGE;
+ else
+ key_usage = GROUP_USAGE;
+
+ if (params) {
+ if (params->key_len > WLAN_MAX_KEY_LEN ||
+ params->seq_len > sizeof(key->seq))
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ memcpy(key->key, params->key, key->key_len);
+ key->seq_len = params->seq_len;
+ memcpy(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_type = WEP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AES_CRYPT;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))
+ && (key_usage & GROUP_USAGE))
+ del_timer(&ar->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
+ __func__, key_index, key->key_len, key_type,
+ key_usage, key->seq_len);
+
+ ar->def_txkey_index = key_index;
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->key, KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d is empty\n", __func__, key_index);
+ return 0;
+ }
+
+ ar->keys[key_index].key_len = 0;
+
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+}
+
+static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie,
+ struct key_params *))
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ struct key_params params;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(&params, 0, sizeof(params));
+ params.cipher = key->cipher;
+ params.key_len = key->key_len;
+ params.seq_len = key->seq_len;
+ params.seq = key->seq;
+ params.key = key->key;
+
+ callback(cookie, &params);
+
+ return key->key_len ? 0 : -ENOENT;
+}
+
+static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ int status = 0;
+ u8 key_usage;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n",
+ __func__, key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
+ __func__, key_index);
+ return -EINVAL;
+ }
+
+ ar->def_txkey_index = key_index;
+ key = &ar->keys[ar->def_txkey_index];
+ key_usage = GROUP_USAGE;
+ if (ar->prwise_crypto == WEP_CRYPT)
+ key_usage |= TX_USAGE;
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ ar->prwise_crypto, key_usage,
+ key->key_len, key->seq, key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+ bool ismcast)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
+
+ cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ (ismcast ? NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
+ GFP_KERNEL);
+}
+
+static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
+ changed);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
+ if (ret != 0) {
+ ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The type nl80211_tx_power_setting replaces the following
+ * data type from 2.6.36 onwards
+ */
+static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ int dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ u8 ath6kl_dbm;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
+ type, dbm);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ return 0;
+ case NL80211_TX_POWER_LIMITED:
+ ar->tx_pwr = ath6kl_dbm = dbm;
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
+ __func__, type);
+ return -EOPNOTSUPP;
+ }
+
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ ar->tx_pwr = 0;
+
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ 5 * HZ);
+
+ if (signal_pending(current)) {
+ ath6kl_err("target did not respond\n");
+ return -EINTR;
+ }
+ }
+
+ *dbm = ar->tx_pwr;
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool pmgmt, int timeout)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct wmi_power_mode_cmd mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
+ __func__, pmgmt, timeout);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (pmgmt) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ mode.pwr_mode = REC_POWER;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ mode.pwr_mode = MAX_PERF_POWER;
+ }
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ ath6kl_err("wmi_powermode_cmd failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct wireless_dev *wdev = ar->wdev;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ar->next_mode = ADHOC_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *ibss_param)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ar->ssid_len = ibss_param->ssid_len;
+ memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+
+ if (ibss_param->channel)
+ ar->ch_hint = ibss_param->channel->center_freq;
+
+ if (ibss_param->channel_fixed) {
+ /*
+ * TODO: channel_fixed: The channel should be fixed, do not
+ * search for IBSSs to join on other channels. Target
+ * firmware does not support this feature, needs to be
+ * updated.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
+ memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, 0);
+
+ status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ if (status)
+ return status;
+
+ if (ibss_param->privacy) {
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ } else {
+ ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(ar, 0, false);
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crpto_len, ar->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crpto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ return 0;
+}
+
+static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+};
+
+static bool is_rate_legacy(s32 rate)
+{
+ static const s32 legacy[] = { 1000, 2000, 5500, 11000,
+ 6000, 9000, 12000, 18000, 24000,
+ 36000, 48000, 54000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy); i++)
+ if (rate == legacy[i])
+ return true;
+
+ return false;
+}
+
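+/*
+ * HT20 rate table in kbps: entries 0-7 are MCS 0-7 with long GI, the
+ * extra last entry (72.2 Mbps) is MCS 7 with short GI.
+ */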
+static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
+ 52000, 58500, 65000, 72200
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht20); i++) {
+ if (rate == ht20[i]) {
+ if (i == ARRAY_SIZE(ht20) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht40[] = { 13500, 27000, 40500, 54000,
+ 81000, 108000, 121500, 135000,
+ 150000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht40); i++) {
+ if (rate == ht40[i]) {
+ if (i == ARRAY_SIZE(ht40) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ long left;
+ bool sgi;
+ s32 rate;
+ int ret;
+ u8 mcs;
+
+ if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ return -ENOENT;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+
+ if (ret != 0) {
+ up(&ar->sem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag),
+ WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left == 0)
+ return -ETIMEDOUT;
+ else if (left < 0)
+ return left;
+
+ if (ar->target_stats.rx_byte) {
+ sinfo->rx_bytes = ar->target_stats.rx_byte;
+ sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->filled |= STATION_INFO_RX_PACKETS;
+ }
+
+ if (ar->target_stats.tx_byte) {
+ sinfo->tx_bytes = ar->target_stats.tx_byte;
+ sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->filled |= STATION_INFO_TX_PACKETS;
+ }
+
+ sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->filled |= STATION_INFO_SIGNAL;
+
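+ /* tx_ucast_rate is in kbps; cfg80211 expects legacy rates in 100 kbps units */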
+ rate = ar->target_stats.tx_ucast_rate;
+
+ if (is_rate_legacy(rate)) {
+ sinfo->txrate.legacy = rate / 100;
+ } else if (is_rate_ht20(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else if (is_rate_ht40(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else {
+ ath6kl_warn("invalid rate: %d\n", rate);
+ return 0;
+ }
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+
+ return 0;
+}
+
+static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, true);
+}
+
+static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, false);
+}
+
+static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ if (test_bit(CONNECTED, &ar->flag))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ return 0;
+}
+
+static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .change_virtual_intf = ath6kl_cfg80211_change_iface,
+ .scan = ath6kl_cfg80211_scan,
+ .connect = ath6kl_cfg80211_connect,
+ .disconnect = ath6kl_cfg80211_disconnect,
+ .add_key = ath6kl_cfg80211_add_key,
+ .get_key = ath6kl_cfg80211_get_key,
+ .del_key = ath6kl_cfg80211_del_key,
+ .set_default_key = ath6kl_cfg80211_set_default_key,
+ .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
+ .set_tx_power = ath6kl_cfg80211_set_txpower,
+ .get_tx_power = ath6kl_cfg80211_get_txpower,
+ .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
+ .join_ibss = ath6kl_cfg80211_join_ibss,
+ .leave_ibss = ath6kl_cfg80211_leave_ibss,
+ .get_station = ath6kl_get_station,
+ .set_pmksa = ath6kl_set_pmksa,
+ .del_pmksa = ath6kl_del_pmksa,
+ .flush_pmksa = ath6kl_flush_pmksa,
+};
+
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev) {
+ ath6kl_err("couldn't allocate wireless device\n");
+ return NULL;
+ }
+
+ /* create a new wiphy for use with cfg80211 */
+ wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+ if (!wdev->wiphy) {
+ ath6kl_err("couldn't allocate wiphy device\n");
+ kfree(wdev);
+ return NULL;
+ }
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wdev->wiphy, dev);
+
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
+ /* max num of ssids that can be probed during scanning */
+ wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wdev->wiphy->cipher_suites = cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ret = wiphy_register(wdev->wiphy);
+ if (ret < 0) {
+ ath6kl_err("couldn't register wiphy device\n");
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+ return NULL;
+ }
+
+ return wdev;
+}
+
+void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+{
+ struct wireless_dev *wdev = ar->wdev;
+
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
new file mode 100644
index 000000000000..a84adc249c61
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH6KL_CFG80211_H
+#define ATH6KL_CFG80211_H
+
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
+void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info);
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason);
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+ bool ismcast);
+
+#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
new file mode 100644
index 000000000000..0a3a1d80d0a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef COMMON_H
+#define COMMON_H
+
+#include <linux/netdevice.h>
+
+#define ATH6KL_MAX_IE 256
+
+extern int ath6kl_printk(const char *level, const char *fmt, ...);
+
+#define A_CACHE_LINE_PAD 128
+
+/*
+ * Reflects the version of binary interface exposed by ATH6KL target
+ * firmware. Needs to be incremented by 1 for any change in the firmware
+ * that requires upgrade of the driver on the host side for the change to
+ * work correctly
+ */
+#define ATH6KL_ABI_VERSION 1
+
+#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
+
+enum {
+ SIGNAL_QUALITY_METRICS_SNR = 0,
+ SIGNAL_QUALITY_METRICS_RSSI,
+ SIGNAL_QUALITY_METRICS_ALL,
+};
+
+/*
+ * Data Path
+ */
+
+#define WMI_MAX_TX_DATA_FRAME_LENGTH \
+ (1500 + sizeof(struct wmi_data_hdr) + \
+ sizeof(struct ethhdr) + \
+ sizeof(struct ath6kl_llc_snap_hdr))
+
+/* An AMSDU frame - the maximum AMSDU length of AR6003 is 3839 bytes */
+#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \
+ (3840 + sizeof(struct wmi_data_hdr) + \
+ sizeof(struct ethhdr) + \
+ sizeof(struct ath6kl_llc_snap_hdr))
+
+#define EPPING_ALIGNMENT_PAD \
+ (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \
+ - sizeof(struct htc_frame_hdr))
+
+struct ath6kl_llc_snap_hdr {
+ u8 dsap;
+ u8 ssap;
+ u8 cntl;
+ u8 org_code[3];
+ __be16 eth_type;
+} __packed;
+
+enum crypto_type {
+ NONE_CRYPT = 0x01,
+ WEP_CRYPT = 0x02,
+ TKIP_CRYPT = 0x04,
+ AES_CRYPT = 0x08,
+};
+
+#define ATH6KL_NODE_HASHSIZE 32
+/* simple hash is enough for variation of macaddr */
+#define ATH6KL_NODE_HASH(addr) \
+ (((const u8 *)(addr))[ETH_ALEN - 1] % \
+ ATH6KL_NODE_HASHSIZE)
+
+/*
+ * Table of ath6kl_node instances. Each ieee80211com
+ * has at least one for holding the scan candidates.
+ * When operating as an access point or in ibss mode there
+ * is a second table for associated stations or neighbors.
+ */
+struct ath6kl_node_table {
+ void *nt_wmi; /* back reference */
+ spinlock_t nt_nodelock; /* on node table */
+ struct bss *nt_node_first; /* information of all nodes */
+ struct bss *nt_node_last; /* information of all nodes */
+ struct bss *nt_hash[ATH6KL_NODE_HASHSIZE];
+ const char *nt_name; /* for debugging */
+ u32 nt_node_age; /* node aging time */
+};
+
+#define WLAN_NODE_INACT_TIMEOUT_MSEC 120000
+#define WLAN_NODE_INACT_CNT 4
+
+struct ath6kl_common_ie {
+ u16 ie_chan;
+ u8 *ie_tstamp;
+ u8 *ie_ssid;
+ u8 *ie_rates;
+ u8 *ie_xrates;
+ u8 *ie_country;
+ u8 *ie_wpa;
+ u8 *ie_rsn;
+ u8 *ie_wmm;
+ u8 *ie_ath;
+ u16 ie_capInfo;
+ u16 ie_beaconInt;
+ u8 *ie_tim;
+ u8 *ie_chswitch;
+ u8 ie_erp;
+ u8 *ie_wsc;
+ u8 *ie_htcap;
+ u8 *ie_htop;
+};
+
+struct bss {
+ u8 ni_macaddr[ETH_ALEN];
+ u8 ni_snr;
+ s16 ni_rssi;
+ struct bss *ni_list_next;
+ struct bss *ni_list_prev;
+ struct bss *ni_hash_next;
+ struct bss *ni_hash_prev;
+ struct ath6kl_common_ie ni_cie;
+ u8 *ni_buf;
+ u16 ni_framelen;
+ struct ath6kl_node_table *ni_table;
+ u32 ni_refcnt;
+
+ u32 ni_tstamp;
+ u32 ni_actcnt;
+};
+
+struct htc_endpoint_credit_dist;
+struct ath6kl;
+enum htc_credit_dist_reason;
+struct htc_credit_state_info;
+
+struct bss *wlan_node_alloc(int wh_size);
+void wlan_node_free(struct bss *ni);
+void wlan_setup_node(struct ath6kl_node_table *nt, struct bss *ni,
+ const u8 *mac_addr);
+struct bss *wlan_find_node(struct ath6kl_node_table *nt,
+ const u8 *mac_addr);
+void wlan_node_reclaim(struct ath6kl_node_table *nt, struct bss *ni);
+void wlan_free_allnodes(struct ath6kl_node_table *nt);
+void wlan_iterate_nodes(struct ath6kl_node_table *nt,
+ void (*f) (void *arg, struct bss *),
+ void *arg);
+
+void wlan_node_table_init(void *wmip, struct ath6kl_node_table *nt);
+void wlan_node_table_cleanup(struct ath6kl_node_table *nt);
+
+void wlan_refresh_inactive_nodes(struct ath6kl_node_table *nt);
+
+struct bss *wlan_find_ssid_node(struct ath6kl_node_table *nt, u8 *ssid,
+ u32 ssid_len, bool is_wpa2, bool match_ssid);
+
+void wlan_node_return(struct ath6kl_node_table *nt, struct bss *ni);
+
+int ath6k_setup_credit_dist(void *htc_handle,
+ struct htc_credit_state_info *cred_info);
+void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
+ struct list_head *epdist_list,
+ enum htc_credit_dist_reason reason);
+void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
+ struct list_head *ep_list,
+ int tot_credits);
+void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
+ struct htc_endpoint_credit_dist *ep_dist);
+struct ath6kl *ath6kl_core_alloc(struct device *sdev);
+int ath6kl_core_init(struct ath6kl *ar);
+int ath6kl_unavail_ev(struct ath6kl *ar);
+struct sk_buff *ath6kl_buf_alloc(int size);
+#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
new file mode 100644
index 000000000000..86177f0b98a5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef CORE_H
+#define CORE_H
+
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <net/cfg80211.h>
+#include "htc.h"
+#include "wmi.h"
+#include "bmi.h"
+
+#define MAX_ATH6KL 1
+#define ATH6KL_MAX_RX_BUFFERS 16
+#define ATH6KL_BUFFER_SIZE 1664
+#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4
+#define ATH6KL_AMSDU_REFILL_THRESHOLD 3
+#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
+#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
+#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
+
+#define USER_SAVEDKEYS_STAT_INIT 0
+#define USER_SAVEDKEYS_STAT_RUN 1
+
+#define ATH6KL_TX_TIMEOUT 10
+#define ATH6KL_MAX_ENDPOINTS 4
+#define MAX_NODE_NUM 15
+
+/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
+#define MAX_DEF_COOKIE_NUM 180
+#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_DEF_COOKIE_NUM */
+#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
+
+#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
+
+#define DISCON_TIMER_INTVAL 10000 /* in msec */
+#define A_DEFAULT_LISTEN_INTERVAL 100
+#define A_MAX_WOW_LISTEN_INTERVAL 1000
+
+/* AR6003 1.0 definitions */
+#define AR6003_REV1_VERSION 0x300002ba
+
+/* AR6003 2.0 definitions */
+#define AR6003_REV2_VERSION 0x30000384
+#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+
+/* AR6003 3.0 definitions */
+#define AR6003_REV3_VERSION 0x30000582
+#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
+#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
+#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
+#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+
+/* Per STA data, used in AP mode */
+#define STA_PS_AWAKE BIT(0)
+#define STA_PS_SLEEP BIT(1)
+#define STA_PS_POLLED BIT(2)
+
+/* HTC TX packet tagging definitions */
+#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
+#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1)
+
+#define AR6003_CUST_DATA_SIZE 16
+
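+/*
+ * Helpers for the 11n rx reorder window: indices wrap modulo the
+ * window size (y) and sequence numbers wrap at ATH6KL_MAX_SEQ_NO.
+ */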
+#define AGGR_WIN_IDX(x, y) ((x) % (y))
+#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y))
+#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y))
+#define ATH6KL_MAX_SEQ_NO 0xFFF
+#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO)
+
+#define NUM_OF_TIDS 8
+#define AGGR_SZ_DEFAULT 8
+
+#define AGGR_WIN_SZ_MIN 2
+#define AGGR_WIN_SZ_MAX 8
+
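+/* doubles the window size; used to size the per-TID rx hold queue */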
+#define TID_WINDOW_SZ(_x) ((_x) << 1)
+
+#define AGGR_NUM_OF_FREE_NETBUFS 16
+
+#define AGGR_RX_TIMEOUT 400 /* in ms */
+
+#define WMI_TIMEOUT (2 * HZ)
+
+#define MBOX_YIELD_LIMIT 99
+
+/* configuration flags */
+/*
+ * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the Barker preamble in the
+ * ERP IE of the beacon when determining short preamble support for
+ * the (Re)Assoc req.
+ * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't report to the host
+ * the power module state transition failure events that occur
+ * during scan.
+ */
+#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0)
+#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
+#define ATH6KL_CONF_ENABLE_11N BIT(2)
+#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+
+enum wlan_low_pwr_state {
+ WLAN_POWER_STATE_ON,
+ WLAN_POWER_STATE_CUT_PWR,
+ WLAN_POWER_STATE_DEEP_SLEEP,
+ WLAN_POWER_STATE_WOW
+};
+
+enum sme_state {
+ SME_DISCONNECTED,
+ SME_CONNECTING,
+ SME_CONNECTED
+};
+
+enum ath6kl_wlan_state {
+ WLAN_DISABLED,
+ WLAN_ENABLED
+};
+
+struct skb_hold_q {
+ struct sk_buff *skb;
+ bool is_amsdu;
+ u16 seq_no;
+};
+
+struct rxtid {
+ bool aggr;
+ bool progress;
+ bool timer_mon;
+ u16 win_sz;
+ u16 seq_next;
+ u32 hold_q_sz;
+ struct skb_hold_q *hold_q;
+ struct sk_buff_head q;
+ spinlock_t lock;
+};
+
+struct rxtid_stats {
+ u32 num_into_aggr;
+ u32 num_dups;
+ u32 num_oow;
+ u32 num_mpdu;
+ u32 num_amsdu;
+ u32 num_delivered;
+ u32 num_timeouts;
+ u32 num_hole;
+ u32 num_bar;
+};
+
+struct aggr_info {
+ u8 aggr_sz;
+ u8 timer_scheduled;
+ struct timer_list timer;
+ struct net_device *dev;
+ struct rxtid rx_tid[NUM_OF_TIDS];
+ struct sk_buff_head free_q;
+ struct rxtid_stats stat[NUM_OF_TIDS];
+};
+
+struct ath6kl_wep_key {
+ u8 key_index;
+ u8 key_len;
+ u8 key[64];
+};
+
+#define ATH6KL_KEY_SEQ_LEN 8
+
+struct ath6kl_key {
+ u8 key[WLAN_MAX_KEY_LEN];
+ u8 key_len;
+ u8 seq[ATH6KL_KEY_SEQ_LEN];
+ u8 seq_len;
+ u32 cipher;
+};
+
+struct ath6kl_node_mapping {
+ u8 mac_addr[ETH_ALEN];
+ u8 ep_id;
+ u8 tx_pend;
+};
+
+struct ath6kl_cookie {
+ struct sk_buff *skb;
+ u32 map_no;
+ struct htc_packet htc_pkt;
+ struct ath6kl_cookie *arc_list_next;
+};
+
+struct ath6kl_sta {
+ u16 sta_flags;
+ u8 mac[ETH_ALEN];
+ u8 aid;
+ u8 keymgmt;
+ u8 ucipher;
+ u8 auth;
+ u8 wpa_ie[ATH6KL_MAX_IE];
+ struct sk_buff_head psq;
+ spinlock_t psq_lock;
+};
+
+struct ath6kl_version {
+ u32 target_ver;
+ u32 wlan_ver;
+ u32 abi_ver;
+};
+
+struct ath6kl_bmi {
+ u32 cmd_credits;
+ bool done_sent;
+ u8 *cmd_buf;
+};
+
+struct target_stats {
+ u64 tx_pkt;
+ u64 tx_byte;
+ u64 tx_ucast_pkt;
+ u64 tx_ucast_byte;
+ u64 tx_mcast_pkt;
+ u64 tx_mcast_byte;
+ u64 tx_bcast_pkt;
+ u64 tx_bcast_byte;
+ u64 tx_rts_success_cnt;
+ u64 tx_pkt_per_ac[4];
+
+ u64 tx_err;
+ u64 tx_fail_cnt;
+ u64 tx_retry_cnt;
+ u64 tx_mult_retry_cnt;
+ u64 tx_rts_fail_cnt;
+
+ u64 rx_pkt;
+ u64 rx_byte;
+ u64 rx_ucast_pkt;
+ u64 rx_ucast_byte;
+ u64 rx_mcast_pkt;
+ u64 rx_mcast_byte;
+ u64 rx_bcast_pkt;
+ u64 rx_bcast_byte;
+ u64 rx_frgment_pkt;
+
+ u64 rx_err;
+ u64 rx_crc_err;
+ u64 rx_key_cache_miss;
+ u64 rx_decrypt_err;
+ u64 rx_dupl_frame;
+
+ u64 tkip_local_mic_fail;
+ u64 tkip_cnter_measures_invoked;
+ u64 tkip_replays;
+ u64 tkip_fmt_err;
+ u64 ccmp_fmt_err;
+ u64 ccmp_replays;
+
+ u64 pwr_save_fail_cnt;
+
+ u64 cs_bmiss_cnt;
+ u64 cs_low_rssi_cnt;
+ u64 cs_connect_cnt;
+ u64 cs_discon_cnt;
+
+ s32 tx_ucast_rate;
+ s32 rx_ucast_rate;
+
+ u32 lq_val;
+
+ u32 wow_pkt_dropped;
+ u16 wow_evt_discarded;
+
+ s16 noise_floor_calib;
+ s16 cs_rssi;
+ s16 cs_ave_beacon_rssi;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+ u8 cs_snr;
+
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+
+ u32 arp_received;
+ u32 arp_matched;
+ u32 arp_replied;
+};
+
+struct ath6kl_mbox_info {
+ u32 htc_addr;
+ u32 htc_ext_addr;
+ u32 htc_ext_sz;
+
+ u32 block_size;
+
+ u32 gmbox_addr;
+
+ u32 gmbox_sz;
+};
+
+/*
+ * 802.11i defines an extended IV for use with non-WEP ciphers.
+ * When the EXTIV bit is set in the key id byte an additional
+ * 4 bytes immediately follow the IV for TKIP. For CCMP the
+ * EXTIV bit is likewise set but the 8 bytes represent the
+ * CCMP header rather than IV+extended-IV.
+ */
+
+#define ATH6KL_KEYBUF_SIZE 16
+#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */
+
+#define ATH6KL_KEY_XMIT 0x01
+#define ATH6KL_KEY_RECV 0x02
+#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */
+
+/*
+ * WPA/RSN get/set key request. Specify the key/cipher
+ * type and whether the key is to be used for sending and/or
+ * receiving. The key index should be set only when working
+ * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
+ * Otherwise a unicast/pairwise key is specified by the bssid
+ * (on a station) or mac address (on an ap). The key length
+ * must include any MIC key data; otherwise it should be no
+ * more than ATH6KL_KEYBUF_SIZE.
+ */
+struct ath6kl_req_key {
+ u8 ik_type; /* key/cipher type */
+ u8 ik_pad;
+ u16 ik_keyix; /* key index */
+ u8 ik_keylen; /* key length in bytes */
+ u8 ik_flags;
+ u8 ik_macaddr[ETH_ALEN];
+ u64 ik_keyrsc; /* key receive sequence counter */
+ u64 ik_keytsc; /* key transmit sequence counter */
+ u8 ik_keydata[ATH6KL_KEYBUF_SIZE + ATH6KL_MICBUF_SIZE];
+};
+
+/* Flag info */
+#define WMI_ENABLED 0
+#define WMI_READY 1
+#define CONNECTED 2
+#define STATS_UPDATE_PEND 3
+#define CONNECT_PEND 4
+#define WMM_ENABLED 5
+#define NETQ_STOPPED 6
+#define WMI_CTRL_EP_FULL 7
+#define DTIM_EXPIRED 8
+#define DESTROY_IN_PROGRESS 9
+#define NETDEV_REGISTERED 10
+#define SKIP_SCAN 11
+
+struct ath6kl {
+ struct device *dev;
+ struct net_device *net_dev;
+ struct ath6kl_bmi bmi;
+ const struct ath6kl_hif_ops *hif_ops;
+ struct wmi *wmi;
+ int tx_pending[ENDPOINT_MAX];
+ int total_tx_data_pend;
+ struct htc_target *htc_target;
+ void *hif_priv;
+ spinlock_t lock;
+ struct semaphore sem;
+ int ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 next_mode;
+ u8 nw_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 prwise_crypto;
+ u8 prwise_crypto_len;
+ u8 grp_crypto;
+ u8 grp_crpto_len;
+ u8 def_txkey_index;
+ struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ u8 bssid[ETH_ALEN];
+ u8 req_bssid[ETH_ALEN];
+ u16 ch_hint;
+ u16 bss_ch;
+ u16 listen_intvl_b;
+ u16 listen_intvl_t;
+ struct ath6kl_version version;
+ u32 target_type;
+ u8 tx_pwr;
+ struct net_device_stats net_stats;
+ struct target_stats target_stats;
+ enum ath6kl_wlan_state wlan_state;
+ struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
+ u8 ibss_ps_enable;
+ u8 node_num;
+ u8 next_ep_id;
+ struct ath6kl_cookie *cookie_list;
+ u32 cookie_count;
+ enum htc_endpoint_id ac2ep_map[WMM_NUM_AC];
+ bool ac_stream_active[WMM_NUM_AC];
+ u8 ac_stream_pri_map[WMM_NUM_AC];
+ u8 hiac_stream_active_pri;
+ u8 ep2ac_map[ENDPOINT_MAX];
+ enum htc_endpoint_id ctrl_ep;
+ struct htc_credit_state_info credit_state_info;
+ u32 connect_ctrl_flags;
+ u32 user_key_ctrl;
+ u8 usr_bss_filter;
+ struct ath6kl_sta sta_list[AP_MAX_NUM_STA];
+ u8 sta_list_index;
+ struct ath6kl_req_key ap_mode_bkey;
+ struct sk_buff_head mcastpsq;
+ spinlock_t mcastpsq_lock;
+ u8 intra_bss;
+ struct aggr_info *aggr_cntxt;
+ struct wmi_ap_mode_stat ap_stats;
+ u8 ap_country_code[3];
+ struct list_head amsdu_rx_buffer_queue;
+ struct timer_list disconnect_timer;
+ u8 rx_meta_ver;
+ struct wireless_dev *wdev;
+ struct cfg80211_scan_request *scan_req;
+ struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+ enum sme_state sme_state;
+ enum wlan_low_pwr_state wlan_pwr_state;
+ struct wmi_scan_params_cmd sc_params;
+#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
+ u8 auto_auth_stage;
+
+ u16 conf_flags;
+ wait_queue_head_t event_wq;
+ struct ath6kl_mbox_info mbox_info;
+
+ struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
+ int reconnect_flag;
+ unsigned long flag;
+
+ u8 *fw_board;
+ size_t fw_board_len;
+
+ u8 *fw_otp;
+ size_t fw_otp_len;
+
+ u8 *fw;
+ size_t fw_len;
+
+ u8 *fw_patch;
+ size_t fw_patch_len;
+
+ struct workqueue_struct *ath6kl_wq;
+};
+
+static inline void *ath6kl_priv(struct net_device *dev)
+{
+ return wdev_priv(dev->ieee80211_ptr);
+}
+
+static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
+ *cred_info,
+ struct htc_endpoint_credit_dist
+ *ep_dist, int credits)
+{
+ ep_dist->credits += credits;
+ ep_dist->cred_assngd += credits;
+ cred_info->cur_free_credits -= credits;
+}
+
+void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
+int ath6kl_configure_target(struct ath6kl *ar);
+void ath6kl_detect_error(unsigned long ptr);
+void disconnect_timer_handler(unsigned long ptr);
+void init_netdev(struct net_device *dev);
+void ath6kl_cookie_init(struct ath6kl *ar);
+void ath6kl_cookie_cleanup(struct ath6kl *ar);
+void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
+void ath6kl_tx_complete(void *context, struct list_head *packet_queue);
+enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
+ struct htc_packet *packet);
+void ath6kl_stop_txrx(struct ath6kl *ar);
+void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
+int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
+ u8 *data, u32 length, bool read);
+int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data);
+void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_tx_data_cleanup(struct ath6kl *ar);
+void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
+ bool get_dbglogs);
+
+struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
+void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
+int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
+
+struct aggr_info *aggr_init(struct net_device *dev);
+void ath6kl_rx_refill(struct htc_target *target,
+ enum htc_endpoint_id endpoint);
+void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
+struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ int len);
+void aggr_module_destroy(struct aggr_info *aggr_info);
+void aggr_reset_state(struct aggr_info *aggr_info);
+
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
+
+void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
+int ath6kl_control_tx(void *devt, struct sk_buff *skb,
+ enum htc_endpoint_id eid);
+void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_int,
+ u16 beacon_int, enum network_type net_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info);
+void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 prot_reason_status);
+void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
+void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
+void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
+enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
+
+void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+
+void ath6kl_dtimexpiry_event(struct ath6kl *ar);
+void ath6kl_disconnect(struct ath6kl *ar);
+void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+ u8 win_sz);
+void ath6kl_wakeup_event(void *dev);
+void ath6kl_target_failure(struct ath6kl *ar);
+
+#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
new file mode 100644
index 000000000000..316136c8b903
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+int ath6kl_printk(const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ rtn = printk("%sath6kl: %pV", level, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
+
+#ifdef CONFIG_ATH6KL_DEBUG
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_enable_reg)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_ANY, "<------- Register Table -------->\n");
+
+ if (irq_proc_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Host Int status: 0x%x\n",
+ irq_proc_reg->host_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "CPU Int status: 0x%x\n",
+ irq_proc_reg->cpu_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Error Int status: 0x%x\n",
+ irq_proc_reg->error_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Counter Int status: 0x%x\n",
+ irq_proc_reg->counter_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Mbox Frame: 0x%x\n",
+ irq_proc_reg->mbox_frame);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead Valid: 0x%x\n",
+ irq_proc_reg->rx_lkahd_valid);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 0: 0x%x\n",
+ irq_proc_reg->rx_lkahd[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 1: 0x%x\n",
+ irq_proc_reg->rx_lkahd[1]);
+
+ if (dev->ar->mbox_info.gmbox_addr != 0) {
+ /*
+ * If the target supports GMBOX hardware, dump some
+ * additional state.
+ */
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX Host Int status 2: 0x%x\n",
+ irq_proc_reg->host_int_status2);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX RX Avail: 0x%x\n",
+ irq_proc_reg->gmbox_rx_avail);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 0: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 1: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[1]);
+ }
+
+ }
+
+ if (irq_enable_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Int status Enable: 0x%x\n",
+ irq_enable_reg->int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
+ irq_enable_reg->cntr_int_status_en);
+ }
+ ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
+}
+
+static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
+{
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "--- endpoint: %d svc_id: 0x%X ---\n",
+ ep_dist->endpoint, ep_dist->svc_id);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ep_dist->dist_flags);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ep_dist->cred_norm);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ep_dist->cred_min);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ep_dist->credits);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ep_dist->cred_assngd);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ep_dist->seek_cred);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ep_dist->cred_sz);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ep_dist->cred_per_msg);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ep_dist->cred_to_dist);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_dist->htc_rsvd)->txq));
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "----------------------------------\n");
+}
+
+void dump_cred_dist_stats(struct htc_target *target)
+{
+ struct htc_endpoint_credit_dist *ep_list;
+
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ return;
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list)
+ dump_cred_dist(ep_list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
+ target->cred_dist_cntxt, NULL);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
+ target->cred_dist_cntxt->total_avail_credits,
+ target->cred_dist_cntxt->cur_free_credits);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
new file mode 100644
index 000000000000..2e6058856a6a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include "htc_hif.h"
+
+enum ATH6K_DEBUG_MASK {
+ ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
+ ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
+ ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
+ ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
+ ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
+ ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
+ ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
+ ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
+ ATH6KL_DBG_PM = BIT(8), /* power management */
+ ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
+ ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
+ ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
+ ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
+ ATH6KL_DBG_WLAN_CFG = BIT(13), /* cfg80211 i/f file tracing */
+ ATH6KL_DBG_RAW_BYTES = BIT(14), /* dump tx/rx and wmi frames */
+ ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
+};
+
+extern unsigned int debug_mask;
+extern int ath6kl_printk(const char *level, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+#define ath6kl_info(fmt, ...) \
+ ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
+#define ath6kl_err(fmt, ...) \
+ ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
+#define ath6kl_warn(fmt, ...) \
+ ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
+
+#define AR_DBG_LVL_CHECK(mask) (debug_mask & mask)
+
+#ifdef CONFIG_ATH6KL_DEBUG
+#define ath6kl_dbg(mask, fmt, ...) \
+ ({ \
+ int rtn; \
+ if (debug_mask & mask) \
+ rtn = ath6kl_printk(KERN_DEBUG, fmt, ##__VA_ARGS__); \
+ else \
+ rtn = 0; \
+ \
+ rtn; \
+ })
+
+static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const void *buf,
+ size_t len)
+{
+ if (debug_mask & mask) {
+ ath6kl_dbg(mask, "%s\n", msg);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
+ }
+}
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_en_reg);
+void dump_cred_dist_stats(struct htc_target *target);
+#else
+static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
+ const char *msg, const void *buf,
+ size_t len)
+{
+}
+
+static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_en_reg)
+{
+
+}
+static inline void dump_cred_dist_stats(struct htc_target *target)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
new file mode 100644
index 000000000000..ad4966917e84
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HIF_OPS_H
+#define HIF_OPS_H
+
+#include "hif.h"
+
+static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
+}
+
+static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ return ar->hif_ops->write_async(ar, address, buffer, length,
+ request, packet);
+}
+static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
+{
+ return ar->hif_ops->irq_enable(ar);
+}
+
+static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
+{
+ return ar->hif_ops->irq_disable(ar);
+}
+
+static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar)
+{
+ return ar->hif_ops->scatter_req_get(ar);
+}
+
+static inline void hif_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ return ar->hif_ops->scatter_req_add(ar, s_req);
+}
+
+static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar,
+ struct hif_dev_scat_sup_info *info)
+{
+ return ar->hif_ops->enable_scatter(ar, info);
+}
+
+static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
+{
+ return ar->hif_ops->cleanup_scatter(ar);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
new file mode 100644
index 000000000000..7d39c1769fe4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HIF_H
+#define HIF_H
+
+#include "common.h"
+#include "core.h"
+
+#include <linux/scatterlist.h>
+
+#define BUS_REQUEST_MAX_NUM 64
+#define HIF_MBOX_BLOCK_SIZE 128
+#define HIF_MBOX0_BLOCK_SIZE 1
+
+#define HIF_DMA_BUFFER_SIZE (32 * 1024)
+#define CMD53_FIXED_ADDRESS 1
+#define CMD53_INCR_ADDRESS 2
+
+#define MAX_SCATTER_REQUESTS 4
+#define MAX_SCATTER_ENTRIES_PER_REQ 16
+#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
+
+#define MANUFACTURER_ID_AR6003_BASE 0x300
+ /* SDIO manufacturer ID and Codes */
+#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
+#define MANUFACTURER_CODE 0x271 /* Atheros */
+
+/* Mailbox address in SDIO address space */
+#define HIF_MBOX_BASE_ADDR 0x800
+#define HIF_MBOX_WIDTH 0x800
+
+#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1)
+
+/* version 1 of the chip has only a 12K extended mbox range */
+#define HIF_MBOX0_EXT_BASE_ADDR 0x4000
+#define HIF_MBOX0_EXT_WIDTH (12*1024)
+
+/* GMBOX addresses */
+#define HIF_GMBOX_BASE_ADDR 0x7000
+#define HIF_GMBOX_WIDTH 0x4000
+
+/* interrupt mode register */
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0
+
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
+
+struct bus_request {
+ struct list_head list;
+
+ /* request data */
+ u32 address;
+
+ u8 *buffer;
+ u32 length;
+ u32 request;
+ struct htc_packet *packet;
+ int status;
+
+ /* this is a scatter request */
+ struct hif_scatter_req *scat_req;
+};
+
+/* direction of transfer (read/write) */
+#define HIF_READ 0x00000001
+#define HIF_WRITE 0x00000002
+#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
+
+/*
+ * emode - This indicates the whether the command is to be executed in a
+ * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
+ * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
+ * implemented using the asynchronous mode, allowing the bus
+ * driver to indicate the completion of operation through the
+ * registered callback routine. The requirement primarily comes
+ * from the contexts these operations get called from (a driver's
+ * transmit context or the ISR context in case of receive).
+ * Support for both of these modes is essential.
+ */
+#define HIF_SYNCHRONOUS 0x00000010
+#define HIF_ASYNCHRONOUS 0x00000020
+#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
+
+/*
+ * dmode - An interface may support different kinds of commands based on
+ * the tradeoff between the amount of data it can carry and the
+ * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
+ * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
+ * to the nearest block size by padding. The size of the block is
+ * configurable at compile time using the HIF_BLOCK_SIZE and is
+ * negotiated with the target during initialization after the
+ * ATH6KL interrupts are enabled.
+ */
+#define HIF_BYTE_BASIS 0x00000040
+#define HIF_BLOCK_BASIS 0x00000080
+#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
+
+/*
+ * amode - This indicates if the address has to be incremented on ATH6KL
+ * after every read/write operation (HIF_FIXED_ADDRESS/
+ * HIF_INCREMENTAL_ADDRESS).
+ */
+#define HIF_FIXED_ADDRESS 0x00000100
+#define HIF_INCREMENTAL_ADDRESS 0x00000200
+#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BYTE_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_ASYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_ASYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_FIX \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_WR_SYNC_BYTE_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_WR_SYNC_BLOCK_INC \
+ (HIF_WRITE | HIF_SYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_INC \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
+
+#define HIF_RD_SYNC_BYTE_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_ASYNC_BLOCK_FIX \
+ (HIF_READ | HIF_ASYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+#define HIF_RD_SYNC_BLOCK_FIX \
+ (HIF_READ | HIF_SYNCHRONOUS | \
+ HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
+
+struct hif_scatter_item {
+ u8 *buf;
+ int len;
+ struct htc_packet *packet;
+};
+
+struct hif_scatter_req {
+ struct list_head list;
+ /* address for the read/write operation */
+ u32 addr;
+
+ /* request flags */
+ u32 req;
+
+ /* total length of entire transfer */
+ u32 len;
+
+ u32 flags;
+ void (*complete) (struct hif_scatter_req *);
+ int status;
+ struct htc_endpoint *ep;
+ int scat_entries;
+
+ struct hif_scatter_req_priv *req_priv;
+
+ /* bounce buffer for upper layers to copy to/from */
+ u8 *virt_dma_buf;
+
+ struct hif_scatter_item scat_list[1];
+};
+
+struct hif_dev_scat_sup_info {
+ int (*rw_scat_func) (struct ath6kl *ar, struct hif_scatter_req *);
+ int max_scat_entries;
+ int max_xfer_szper_scatreq;
+};
+
+struct hif_scatter_req_priv {
+ struct bus_request *busrequest;
+ struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ];
+};
+
+struct ath6kl_hif_ops {
+ int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request);
+ int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request, struct htc_packet *packet);
+
+ void (*irq_enable)(struct ath6kl *ar);
+ void (*irq_disable)(struct ath6kl *ar);
+
+ struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
+ void (*scatter_req_add)(struct ath6kl *ar,
+ struct hif_scatter_req *s_req);
+ int (*enable_scatter)(struct ath6kl *ar,
+ struct hif_dev_scat_sup_info *info);
+ void (*cleanup_scatter)(struct ath6kl *ar);
+};
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
new file mode 100644
index 000000000000..95c47bbd1d78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -0,0 +1,2466 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "htc_hif.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include <asm/unaligned.h>
+
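+/* round a transfer length up to the next block boundary using the device block mask */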
+#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+
+static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
+ int ctrl1)
+{
+ struct htc_frame_hdr *hdr;
+
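+ /* move the buffer pointer back to make room for the HTC frame header */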
+ packet->buf -= HTC_HDR_LENGTH;
+ hdr = (struct htc_frame_hdr *)packet->buf;
+
+ /* Endianness? */
+ put_unaligned((u16)packet->act_len, &hdr->payld_len);
+ hdr->flags = flags;
+ hdr->eid = packet->endpoint;
+ hdr->ctrl[0] = ctrl0;
+ hdr->ctrl[1] = ctrl1;
+}
+
+static void htc_reclaim_txctrl_buf(struct htc_target *target,
+ struct htc_packet *pkt)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static struct htc_packet *htc_get_control_buf(struct htc_target *target,
+ bool tx)
+{
+ struct htc_packet *packet = NULL;
+ struct list_head *buf_list;
+
+ buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
+
+ spin_lock_bh(&target->htc_lock);
+
+ if (list_empty(buf_list)) {
+ spin_unlock_bh(&target->htc_lock);
+ return NULL;
+ }
+
+ packet = list_first_entry(buf_list, struct htc_packet, list);
+ list_del(&packet->list);
+ spin_unlock_bh(&target->htc_lock);
+
+ if (tx)
+ packet->buf = packet->buf_start + HTC_HDR_LENGTH;
+
+ return packet;
+}
+
+static void htc_tx_comp_update(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ packet->completion = NULL;
+ packet->buf += HTC_HDR_LENGTH;
+
+ if (!packet->status)
+ return;
+
+ ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
+ packet->status, packet->endpoint, packet->act_len,
+ packet->info.tx.cred_used);
+
+ /* on failure to submit, reclaim credits for this packet */
+ spin_lock_bh(&target->tx_lock);
+ endpoint->cred_dist.cred_to_dist +=
+ packet->info.tx.cred_used;
+ endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static void htc_tx_complete(struct htc_endpoint *endpoint,
+ struct list_head *txq)
+{
+ if (list_empty(txq))
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "send complete ep %d, (%d pkts)\n",
+ endpoint->eid, get_queue_depth(txq));
+
+ ath6kl_tx_complete(endpoint->target->dev->ar, txq);
+}
+
+static void htc_tx_comp_handler(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
+ struct list_head container;
+
+ htc_tx_comp_update(target, endpoint, packet);
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ /* do completion */
+ htc_tx_complete(endpoint, &container);
+}
+
+static void htc_async_tx_scat_complete(struct hif_scatter_req *scat_req)
+{
+ struct htc_endpoint *endpoint = scat_req->ep;
+ struct htc_target *target = endpoint->target;
+ struct htc_packet *packet;
+ struct list_head tx_compq;
+ int i;
+
+ INIT_LIST_HEAD(&tx_compq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_async_tx_scat_complete total len: %d entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (scat_req->status)
+ ath6kl_err("send scatter req failed: %d\n", scat_req->status);
+
+ /* walk through the scatter list and process */
+ for (i = 0; i < scat_req->scat_entries; i++) {
+ packet = scat_req->scat_list[i].packet;
+ if (!packet) {
+ WARN_ON(1);
+ return;
+ }
+
+ packet->status = scat_req->status;
+ htc_tx_comp_update(target, endpoint, packet);
+ list_add_tail(&packet->list, &tx_compq);
+ }
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+ /* complete all packets */
+ htc_tx_complete(endpoint, &tx_compq);
+}
+
+static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
+{
+ int status;
+ bool sync = false;
+ u32 padded_len, send_len;
+
+ if (!packet->completion)
+ sync = true;
+
+ send_len = packet->act_len + HTC_HDR_LENGTH;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
+ __func__, send_len, sync ? "sync" : "async");
+
+ padded_len = CALC_TXRX_PADDED_LEN(target->dev, send_len);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
+ padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
+
+ if (sync) {
+ status = hif_read_write_sync(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_SYNC_BLOCK_INC);
+
+ packet->status = status;
+ packet->buf += HTC_HDR_LENGTH;
+ } else
+ status = hif_write_async(target->dev->ar,
+ target->dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_WR_ASYNC_BLOCK_INC, packet);
+
+ return status;
+}
+
+static int htc_check_credits(struct htc_target *target,
+ struct htc_endpoint *ep, u8 *flags,
+ enum htc_endpoint_id eid, unsigned int len,
+ int *req_cred)
+{
+
+ *req_cred = (len > target->tgt_cred_sz) ?
+ DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
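+	/* e.g. a 1700 byte message at a 256 byte credit size needs 7 credits */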
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+ *req_cred, ep->cred_dist.credits);
+
+ if (ep->cred_dist.credits < *req_cred) {
+ if (eid == ENDPOINT_0)
+ return -EINVAL;
+
+ /* Seek more credits */
+ ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &ep->cred_dist);
+
+ ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+
+ ep->cred_dist.seek_cred = 0;
+
+ if (ep->cred_dist.credits < *req_cred) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "not enough credits for ep %d - leaving packet in queue\n",
+ eid);
+ return -EINVAL;
+ }
+ }
+
+ ep->cred_dist.credits -= *req_cred;
+ ep->ep_st.cred_cosumd += *req_cred;
+
+ /* When we are getting low on credits, ask for more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ ep->cred_dist.seek_cred =
+ ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &ep->cred_dist);
+
+ ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+
+ /* see if we were successful in getting more */
+ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
+ /* tell the target we need credits ASAP! */
+ *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
+ ep->ep_st.cred_low_indicate += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+ }
+ }
+
+ return 0;
+}
+
+static void htc_tx_pkts_get(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int req_cred;
+ u8 flags;
+ struct htc_packet *packet;
+ unsigned int len;
+
+ while (true) {
+
+ flags = 0;
+
+ if (list_empty(&endpoint->txq))
+ break;
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "got head pkt:0x%p , queue depth: %d\n",
+ packet, get_queue_depth(&endpoint->txq));
+
+ len = CALC_TXRX_PADDED_LEN(target->dev,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ if (htc_check_credits(target, endpoint, &flags,
+ packet->endpoint, len, &req_cred))
+ break;
+
+ /* now we can fully move onto caller's queue */
+ packet = list_first_entry(&endpoint->txq, struct htc_packet,
+ list);
+ list_move_tail(&packet->list, queue);
+
+ /* save the number of credits this packet consumed */
+ packet->info.tx.cred_used = req_cred;
+
+ /* all TX packets are handled asynchronously */
+ packet->completion = htc_tx_comp_handler;
+ packet->context = target;
+ endpoint->ep_st.tx_issued += 1;
+
+ /* save send flags */
+ packet->info.tx.flags = flags;
+ packet->info.tx.seqno = endpoint->seqno;
+ endpoint->seqno++;
+ }
+}
+
+/* See if the padded tx length falls on a credit boundary */
+static int htc_get_credit_padding(unsigned int cred_sz, int *len,
+ struct htc_endpoint *ep)
+{
+ int rem_cred, cred_pad;
+
+ rem_cred = *len % cred_sz;
+
+ /* No padding needed */
+ if (!rem_cred)
+ return 0;
+
+ if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
+ return -1;
+
+ /*
+	 * The transfer consumes a "partial" credit; this
+ * packet cannot be bundled unless we add
+ * additional "dummy" padding (max 255 bytes) to
+ * consume the entire credit.
+ */
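+	/*
+	 * e.g. with a 128 byte credit size, a 100 byte transfer gets
+	 * 28 bytes of dummy padding
+	 */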
+ cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
+
+ if ((cred_pad > 0) && (cred_pad <= 255))
+ *len += cred_pad;
+ else
+ /* The amount of padding is too large, send as non-bundled */
+ return -1;
+
+ return cred_pad;
+}
+
+static int htc_setup_send_scat_list(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct hif_scatter_req *scat_req,
+ int n_scat,
+ struct list_head *queue)
+{
+ struct htc_packet *packet;
+ int i, len, rem_scat, cred_pad;
+ int status = 0;
+
+ rem_scat = target->dev->max_tx_bndl_sz;
+
+ for (i = 0; i < n_scat; i++) {
+ scat_req->scat_list[i].packet = NULL;
+
+ if (list_empty(queue))
+ break;
+
+ packet = list_first_entry(queue, struct htc_packet, list);
+ len = CALC_TXRX_PADDED_LEN(target->dev,
+ packet->act_len + HTC_HDR_LENGTH);
+
+ cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
+ &len, endpoint);
+ if (cred_pad < 0) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (rem_scat < len) {
+ /* exceeds what we can transfer */
+ status = -ENOSPC;
+ break;
+ }
+
+ rem_scat -= len;
+ /* now remove it from the queue */
+ packet = list_first_entry(queue, struct htc_packet, list);
+ list_del(&packet->list);
+
+ scat_req->scat_list[i].packet = packet;
+ /* prepare packet and flag message as part of a send bundle */
+ htc_prep_send_pkt(packet,
+ packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
+ cred_pad, packet->info.tx.seqno);
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = len;
+
+ scat_req->len += len;
+ scat_req->scat_entries++;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
+ i, packet, len, rem_scat);
+ }
+
+ /* Roll back scatter setup in case of any failure */
+ if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ for (i = scat_req->scat_entries - 1; i >= 0; i--) {
+ packet = scat_req->scat_list[i].packet;
+ if (packet) {
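+				/*
+				 * undo the header prepend done by
+				 * htc_prep_send_pkt() before requeueing
+				 */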
+ packet->buf += HTC_HDR_LENGTH;
+ list_add(&packet->list, queue);
+ }
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * htc_issue_send_bundle: drain a queue and send as bundles
+ * This function may return without fully draining the queue when:
+ *
+ * 1. scatter resources are exhausted
+ * 2. a message that will consume a partial credit will stop the
+ * bundling process early
+ * 3. we drop below the minimum number of messages for a bundle
+ */
+static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
+ struct list_head *queue,
+ int *sent_bundle, int *n_bundle_pkts)
+{
+ struct htc_target *target = endpoint->target;
+ struct hif_scatter_req *scat_req = NULL;
+ struct hif_dev_scat_sup_info hif_info;
+ int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
+
+ hif_info = target->dev->hif_scat_info;
+
+ while (true) {
+ n_scat = get_queue_depth(queue);
+ n_scat = min(n_scat, target->msg_per_bndl_max);
+
+ if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
+ /* not enough to bundle */
+ break;
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (!scat_req) {
+ /* no scatter resources */
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "no more scatter resources\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+ n_scat);
+
+ scat_req->len = 0;
+ scat_req->scat_entries = 0;
+
+ if (htc_setup_send_scat_list(target, endpoint, scat_req,
+ n_scat, queue)) {
+ hif_scatter_req_add(target->dev->ar, scat_req);
+ break;
+ }
+
+ /* send path is always asynchronous */
+ scat_req->complete = htc_async_tx_scat_complete;
+ scat_req->ep = endpoint;
+ n_sent_bundle++;
+ tot_pkts_bundle += scat_req->scat_entries;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "send scatter total bytes: %d , entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+ ath6kldev_submit_scat_req(target->dev, scat_req, false);
+ }
+
+ *sent_bundle = n_sent_bundle;
+ *n_bundle_pkts = tot_pkts_bundle;
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
+ n_sent_bundle);
+
+ return;
+}
+
+static void htc_tx_from_ep_txq(struct htc_target *target,
+ struct htc_endpoint *endpoint)
+{
+ struct list_head txq;
+ struct htc_packet *packet;
+ int bundle_sent;
+ int n_pkts_bundle;
+
+ spin_lock_bh(&target->tx_lock);
+
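+	/* only one context may drain this endpoint's tx queue at a time */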
+ endpoint->tx_proc_cnt++;
+ if (endpoint->tx_proc_cnt > 1) {
+ endpoint->tx_proc_cnt--;
+ spin_unlock_bh(&target->tx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+ return;
+ }
+
+ /*
+ * drain the endpoint TX queue for transmission as long
+ * as we have enough credits.
+ */
+ INIT_LIST_HEAD(&txq);
+
+ while (true) {
+
+ if (list_empty(&endpoint->txq))
+ break;
+
+ htc_tx_pkts_get(target, endpoint, &txq);
+
+ if (list_empty(&txq))
+ break;
+
+ spin_unlock_bh(&target->tx_lock);
+
+ bundle_sent = 0;
+ n_pkts_bundle = 0;
+
+ while (true) {
+ /* try to send a bundle on each pass */
+ if ((target->tx_bndl_enable) &&
+ (get_queue_depth(&txq) >=
+ HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
+ int temp1 = 0, temp2 = 0;
+
+ htc_issue_send_bundle(endpoint, &txq,
+ &temp1, &temp2);
+ bundle_sent += temp1;
+ n_pkts_bundle += temp2;
+ }
+
+ if (list_empty(&txq))
+ break;
+
+ packet = list_first_entry(&txq, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ htc_prep_send_pkt(packet, packet->info.tx.flags,
+ 0, packet->info.tx.seqno);
+ htc_issue_send(target, packet);
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ endpoint->ep_st.tx_bundles += bundle_sent;
+ endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+ }
+
+ endpoint->tx_proc_cnt = 0;
+ spin_unlock_bh(&target->tx_lock);
+}
+
+static bool htc_try_send(struct htc_target *target,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *tx_pkt)
+{
+ struct htc_ep_callbacks ep_cb;
+ int txq_depth;
+ bool overflow = false;
+
+ ep_cb = endpoint->ep_cb;
+
+ spin_lock_bh(&target->tx_lock);
+ txq_depth = get_queue_depth(&endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ if (txq_depth >= endpoint->max_txq_depth)
+ overflow = true;
+
+ if (overflow)
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
+ endpoint->eid, overflow, txq_depth,
+ endpoint->max_txq_depth);
+
+ if (overflow && ep_cb.tx_full) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "indicating overflowed tx packet: 0x%p\n", tx_pkt);
+
+ if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
+ HTC_SEND_FULL_DROP) {
+ endpoint->ep_st.tx_dropped += 1;
+ return false;
+ }
+ }
+
+ spin_lock_bh(&target->tx_lock);
+ list_add_tail(&tx_pkt->list, &endpoint->txq);
+ spin_unlock_bh(&target->tx_lock);
+
+ htc_tx_from_ep_txq(target, endpoint);
+
+ return true;
+}
+
+static void htc_chk_ep_txq(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_endpoint_credit_dist *cred_dist;
+
+ /*
+ * Run through the credit distribution list to see if there are
+ * packets queued. NOTE: no locks need to be taken since the
+ * distribution list is not dynamic (cannot be re-ordered) and we
+ * are not modifying any state.
+ */
+ list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
+ endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+
+ spin_lock_bh(&target->tx_lock);
+ if (!list_empty(&endpoint->txq)) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "ep %d has %d credits and %d packets in tx queue\n",
+ cred_dist->endpoint,
+ endpoint->cred_dist.credits,
+ get_queue_depth(&endpoint->txq));
+ spin_unlock_bh(&target->tx_lock);
+ /*
+ * Try to start the stalled queue, this list is
+ * ordered by priority. If there are credits
+ * available the highest priority queue will get a
+ * chance to reclaim credits from lower priority
+ * ones.
+ */
+ htc_tx_from_ep_txq(target, endpoint);
+ spin_lock_bh(&target->tx_lock);
+ }
+ spin_unlock_bh(&target->tx_lock);
+ }
+}
+
+static int htc_setup_tx_complete(struct htc_target *target)
+{
+ struct htc_packet *send_pkt = NULL;
+ int status;
+
+ send_pkt = htc_get_control_buf(target, true);
+
+ if (!send_pkt)
+ return -ENOMEM;
+
+ if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
+ struct htc_setup_comp_ext_msg *setup_comp_ext;
+ u32 flags = 0;
+
+ setup_comp_ext =
+ (struct htc_setup_comp_ext_msg *)send_pkt->buf;
+ memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
+ setup_comp_ext->msg_id =
+ cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (target->msg_per_bndl_max > 0) {
+ /* Indicate HTC bundling to the target */
+ flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
+ setup_comp_ext->msg_per_rxbndl =
+ target->msg_per_bndl_max;
+ }
+
+ memcpy(&setup_comp_ext->flags, &flags,
+ sizeof(setup_comp_ext->flags));
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
+ sizeof(struct htc_setup_comp_ext_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ } else {
+ struct htc_setup_comp_msg *setup_comp;
+ setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
+ memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
+ setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
+ set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
+ sizeof(struct htc_setup_comp_msg),
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+ }
+
+ /* we want synchronous operation */
+ send_pkt->completion = NULL;
+ htc_prep_send_pkt(send_pkt, 0, 0, 0);
+ status = htc_issue_send(target, send_pkt);
+
+ if (send_pkt != NULL)
+ htc_reclaim_txctrl_buf(target, send_pkt);
+
+ return status;
+}
+
+void htc_set_credit_dist(struct htc_target *target,
+ struct htc_credit_state_info *cred_dist_cntxt,
+ u16 srvc_pri_order[], int list_len)
+{
+ struct htc_endpoint *endpoint;
+ int i, ep;
+
+ target->cred_dist_cntxt = cred_dist_cntxt;
+
+ list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
+ &target->cred_dist_list);
+
+ for (i = 0; i < list_len; i++) {
+ for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
+ endpoint = &target->endpoint[ep];
+ if (endpoint->svc_id == srvc_pri_order[i]) {
+ list_add_tail(&endpoint->cred_dist.list,
+ &target->cred_dist_list);
+ break;
+ }
+ }
+ if (ep >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return;
+ }
+ }
+}
+
+int htc_tx(struct htc_target *target, struct htc_packet *packet)
+{
+ struct htc_endpoint *endpoint;
+ struct list_head queue;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+ packet->endpoint, packet->buf, packet->act_len);
+
+ if (packet->endpoint >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ endpoint = &target->endpoint[packet->endpoint];
+
+ if (!htc_try_send(target, endpoint, packet)) {
+ packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
+ -ECANCELED : -ENOSPC;
+ INIT_LIST_HEAD(&queue);
+ list_add(&packet->list, &queue);
+ htc_tx_complete(endpoint, &queue);
+ }
+
+ return 0;
+}
+
+/* flush endpoint TX queue */
+void htc_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct list_head discard_q, container;
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+
+ if (!endpoint->svc_id) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* initialize the discard queue */
+ INIT_LIST_HEAD(&discard_q);
+
+ spin_lock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
+ if ((tag == HTC_TX_PACKET_TAG_ALL) ||
+ (tag == packet->info.tx.tag))
+ list_move_tail(&packet->list, &discard_q);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+ packet, packet->act_len,
+ packet->endpoint, packet->info.tx.tag);
+
+ INIT_LIST_HEAD(&container);
+ list_add_tail(&packet->list, &container);
+ htc_tx_complete(endpoint, &container);
+ }
+
+}
+
+static void htc_flush_txep_all(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ dump_cred_dist_stats(target);
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (endpoint->svc_id == 0)
+ /* not in use.. */
+ continue;
+ htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
+ }
+}
+
+void htc_indicate_activity_change(struct htc_target *target,
+ enum htc_endpoint_id eid, bool active)
+{
+ struct htc_endpoint *endpoint = &target->endpoint[eid];
+ bool dist = false;
+
+ if (endpoint->svc_id == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_bh(&target->tx_lock);
+
+ if (active) {
+ if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
+ endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
+ dist = true;
+ }
+ } else {
+ if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
+ endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
+ dist = true;
+ }
+ }
+
+ if (dist) {
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (dist && !active)
+ htc_chk_ep_txq(target);
+}
+
+/* HTC Rx */
+
+static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
+ int n_look_ahds)
+{
+ endpoint->ep_st.rx_pkts++;
+ if (n_look_ahds == 1)
+ endpoint->ep_st.rx_lkahds++;
+ else if (n_look_ahds > 1)
+ endpoint->ep_st.rx_bundle_lkahd++;
+}
+
+static inline bool htc_valid_rx_frame_len(struct htc_target *target,
+ enum htc_endpoint_id eid, int len)
+{
+ return (eid == target->dev->ar->ctrl_ep) ?
+ len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
+}
+
+static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
+{
+ struct list_head queue;
+
+ INIT_LIST_HEAD(&queue);
+ list_add_tail(&packet->list, &queue);
+ return htc_add_rxbuf_multiple(target, &queue);
+}
+
+static void htc_reclaim_rxbuf(struct htc_target *target,
+ struct htc_packet *packet,
+ struct htc_endpoint *ep)
+{
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
+ htc_rxpkt_reset(packet);
+ packet->status = -ECANCELED;
+ ep->ep_cb.rx(ep->target, packet);
+ } else {
+ htc_rxpkt_reset(packet);
+ htc_add_rxbuf((void *)(target), packet);
+ }
+}
+
+static void reclaim_rx_ctrl_buf(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ spin_lock_bh(&target->htc_lock);
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ spin_unlock_bh(&target->htc_lock);
+}
+
+static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
+ u32 rx_len)
+{
+ struct ath6kl_device *dev = target->dev;
+ u32 padded_len;
+ int status;
+
+ padded_len = CALC_TXRX_PADDED_LEN(dev, rx_len);
+
+ if (padded_len > packet->buf_len) {
+ ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+ padded_len, rx_len, packet->buf_len);
+ return -ENOMEM;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+ packet, packet->info.rx.exp_hdr,
+ padded_len, dev->ar->mbox_info.htc_addr, "sync");
+
+ status = hif_read_write_sync(dev->ar,
+ dev->ar->mbox_info.htc_addr,
+ packet->buf, padded_len,
+ HIF_RD_SYNC_BLOCK_FIX);
+
+ packet->status = status;
+
+ return status;
+}
+
+/*
+ * Optimization for recv packets: we can indicate a
+ * "hint" that there are more single packets to fetch
+ * on this endpoint.
+ */
+static void set_rxpkt_indication_flag(u32 lk_ahd,
+ struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
+
+ if (htc_hdr->eid == packet->endpoint) {
+ if (!list_empty(&endpoint->rx_bufq))
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+ }
+}
+
+static void chk_rx_water_mark(struct htc_endpoint *endpoint)
+{
+ struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
+
+ if (ep_cb.rx_refill_thresh > 0) {
+ spin_lock_bh(&endpoint->target->rx_lock);
+ if (get_queue_depth(&endpoint->rx_bufq)
+ < ep_cb.rx_refill_thresh) {
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ ep_cb.rx_refill(endpoint->target, endpoint->eid);
+ return;
+ }
+ spin_unlock_bh(&endpoint->target->rx_lock);
+ }
+}
+
+/* This function is called with rx_lock held */
+static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
+ u32 *lk_ahds, struct list_head *queue, int n_msg)
+{
+ struct htc_packet *packet;
+ /* FIXME: type of lk_ahds can't be right */
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
+ struct htc_ep_callbacks ep_cb;
+ int status = 0, j, full_len;
+ bool no_recycle;
+
+ full_len = CALC_TXRX_PADDED_LEN(target->dev,
+ le16_to_cpu(htc_hdr->payld_len) +
+ sizeof(*htc_hdr));
+
+ if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
+ ath6kl_warn("Rx buffer requested with invalid length\n");
+ return -EINVAL;
+ }
+
+ ep_cb = ep->ep_cb;
+ for (j = 0; j < n_msg; j++) {
+
+ /*
+		 * Reset flag: any packets allocated via the
+		 * rx_allocthresh() callback cannot be recycled on
+		 * cleanup; they must be explicitly returned.
+ */
+ no_recycle = false;
+
+ if (ep_cb.rx_allocthresh &&
+ (full_len > ep_cb.rx_alloc_thresh)) {
+ ep->ep_st.rx_alloc_thresh_hit += 1;
+ ep->ep_st.rxalloc_thresh_byte +=
+ le16_to_cpu(htc_hdr->payld_len);
+
+ spin_unlock_bh(&target->rx_lock);
+ no_recycle = true;
+
+ packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
+ full_len);
+ spin_lock_bh(&target->rx_lock);
+ } else {
+ /* refill handler is being used */
+ if (list_empty(&ep->rx_bufq)) {
+ if (ep_cb.rx_refill) {
+ spin_unlock_bh(&target->rx_lock);
+ ep_cb.rx_refill(ep->target, ep->eid);
+ spin_lock_bh(&target->rx_lock);
+ }
+ }
+
+ if (list_empty(&ep->rx_bufq))
+ packet = NULL;
+ else {
+ packet = list_first_entry(&ep->rx_bufq,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ }
+ }
+
+ if (!packet) {
+ target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ep->eid;
+ return -ENOSPC;
+ }
+
+ /* clear flags */
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.indicat_flags = 0;
+ packet->status = 0;
+
+ if (no_recycle)
+ /*
+ * flag that these packets cannot be
+ * recycled, they have to be returned to
+ * the user
+ */
+ packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
+
+ /* Caller needs to free this upon any failure */
+ list_add_tail(&packet->list, queue);
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ status = -ECANCELED;
+ break;
+ }
+
+ if (j) {
+ packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
+ packet->info.rx.exp_hdr = 0xFFFFFFFF;
+ } else
+ /* set expected look ahead */
+ packet->info.rx.exp_hdr = *lk_ahds;
+
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
+ HTC_HDR_LENGTH;
+ }
+
+ return status;
+}
+
+static int alloc_and_prep_rxpkts(struct htc_target *target,
+ u32 lk_ahds[], int msg,
+ struct htc_endpoint *endpoint,
+ struct list_head *queue)
+{
+ int status = 0;
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_frame_hdr *htc_hdr;
+ int i, n_msg;
+
+ spin_lock_bh(&target->rx_lock);
+
+ for (i = 0; i < msg; i++) {
+
+ htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
+
+ if (htc_hdr->eid >= ENDPOINT_MAX) {
+ ath6kl_err("invalid ep in look-ahead: %d\n",
+ htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->eid != endpoint->eid) {
+ ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
+ htc_hdr->eid, endpoint->eid, i);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
+ ath6kl_err("payload len %d exceeds max htc : %d !\n",
+ htc_hdr->payld_len,
+ (u32) HTC_MAX_PAYLOAD_LENGTH);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (endpoint->svc_id == 0) {
+ ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
+ status = -ENOMEM;
+ break;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
+ /*
+ * HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
+ HTC_FLG_RX_BNDL_CNT_S;
+
+ /* the count doesn't include the starter frame */
+ n_msg++;
+ if (n_msg > target->msg_per_bndl_max) {
+ status = -ENOMEM;
+ break;
+ }
+
+ endpoint->ep_st.rx_bundle_from_hdr += 1;
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc hdr indicates :%d msg can be fetched as a bundle\n",
+ n_msg);
+ } else
+ /* HTC header only indicates 1 message to fetch */
+ n_msg = 1;
+
+ /* Setup packet buffers for each message */
+ status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
+ n_msg);
+
+ /*
+		 * This is due to unavailability of buffers to receive all of
+		 * the data. Return no error so that the buffers already taken
+		 * from the queue can be used to receive partial data.
+ */
+ if (status == -ENOSPC) {
+ spin_unlock_bh(&target->rx_lock);
+ return 0;
+ }
+
+ if (status)
+ break;
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (status) {
+ list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+ }
+
+ return status;
+}
+
+static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
+{
+ if (packets->endpoint != ENDPOINT_0) {
+ WARN_ON(1);
+ return;
+ }
+
+ if (packets->status == -ECANCELED) {
+ reclaim_rx_ctrl_buf(context, packets);
+ return;
+ }
+
+ if (packets->act_len > 0) {
+ ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
+ packets->act_len + HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
+ "Unexpected ENDPOINT 0 Message",
+ packets->buf - HTC_HDR_LENGTH,
+ packets->act_len + HTC_HDR_LENGTH);
+ }
+
+ htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
+}
+
+static void htc_proc_cred_rpt(struct htc_target *target,
+ struct htc_credit_report *rpt,
+ int n_entries,
+ enum htc_endpoint_id from_ep)
+{
+ struct htc_endpoint *endpoint;
+ int tot_credits = 0, i;
+ bool dist = false;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
+
+ spin_lock_bh(&target->tx_lock);
+
+ for (i = 0; i < n_entries; i++, rpt++) {
+ if (rpt->eid >= ENDPOINT_MAX) {
+ WARN_ON(1);
+ spin_unlock_bh(&target->tx_lock);
+ return;
+ }
+
+ endpoint = &target->endpoint[rpt->eid];
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
+ rpt->eid, rpt->credits);
+
+ endpoint->ep_st.tx_cred_rpt += 1;
+ endpoint->ep_st.cred_retnd += rpt->credits;
+
+ if (from_ep == rpt->eid) {
+ /*
+ * This credit report arrived on the same endpoint
+ * indicating it arrived in an RX packet.
+ */
+ endpoint->ep_st.cred_from_rx += rpt->credits;
+ endpoint->ep_st.cred_rpt_from_rx += 1;
+ } else if (from_ep == ENDPOINT_0) {
+ /* credit arrived on endpoint 0 as a NULL message */
+ endpoint->ep_st.cred_from_ep0 += rpt->credits;
+ endpoint->ep_st.cred_rpt_ep0 += 1;
+ } else {
+ endpoint->ep_st.cred_from_other += rpt->credits;
+ endpoint->ep_st.cred_rpt_from_other += 1;
+ }
+
+ if (ENDPOINT_0 == rpt->eid)
+ /* always give endpoint 0 credits back */
+ endpoint->cred_dist.credits += rpt->credits;
+ else {
+ endpoint->cred_dist.cred_to_dist += rpt->credits;
+ dist = true;
+ }
+
+ /*
+		 * Refresh the tx depth for the distribution function that
+		 * will recover these credits. NOTE: this is only valid when
+ * there are credits to recover!
+ */
+ endpoint->cred_dist.txq_depth =
+ get_queue_depth(&endpoint->txq);
+
+ tot_credits += rpt->credits;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
+ "report indicated %d credits to distribute\n",
+ tot_credits);
+
+ if (dist) {
+ /*
+		 * This was a credit return based on completed send
+		 * operations. Note: this is done with the tx lock held.
+ */
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
+ target->cred_dist_cntxt, &target->cred_dist_list);
+
+ ath6k_credit_distribute(target->cred_dist_cntxt,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
+ }
+
+ spin_unlock_bh(&target->tx_lock);
+
+ if (tot_credits)
+ htc_chk_ep_txq(target);
+}
+
+static int htc_parse_trailer(struct htc_target *target,
+ struct htc_record_hdr *record,
+ u8 *record_buf, u32 *next_lk_ahds,
+ enum htc_endpoint_id endpoint,
+ int *n_lk_ahds)
+{
+ struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
+ struct htc_lookahead_report *lk_ahd;
+ int len;
+
+ switch (record->rec_id) {
+ case HTC_RECORD_CREDITS:
+ len = record->len / sizeof(struct htc_credit_report);
+ if (!len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ htc_proc_cred_rpt(target,
+ (struct htc_credit_report *) record_buf,
+ len, endpoint);
+ break;
+ case HTC_RECORD_LOOKAHEAD:
+ len = record->len / sizeof(*lk_ahd);
+ if (!len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ lk_ahd = (struct htc_lookahead_report *) record_buf;
+ if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
+ && next_lk_ahds) {
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+ lk_ahd->pre_valid, lk_ahd->post_valid);
+
+ /* look ahead bytes are valid, copy them over */
+ memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+ next_lk_ahds, 4);
+
+ *n_lk_ahds = 1;
+ }
+ break;
+ case HTC_RECORD_LOOKAHEAD_BUNDLE:
+ len = record->len / sizeof(*bundle_lkahd_rpt);
+ if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (next_lk_ahds) {
+ int i;
+
+ bundle_lkahd_rpt =
+ (struct htc_bundle_lkahd_rpt *) record_buf;
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+ record_buf, record->len);
+
+ for (i = 0; i < len; i++) {
+ memcpy((u8 *)&next_lk_ahds[i],
+ bundle_lkahd_rpt->lk_ahd, 4);
+ bundle_lkahd_rpt++;
+ }
+
+ *n_lk_ahds = i;
+ }
+ break;
+ default:
+ ath6kl_err("unhandled record: id:%d len:%d\n",
+ record->rec_id, record->len);
+ break;
+ }
+
+ return 0;
+
+}
+
+static int htc_proc_trailer(struct htc_target *target,
+ u8 *buf, int len, u32 *next_lk_ahds,
+ int *n_lk_ahds, enum htc_endpoint_id endpoint)
+{
+ struct htc_record_hdr *record;
+ int orig_len;
+ int status;
+ u8 *record_buf;
+ u8 *orig_buf;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);
+
+ orig_buf = buf;
+ orig_len = len;
+ status = 0;
+
+ while (len > 0) {
+
+ if (len < sizeof(struct htc_record_hdr)) {
+ status = -ENOMEM;
+ break;
+ }
+ /* these are byte aligned structs */
+ record = (struct htc_record_hdr *) buf;
+ len -= sizeof(struct htc_record_hdr);
+ buf += sizeof(struct htc_record_hdr);
+
+ if (record->len > len) {
+ ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
+ record->len, record->rec_id, len);
+ status = -ENOMEM;
+ break;
+ }
+ record_buf = buf;
+
+ status = htc_parse_trailer(target, record, record_buf,
+ next_lk_ahds, endpoint, n_lk_ahds);
+
+ if (status)
+ break;
+
+ /* advance buffer past this record for next time around */
+ buf += record->len;
+ len -= record->len;
+ }
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+ orig_buf, orig_len);
+
+ return status;
+}
+
+static int htc_proc_rxhdr(struct htc_target *target,
+ struct htc_packet *packet,
+ u32 *next_lkahds, int *n_lkahds)
+{
+ int status = 0;
+ u16 payload_len;
+ u32 lk_ahd;
+ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
+
+ if (n_lkahds != NULL)
+ *n_lkahds = 0;
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
+ packet->act_len);
+
+ /*
+ * NOTE: we cannot assume the alignment of buf, so we use the safe
+ * macros to retrieve 16 bit fields.
+ */
+ payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
+
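+	/* the first four bytes of the frame carry the lookahead value */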
+ memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
+ /*
+ * Refresh the expected header and the actual length as it
+ * was unknown when this packet was grabbed as part of the
+ * bundle.
+ */
+ packet->info.rx.exp_hdr = lk_ahd;
+ packet->act_len = payload_len + HTC_HDR_LENGTH;
+
+ /* validate the actual header that was refreshed */
+ if (packet->act_len > packet->buf_len) {
+ ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
+ payload_len, lk_ahd);
+ /*
+ * Limit this to max buffer just to print out some
+ * of the buffer.
+ */
+ packet->act_len = min(packet->act_len, packet->buf_len);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (packet->endpoint != htc_hdr->eid) {
+ ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
+ htc_hdr->eid, packet->endpoint);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+ }
+
+ if (lk_ahd != packet->info.rx.exp_hdr) {
+ ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
+ packet, packet->info.rx.rx_flags);
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+ &packet->info.rx.exp_hdr, 4);
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+ (u8 *)&lk_ahd, sizeof(lk_ahd));
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
+ if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
+ htc_hdr->ctrl[0] > payload_len) {
+ ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
+ payload_len, htc_hdr->ctrl[0]);
+ status = -ENOMEM;
+ goto fail_rx;
+ }
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
+ next_lkahds = NULL;
+ n_lkahds = NULL;
+ }
+
+ status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
+ + payload_len - htc_hdr->ctrl[0],
+ htc_hdr->ctrl[0], next_lkahds,
+ n_lkahds, packet->endpoint);
+
+ if (status)
+ goto fail_rx;
+
+ packet->act_len -= htc_hdr->ctrl[0];
+ }
+
+ packet->buf += HTC_HDR_LENGTH;
+ packet->act_len -= HTC_HDR_LENGTH;
+
+fail_rx:
+ if (status)
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
+ packet->buf,
+ packet->act_len < 256 ? packet->act_len : 256);
+ else {
+ if (packet->act_len > 0)
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
+ "HTC - Application Msg",
+ packet->buf, packet->act_len);
+ }
+
+ return status;
+}
+
+static void do_rx_completion(struct htc_endpoint *endpoint,
+ struct htc_packet *packet)
+{
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc calling ep %d recv callback on packet 0x%p\n",
+ endpoint->eid, packet);
+ endpoint->ep_cb.rx(endpoint->target, packet);
+}
+
+static int htc_issue_rxpkt_bundle(struct htc_target *target,
+ struct list_head *rxq,
+ struct list_head *sync_compq,
+ int *n_pkt_fetched, bool part_bundle)
+{
+ struct hif_scatter_req *scat_req;
+ struct htc_packet *packet;
+ int rem_space = target->dev->max_rx_bndl_sz;
+ int n_scat_pkt, status = 0, i, len;
+
+ n_scat_pkt = get_queue_depth(rxq);
+ n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
+
+ if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
+ /*
+		 * We were forced to split this bundle receive operation;
+ * all packets in this partial bundle must have their
+ * lookaheads ignored.
+ */
+ part_bundle = true;
+
+ /*
+ * This would only happen if the target ignored our max
+ * bundle limit.
+ */
+ ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
+ get_queue_depth(rxq), n_scat_pkt);
+ }
+
+ len = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
+ get_queue_depth(rxq), n_scat_pkt);
+
+ scat_req = hif_scatter_req_get(target->dev->ar);
+
+ if (scat_req == NULL)
+ goto fail_rx_pkt;
+
+ scat_req->flags = 0;
+
+ if (part_bundle)
+ scat_req->flags |=
+ HTC_SCAT_REQ_FLG_PART_BNDL;
+
+ for (i = 0; i < n_scat_pkt; i++) {
+ int pad_len;
+
+ packet = list_first_entry(rxq, struct htc_packet, list);
+ list_del(&packet->list);
+
+ pad_len = CALC_TXRX_PADDED_LEN(target->dev,
+ packet->act_len);
+
+ if ((rem_space - pad_len) < 0) {
+ list_add(&packet->list, rxq);
+ break;
+ }
+
+ rem_space -= pad_len;
+
+ if (part_bundle || (i < (n_scat_pkt - 1)))
+ /*
+			 * Packets 0..n-1 cannot be checked for look-aheads
+			 * since we are fetching a bundle; the last packet,
+			 * however, can have its lookahead used.
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* NOTE: 1 HTC packet per scatter entry */
+ scat_req->scat_list[i].buf = packet->buf;
+ scat_req->scat_list[i].len = pad_len;
+
+ packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
+
+ list_add_tail(&packet->list, sync_compq);
+
+ WARN_ON(!scat_req->scat_list[i].len);
+ len += scat_req->scat_list[i].len;
+ }
+
+ scat_req->len = len;
+ scat_req->scat_entries = i;
+
+ status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+
+ if (!status)
+ *n_pkt_fetched = i;
+
+ /* free scatter request */
+ hif_scatter_req_add(target->dev->ar, scat_req);
+
+fail_rx_pkt:
+
+ return status;
+}
+
+static int htc_proc_fetched_rxpkts(struct htc_target *target,
+ struct list_head *comp_pktq, u32 lk_ahds[],
+ int *n_lk_ahd)
+{
+ struct htc_packet *packet, *tmp_pkt;
+ struct htc_endpoint *ep;
+ int status = 0;
+
+ list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
+ list_del(&packet->list);
+ ep = &target->endpoint[packet->endpoint];
+
+ /* process header for each of the recv packet */
+ status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
+ if (status)
+ return status;
+
+ if (list_empty(comp_pktq)) {
+ /*
+ * Last packet's more packet flag is set
+ * based on the lookahead.
+ */
+ if (*n_lk_ahd > 0)
+ set_rxpkt_indication_flag(lk_ahds[0],
+ ep, packet);
+ } else
+ /*
+ * Packets in a bundle automatically have
+ * this flag set.
+ */
+ packet->info.rx.indicat_flags |=
+ HTC_RX_FLAGS_INDICATE_MORE_PKTS;
+
+ htc_update_rx_stats(ep, *n_lk_ahd);
+
+ if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
+ ep->ep_st.rx_bundl += 1;
+
+ do_rx_completion(ep, packet);
+ }
+
+ return status;
+}
+
+static int htc_fetch_rxpkts(struct htc_target *target,
+ struct list_head *rx_pktq,
+ struct list_head *comp_pktq)
+{
+ int fetched_pkts;
+ bool part_bundle = false;
+ int status = 0;
+
+ /* now go fetch the list of HTC packets */
+ while (!list_empty(rx_pktq)) {
+ fetched_pkts = 0;
+
+ if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
+ /*
+ * There are enough packets to attempt a
+ * bundle transfer and recv bundling is
+ * allowed.
+ */
+ status = htc_issue_rxpkt_bundle(target, rx_pktq,
+ comp_pktq,
+ &fetched_pkts,
+ part_bundle);
+ if (status)
+ return status;
+
+ if (!list_empty(rx_pktq))
+ part_bundle = true;
+ }
+
+ if (!fetched_pkts) {
+ struct htc_packet *packet;
+
+ packet = list_first_entry(rx_pktq, struct htc_packet,
+ list);
+
+ list_del(&packet->list);
+
+ /* fully synchronous */
+ packet->completion = NULL;
+
+ if (!list_empty(rx_pktq))
+ /*
+				 * look_aheads in all packets
+ * except the last one in the
+ * bundle must be ignored
+ */
+ packet->info.rx.rx_flags |=
+ HTC_RX_PKT_IGNORE_LOOKAHEAD;
+
+ /* go fetch the packet */
+ status = dev_rx_pkt(target, packet, packet->act_len);
+ if (status)
+ return status;
+
+ list_add_tail(&packet->list, comp_pktq);
+ }
+ }
+
+ return status;
+}
+
+static int htc_rxmsg_pending_handler(struct htc_target *target,
+ u32 msg_look_ahead[],
+ int *num_pkts)
+{
+ struct htc_packet *packets, *tmp_pkt;
+ struct htc_endpoint *endpoint;
+ struct list_head rx_pktq, comp_pktq;
+ int status = 0;
+ u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
+ int num_look_ahead = 1;
+ enum htc_endpoint_id id;
+ int n_fetched = 0;
+
+ *num_pkts = 0;
+
+ /*
+ * On first entry copy the look_aheads into our temp array for
+ * processing
+ */
+ memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+
+ while (true) {
+
+ /*
+ * First lookahead sets the expected endpoint IDs for all
+ * packets in a bundle.
+ */
+ id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
+ endpoint = &target->endpoint[id];
+
+ if (id >= ENDPOINT_MAX) {
+ ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
+ id);
+ status = -ENOMEM;
+ break;
+ }
+
+ INIT_LIST_HEAD(&rx_pktq);
+ INIT_LIST_HEAD(&comp_pktq);
+
+ /*
+		 * Try to allocate as many HTC RX packets as indicated by
+		 * the look_aheads.
+ */
+ status = alloc_and_prep_rxpkts(target, look_aheads,
+ num_look_ahead, endpoint,
+ &rx_pktq);
+ if (status)
+ break;
+
+ if (get_queue_depth(&rx_pktq) >= 2)
+ /*
+ * A recv bundle was detected, force IRQ status
+ * re-check again
+ */
+ target->dev->chk_irq_status_cnt = 1;
+
+ n_fetched += get_queue_depth(&rx_pktq);
+
+ num_look_ahead = 0;
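+		/*
+		 * lookaheads are consumed; fresh ones, if any, come from
+		 * the trailers of the packets just fetched
+		 */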
+
+ status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);
+
+ if (!status)
+ chk_rx_water_mark(endpoint);
+
+ /* Process fetched packets */
+ status = htc_proc_fetched_rxpkts(target, &comp_pktq,
+ look_aheads, &num_look_ahead);
+
+ if (!num_look_ahead || status)
+ break;
+
+ /*
+ * For SYNCH processing, if we get here, we are running
+ * through the loop again due to a detected lookahead. Set
+ * flag that we should re-check IRQ status registers again
+		 * before leaving IRQ processing; this can net better
+ * performance in high throughput situations.
+ */
+ target->dev->chk_irq_status_cnt = 1;
+ }
+
+ if (status) {
+ ath6kl_err("failed to get pending recv messages: %d\n",
+ status);
+ /*
+		 * Clean up any packets we allocated but did not actually
+		 * use to fetch data.
+ */
+ list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
+ list_del(&packets->list);
+ htc_reclaim_rxbuf(target, packets,
+ &target->endpoint[packets->endpoint]);
+ }
+
+ /* cleanup any packets in sync completion queue */
+ list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
+ list_del(&packets->list);
+ htc_reclaim_rxbuf(target, packets,
+ &target->endpoint[packets->endpoint]);
+ }
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
+ ath6kldev_rx_control(target->dev, false);
+ }
+ }
+
+ /*
+ * Before leaving, check to see if host ran out of buffers and
+ * needs to stop the receiver.
+ */
+ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
+ ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
+ ath6kldev_rx_control(target->dev, false);
+ }
+ *num_pkts = n_fetched;
+
+ return status;
+}
+
+/*
+ * Synchronously wait for a control message from the target.
+ * This function is used at initialization time ONLY; at init, messages
+ * on ENDPOINT 0 are expected.
+ */
+static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_frame_hdr *htc_hdr;
+ u32 look_ahead;
+
+ if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+ HTC_TARGET_RESPONSE_TIMEOUT))
+ return NULL;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+
+ htc_hdr = (struct htc_frame_hdr *)&look_ahead;
+
+ if (htc_hdr->eid != ENDPOINT_0)
+ return NULL;
+
+ packet = htc_get_control_buf(target, false);
+
+ if (!packet)
+ return NULL;
+
+ packet->info.rx.rx_flags = 0;
+ packet->info.rx.exp_hdr = look_ahead;
+ packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
+
+ if (packet->act_len > packet->buf_len)
+ goto fail_ctrl_rx;
+
+ /* we want synchronous operation */
+ packet->completion = NULL;
+
+ /* get the message from the device, this will block */
+ if (dev_rx_pkt(target, packet, packet->act_len))
+ goto fail_ctrl_rx;
+
+ /* process receive header */
+ packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);
+
+ if (packet->status) {
+ ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
+ packet->status);
+ goto fail_ctrl_rx;
+ }
+
+ return packet;
+
+fail_ctrl_rx:
+ if (packet != NULL) {
+ htc_rxpkt_reset(packet);
+ reclaim_rx_ctrl_buf(target, packet);
+ }
+
+ return NULL;
+}
+
+int htc_add_rxbuf_multiple(struct htc_target *target,
+ struct list_head *pkt_queue)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *first_pkt;
+ bool rx_unblock = false;
+ int status = 0, depth;
+
+ if (list_empty(pkt_queue))
+ return -ENOMEM;
+
+ first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
+
+ if (first_pkt->endpoint >= ENDPOINT_MAX)
+ return status;
+
+ depth = get_queue_depth(pkt_queue);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+ first_pkt->endpoint, depth, first_pkt->buf_len);
+
+ endpoint = &target->endpoint[first_pkt->endpoint];
+
+ if (target->htc_flags & HTC_OP_STATE_STOPPING) {
+ struct htc_packet *packet, *tmp_pkt;
+
+ /* walk through queue and mark each one canceled */
+ list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
+ packet->status = -ECANCELED;
+ list_del(&packet->list);
+ do_rx_completion(endpoint, packet);
+ }
+
+ return status;
+ }
+
+ spin_lock_bh(&target->rx_lock);
+
+ list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
+
+ /* check if we are blocked waiting for a new buffer */
+ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
+ if (target->ep_waiting == first_pkt->endpoint) {
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "receiver was blocked on ep:%d, unblocking.\n",
+ target->ep_waiting);
+ target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
+ target->ep_waiting = ENDPOINT_MAX;
+ rx_unblock = true;
+ }
+ }
+
+ spin_unlock_bh(&target->rx_lock);
+
+ if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
+ /* TODO : implement a buffer threshold count? */
+ ath6kldev_rx_control(target->dev, true);
+
+ return status;
+}
+
+void htc_flush_rx_buf(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ struct htc_packet *packet, *tmp_pkt;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ if (!endpoint->svc_id)
+ /* not in use.. */
+ continue;
+
+ spin_lock_bh(&target->rx_lock);
+ list_for_each_entry_safe(packet, tmp_pkt,
+ &endpoint->rx_bufq, list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&target->rx_lock);
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+ packet, packet->buf_len,
+ packet->endpoint);
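+			/* pkt_cntxt holds the skb backing this rx buffer */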
+ dev_kfree_skb(packet->pkt_cntxt);
+ spin_lock_bh(&target->rx_lock);
+ }
+ spin_unlock_bh(&target->rx_lock);
+ }
+}
+
+int htc_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *conn_req,
+ struct htc_service_connect_resp *conn_resp)
+{
+ struct htc_packet *rx_pkt = NULL;
+ struct htc_packet *tx_pkt = NULL;
+ struct htc_conn_service_resp *resp_msg;
+ struct htc_conn_service_msg *conn_msg;
+ struct htc_endpoint *endpoint;
+ enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
+ unsigned int max_msg_sz = 0;
+ int status = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "htc_conn_service, target:0x%p service id:0x%X\n",
+ target, conn_req->svc_id);
+
+ if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
+ /* special case for pseudo control service */
+ assigned_ep = ENDPOINT_0;
+ max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
+ } else {
+ /* allocate a packet to send to the target */
+ tx_pkt = htc_get_control_buf(target, true);
+
+ if (!tx_pkt)
+ return -ENOMEM;
+
+ conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
+ memset(conn_msg, 0, sizeof(*conn_msg));
+ conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
+ conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
+ conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
+
+ set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
+ sizeof(*conn_msg) + conn_msg->svc_meta_len,
+ ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+
+ /* we want synchronous operation */
+ tx_pkt->completion = NULL;
+ htc_prep_send_pkt(tx_pkt, 0, 0, 0);
+ status = htc_issue_send(target, tx_pkt);
+
+ if (status)
+ goto fail_tx;
+
+ /* wait for response */
+ rx_pkt = htc_wait_for_ctrl_msg(target);
+
+ if (!rx_pkt) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
+
+ if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
+ || (rx_pkt->act_len < sizeof(*resp_msg))) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ conn_resp->resp_code = resp_msg->status;
+ /* check response status */
+ if (resp_msg->status != HTC_SERVICE_SUCCESS) {
+ ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
+ resp_msg->svc_id, resp_msg->status);
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
+ max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
+ }
+
+ if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ endpoint = &target->endpoint[assigned_ep];
+ endpoint->eid = assigned_ep;
+ if (endpoint->svc_id) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+
+ /* return assigned endpoint to caller */
+ conn_resp->endpoint = assigned_ep;
+ conn_resp->len_max = max_msg_sz;
+
+ /* setup the endpoint */
+
+ /* this marks the endpoint in use */
+ endpoint->svc_id = conn_req->svc_id;
+
+ endpoint->max_txq_depth = conn_req->max_txq_depth;
+ endpoint->len_max = max_msg_sz;
+ endpoint->ep_cb = conn_req->ep_cb;
+ endpoint->cred_dist.svc_id = conn_req->svc_id;
+ endpoint->cred_dist.htc_rsvd = endpoint;
+ endpoint->cred_dist.endpoint = assigned_ep;
+ endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
+
+ if (conn_req->max_rxmsg_sz) {
+ /*
+		 * Override the cred_per_msg calculation; this optimizes
+ * the credit-low indications since the host will actually
+ * issue smaller messages in the Send path.
+ */
+ if (conn_req->max_rxmsg_sz > max_msg_sz) {
+ status = -ENOMEM;
+ goto fail_tx;
+ }
+ endpoint->cred_dist.cred_per_msg =
+ conn_req->max_rxmsg_sz / target->tgt_cred_sz;
+ } else
+ endpoint->cred_dist.cred_per_msg =
+ max_msg_sz / target->tgt_cred_sz;
+
+ if (!endpoint->cred_dist.cred_per_msg)
+ endpoint->cred_dist.cred_per_msg = 1;
+
+ /* save local connection flags */
+ endpoint->conn_flags = conn_req->flags;
+
+fail_tx:
+ if (tx_pkt)
+ htc_reclaim_txctrl_buf(target, tx_pkt);
+
+ if (rx_pkt) {
+ htc_rxpkt_reset(rx_pkt);
+ reclaim_rx_ctrl_buf(target, rx_pkt);
+ }
+
+ return status;
+}
+
+static void reset_ep_state(struct htc_target *target)
+{
+ struct htc_endpoint *endpoint;
+ int i;
+
+ for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
+ endpoint = &target->endpoint[i];
+ memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
+ endpoint->svc_id = 0;
+ endpoint->len_max = 0;
+ endpoint->max_txq_depth = 0;
+ memset(&endpoint->ep_st, 0,
+ sizeof(endpoint->ep_st));
+ INIT_LIST_HEAD(&endpoint->rx_bufq);
+ INIT_LIST_HEAD(&endpoint->txq);
+ endpoint->target = target;
+ }
+
+ /* reset distribution list */
+ INIT_LIST_HEAD(&target->cred_dist_list);
+}
+
+int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint)
+{
+ int num;
+
+ spin_lock_bh(&target->rx_lock);
+ num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
+ spin_unlock_bh(&target->rx_lock);
+ return num;
+}
+
+static void htc_setup_msg_bndl(struct htc_target *target)
+{
+ struct hif_dev_scat_sup_info *scat_info = &target->dev->hif_scat_info;
+
+ /* limit what HTC can handle */
+ target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
+ target->msg_per_bndl_max);
+
+ if (ath6kldev_setup_msg_bndl(target->dev, target->msg_per_bndl_max)) {
+ target->msg_per_bndl_max = 0;
+ return;
+ }
+
+	/* limit the bundle size to what the device layer can handle */
+ target->msg_per_bndl_max = min(scat_info->max_scat_entries,
+ target->msg_per_bndl_max);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "htc bundling allowed. max msg per htc bundle: %d\n",
+ target->msg_per_bndl_max);
+
+ /* Max rx bundle size is limited by the max tx bundle size */
+ target->dev->max_rx_bndl_sz = scat_info->max_xfer_szper_scatreq;
+	/* Max tx bundle size is limited by the extended mbox address range */
+ target->dev->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
+ scat_info->max_xfer_szper_scatreq);
+
+ ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+ target->dev->max_rx_bndl_sz, target->dev->max_tx_bndl_sz);
+
+ if (target->dev->max_tx_bndl_sz)
+ target->tx_bndl_enable = true;
+
+ if (target->dev->max_rx_bndl_sz)
+ target->rx_bndl_enable = true;
+
+ if ((target->tgt_cred_sz % target->dev->block_sz) != 0) {
+ ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
+ target->tgt_cred_sz);
+
+ /*
+		 * Disallow send bundling: since the credit size is
+		 * not aligned to a block size, the I/O block
+		 * padding will spill into the next credit buffer,
+		 * which is fatal.
+ */
+ target->tx_bndl_enable = false;
+ }
+}
+
+int htc_wait_target(struct htc_target *target)
+{
+ struct htc_packet *packet = NULL;
+ struct htc_ready_ext_msg *rdy_msg;
+ struct htc_service_connect_req connect;
+ struct htc_service_connect_resp resp;
+ int status;
+
+ /* we should be getting 1 control message that the target is ready */
+ packet = htc_wait_for_ctrl_msg(target);
+
+ if (!packet)
+ return -ENOMEM;
+
+ /* we controlled the buffer creation so it's properly aligned */
+ rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
+
+ if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
+ (packet->act_len < sizeof(struct htc_ready_msg))) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
+ status = -ENOMEM;
+ goto fail_wait_target;
+ }
+
+ target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
+ target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+ "target ready: credits: %d credit size: %d\n",
+ target->tgt_creds, target->tgt_cred_sz);
+
+ /* check if this is an extended ready message */
+ if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
+ /* this is an extended message */
+ target->htc_tgt_ver = rdy_msg->htc_ver;
+ target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
+ } else {
+ /* legacy */
+ target->htc_tgt_ver = HTC_VERSION_2P0;
+ target->msg_per_bndl_max = 0;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+ (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
+ target->htc_tgt_ver);
+
+ if (target->msg_per_bndl_max > 0)
+ htc_setup_msg_bndl(target);
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&connect, 0, sizeof(connect));
+ memset(&resp, 0, sizeof(resp));
+ connect.ep_cb.rx = htc_ctrl_rx;
+ connect.ep_cb.rx_refill = NULL;
+ connect.ep_cb.tx_full = NULL;
+ connect.max_txq_depth = NUM_CONTROL_BUFFERS;
+ connect.svc_id = HTC_CTRL_RSVD_SVC;
+
+ /* connect fake service */
+ status = htc_conn_service((void *)target, &connect, &resp);
+
+ if (status)
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
+
+fail_wait_target:
+ if (packet) {
+ htc_rxpkt_reset(packet);
+ reclaim_rx_ctrl_buf(target, packet);
+ }
+
+ return status;
+}
+
+/*
+ * Start HTC, enable interrupts and let the target know
+ * host has finished setup.
+ */
+int htc_start(struct htc_target *target)
+{
+ struct htc_packet *packet;
+ int status;
+
+ /* Disable interrupts at the chip level */
+ ath6kldev_disable_intrs(target->dev);
+
+ target->htc_flags = 0;
+ target->rx_st_flags = 0;
+
+ /* Push control receive buffers into htc control endpoint */
+ while ((packet = htc_get_control_buf(target, false)) != NULL) {
+ status = htc_add_rxbuf(target, packet);
+ if (status)
+ return status;
+ }
+
+ /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
+ ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
+ target->tgt_creds);
+
+ dump_cred_dist_stats(target);
+
+	/* Indicate setup completion to the target */
+ status = htc_setup_tx_complete(target);
+
+ if (status)
+ return status;
+
+ /* unmask interrupts */
+ status = ath6kldev_unmask_intrs(target->dev);
+
+ if (status)
+ htc_stop(target);
+
+ return status;
+}
+
+/* htc_stop: stop interrupt reception, and flush all queued buffers */
+void htc_stop(struct htc_target *target)
+{
+ spin_lock_bh(&target->htc_lock);
+ target->htc_flags |= HTC_OP_STATE_STOPPING;
+ spin_unlock_bh(&target->htc_lock);
+
+ /*
+	 * Masking interrupts is a synchronous operation; when this
+	 * function returns, all pending HIF I/O has completed and we
+	 * can safely flush the queues.
+ */
+ ath6kldev_mask_intrs(target->dev);
+
+ htc_flush_txep_all(target);
+
+ htc_flush_rx_buf(target);
+
+ reset_ep_state(target);
+}
+
+void *htc_create(struct ath6kl *ar)
+{
+ struct htc_target *target = NULL;
+ struct htc_packet *packet;
+ int status = 0, i = 0;
+ u32 block_size, ctrl_bufsz;
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target) {
+ ath6kl_err("unable to allocate memory\n");
+ return NULL;
+ }
+
+ target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
+ if (!target->dev) {
+ ath6kl_err("unable to allocate memory\n");
+ status = -ENOMEM;
+ goto fail_create_htc;
+ }
+
+ spin_lock_init(&target->htc_lock);
+ spin_lock_init(&target->rx_lock);
+ spin_lock_init(&target->tx_lock);
+
+ INIT_LIST_HEAD(&target->free_ctrl_txbuf);
+ INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
+ INIT_LIST_HEAD(&target->cred_dist_list);
+
+ target->dev->ar = ar;
+ target->dev->htc_cnxt = target;
+ target->dev->msg_pending = htc_rxmsg_pending_handler;
+ target->ep_waiting = ENDPOINT_MAX;
+
+ reset_ep_state(target);
+
+ status = ath6kldev_setup(target->dev);
+
+ if (status)
+ goto fail_create_htc;
+
+ block_size = ar->mbox_info.block_size;
+
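+	/*
+	 * a control buffer must hold the largest control message plus the
+	 * HTC header, and span at least one I/O block
+	 */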
+ ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+ (block_size + HTC_HDR_LENGTH) :
+ (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+ for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+ packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ break;
+
+ packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+ if (!packet->buf_start) {
+ kfree(packet);
+ break;
+ }
+
+ packet->buf_len = ctrl_bufsz;
+ if (i < NUM_CONTROL_RX_BUFFERS) {
+ packet->act_len = 0;
+ packet->buf = packet->buf_start;
+ packet->endpoint = ENDPOINT_0;
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ } else
+ list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
+
+fail_create_htc:
+ if (i != NUM_CONTROL_BUFFERS || status) {
+ if (target) {
+ htc_cleanup(target);
+ target = NULL;
+ }
+ }
+
+ return target;
+}
+
+/* cleanup the HTC instance */
+void htc_cleanup(struct htc_target *target)
+{
+ struct htc_packet *packet, *tmp_packet;
+
+ ath6kl_hif_cleanup_scatter(target->dev->ar);
+
+ list_for_each_entry_safe(packet, tmp_packet,
+ &target->free_ctrl_txbuf, list) {
+ list_del(&packet->list);
+ kfree(packet->buf_start);
+ kfree(packet);
+ }
+
+ list_for_each_entry_safe(packet, tmp_packet,
+ &target->free_ctrl_rxbuf, list) {
+ list_del(&packet->list);
+ kfree(packet->buf_start);
+ kfree(packet);
+ }
+
+ kfree(target->dev);
+ kfree(target);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
new file mode 100644
index 000000000000..16fa7a84a231
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -0,0 +1,596 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_H
+#define HTC_H
+
+#include "common.h"
+
+/* frame header flags */
+
+/* send direction */
+#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
+#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
+
+/* receive direction */
+#define HTC_FLG_RX_UNUSED (1 << 0)
+#define HTC_FLG_RX_TRAILER (1 << 1)
+/* Bundle count mask and shift */
+#define HTC_FLG_RX_BNDL_CNT (0xF0)
+#define HTC_FLG_RX_BNDL_CNT_S 4
+
+#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
+#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
+
+/* HTC control message IDs */
+
+#define HTC_MSG_READY_ID 1
+#define HTC_MSG_CONN_SVC_ID 2
+#define HTC_MSG_CONN_SVC_RESP_ID 3
+#define HTC_MSG_SETUP_COMPLETE_ID 4
+#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
+
+#define HTC_MAX_CTRL_MSG_LEN 256
+
+#define HTC_VERSION_2P0 0x00
+#define HTC_VERSION_2P1 0x01
+
+#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
+
+#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0
+#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1
+#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
+#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
+#define HTC_CONN_FLGS_THRESH_MASK 0x3
+
+/* connect response status codes */
+#define HTC_SERVICE_SUCCESS 0
+#define HTC_SERVICE_NOT_FOUND 1
+#define HTC_SERVICE_FAILED 2
+
+/* no resources (i.e. no more endpoints) */
+#define HTC_SERVICE_NO_RESOURCES 3
+
+/* specific service is not allowing any more endpoints */
+#define HTC_SERVICE_NO_MORE_EP 4
+
+/* report record IDs */
+#define HTC_RECORD_NULL 0
+#define HTC_RECORD_CREDITS 1
+#define HTC_RECORD_LOOKAHEAD 2
+#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
+
+#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
+
+#define MAKE_SERVICE_ID(group, index) \
+ (int)(((int)group << 8) | (int)(index))
+
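+/*
+ * For illustration only (not used by the code): with the service group
+ * IDs defined below, MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1) expands to
+ * ((1 << 8) | 1) == 0x101, which is WMI_DATA_BE_SVC.
+ */
+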
+/* NOTE: service ID of 0x0000 is reserved and should never be used */
+#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
+#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
+#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
+#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
+#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
+#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
+#define WMI_MAX_SERVICES 5
+
+/* reserved and used to flush ALL packets */
+#define HTC_TX_PACKET_TAG_ALL 0
+#define HTC_SERVICE_TX_PACKET_TAG 1
+#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9)
+
+/* more packets on this endpoint are being fetched */
+#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0)
+
+/* TODO.. for BMI */
+#define ENDPOINT1 0
+/* TODO -remove me, but we have to fix BMI first */
+#define HTC_MAILBOX_NUM_MAX 4
+
+/* enable send bundle padding for this endpoint */
+#define HTC_FLGS_TX_BNDL_PAD_EN (1 << 0)
+#define HTC_EP_ACTIVE ((u32) (1u << 31))
+
+/* HTC operational parameters */
+#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
+#define HTC_TARGET_DEBUG_INTR_MASK 0x01
+#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
+
+#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
+#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
+
+/* packet flags */
+
+#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
+#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
+#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
+#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
+
+/* scatter request flags */
+
+#define HTC_SCAT_REQ_FLG_PART_BNDL (1 << 0)
+
+#define NUM_CONTROL_BUFFERS 8
+#define NUM_CONTROL_TX_BUFFERS 2
+#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
+
+#define HTC_RECV_WAIT_BUFFERS (1 << 0)
+#define HTC_OP_STATE_STOPPING (1 << 0)
+
+/*
+ * The frame header length and message formats defined herein were selected
+ * to accommodate optimal alignment for target processing. This reduces
+ * code size and improves performance. Any changes to the header length may
+ * alter the alignment and cause exceptions on the target. When adding to
+ * the message structures, ensure that fields are properly aligned.
+ */
+
+/* HTC frame header
+ *
+ * NOTE: do not remove or re-arrange the fields, these are minimally
+ * required to take advantage of 4-byte lookaheads in some hardware
+ * implementations.
+ */
+struct htc_frame_hdr {
+ u8 eid;
+ u8 flags;
+
+ /* length of data (including trailer) that follows the header */
+ __le16 payld_len;
+
+ /* end of 4-byte lookahead */
+
+ u8 ctrl[2];
+} __packed;
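+
+/*
+ * Purely descriptive: the header above is 6 bytes on the wire; eid,
+ * flags and payld_len make up the first 4 bytes, which is the word a
+ * 4-byte hardware lookahead returns.
+ */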
+
+/* HTC ready message */
+struct htc_ready_msg {
+ __le16 msg_id;
+ __le16 cred_cnt;
+ __le16 cred_sz;
+ u8 max_ep;
+ u8 pad;
+} __packed;
+
+/* extended HTC ready message */
+struct htc_ready_ext_msg {
+ struct htc_ready_msg ver2_0_info;
+ u8 htc_ver;
+ u8 msg_per_htc_bndl;
+} __packed;
+
+/* connect service */
+struct htc_conn_service_msg {
+ __le16 msg_id;
+ __le16 svc_id;
+ __le16 conn_flags;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+/* connect response */
+struct htc_conn_service_resp {
+ __le16 msg_id;
+ __le16 svc_id;
+ u8 status;
+ u8 eid;
+ __le16 max_msg_sz;
+ u8 svc_meta_len;
+ u8 pad;
+} __packed;
+
+struct htc_setup_comp_msg {
+ __le16 msg_id;
+} __packed;
+
+/* extended setup completion message */
+struct htc_setup_comp_ext_msg {
+ __le16 msg_id;
+ __le32 flags;
+ u8 msg_per_rxbndl;
+ u8 Rsvd[3];
+} __packed;
+
+struct htc_record_hdr {
+ u8 rec_id;
+ u8 len;
+} __packed;
+
+struct htc_credit_report {
+ u8 eid;
+ u8 credits;
+} __packed;
+
+/*
+ * NOTE: The lk_ahd array is guarded by pre_valid and
+ * post_valid guard bytes. The pre_valid byte must
+ * equal the inverse of the post_valid byte.
+ */
+struct htc_lookahead_report {
+ u8 pre_valid;
+ u8 lk_ahd[4];
+ u8 post_valid;
+} __packed;
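+
+/*
+ * A sketch of the guard check described above (illustrative only,
+ * 'rpt' is a hypothetical pointer to a received report):
+ *
+ *     valid = (rpt->pre_valid == ((~rpt->post_valid) & 0xFF));
+ */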
+
+struct htc_bundle_lkahd_rpt {
+ u8 lk_ahd[4];
+} __packed;
+
+/* Current service IDs */
+
+enum htc_service_grp_ids {
+ RSVD_SERVICE_GROUP = 0,
+ WMI_SERVICE_GROUP = 1,
+
+ HTC_TEST_GROUP = 254,
+ HTC_SERVICE_GROUP_LAST = 255
+};
+
+/* ------ endpoint IDS ------ */
+
+enum htc_endpoint_id {
+ ENDPOINT_UNUSED = -1,
+ ENDPOINT_0 = 0,
+ ENDPOINT_1 = 1,
+ ENDPOINT_2 = 2,
+ ENDPOINT_3,
+ ENDPOINT_4,
+ ENDPOINT_5,
+ ENDPOINT_6,
+ ENDPOINT_7,
+ ENDPOINT_8,
+ ENDPOINT_MAX,
+};
+
+struct htc_tx_packet_info {
+ u16 tag;
+ int cred_used;
+ u8 flags;
+ int seqno;
+};
+
+struct htc_rx_packet_info {
+ u32 exp_hdr;
+ u32 rx_flags;
+ u32 indicat_flags;
+};
+
+struct htc_target;
+
+/* wrapper around endpoint-specific packets */
+struct htc_packet {
+ struct list_head list;
+
+ /* caller's per packet specific context */
+ void *pkt_cntxt;
+
+ /*
+	 * the true buffer start; the caller can store the real
+ * buffer start here. In receive callbacks, the HTC layer
+ * sets buf to the start of the payload past the header.
+ * This field allows the caller to reset buf when it recycles
+ * receive packets back to HTC.
+ */
+ u8 *buf_start;
+
+ /*
+ * Pointer to the start of the buffer. In the transmit
+ * direction this points to the start of the payload. In the
+ * receive direction, however, the buffer when queued up
+ * points to the start of the HTC header but when returned
+ * to the caller points to the start of the payload
+ */
+ u8 *buf;
+ u32 buf_len;
+
+ /* actual length of payload */
+ u32 act_len;
+
+ /* endpoint that this packet was sent/recv'd from */
+ enum htc_endpoint_id endpoint;
+
+ /* completion status */
+
+ int status;
+ union {
+ struct htc_tx_packet_info tx;
+ struct htc_rx_packet_info rx;
+ } info;
+
+ void (*completion) (struct htc_target *, struct htc_packet *);
+ struct htc_target *context;
+};
+
+enum htc_send_full_action {
+ HTC_SEND_FULL_KEEP = 0,
+ HTC_SEND_FULL_DROP = 1,
+};
+
+struct htc_ep_callbacks {
+ void (*rx) (struct htc_target *, struct htc_packet *);
+ void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
+ enum htc_send_full_action (*tx_full) (struct htc_target *,
+ struct htc_packet *);
+ struct htc_packet *(*rx_allocthresh) (struct htc_target *,
+ enum htc_endpoint_id, int);
+ int rx_alloc_thresh;
+ int rx_refill_thresh;
+};
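+
+/*
+ * Note on the thresholds above (descriptive only): rx_refill is meant
+ * to be invoked when the number of receive buffers queued on the
+ * endpoint drops to rx_refill_thresh, and rx_allocthresh is used to
+ * allocate buffers for incoming messages larger than rx_alloc_thresh
+ * (see the A-MSDU setup in init.c).
+ */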
+
+/* service connection information */
+struct htc_service_connect_req {
+ u16 svc_id;
+ u16 conn_flags;
+ struct htc_ep_callbacks ep_cb;
+ int max_txq_depth;
+ u32 flags;
+ unsigned int max_rxmsg_sz;
+};
+
+/* service connection response information */
+struct htc_service_connect_resp {
+ u8 buf_len;
+ u8 act_len;
+ enum htc_endpoint_id endpoint;
+ unsigned int len_max;
+ u8 resp_code;
+};
+
+/* endpoint credit distribution structure */
+struct htc_endpoint_credit_dist {
+ struct list_head list;
+
+ /* Service ID (set by HTC) */
+ u16 svc_id;
+
+	/* endpoint for this distribution struct (set by HTC) */
+ enum htc_endpoint_id endpoint;
+
+ u32 dist_flags;
+
+ /*
+ * credits for normal operation, anything above this
+ * indicates the endpoint is over-subscribed.
+ */
+ int cred_norm;
+
+ /* floor for credit distribution */
+ int cred_min;
+
+ int cred_assngd;
+
+ /* current credits available */
+ int credits;
+
+ /*
+ * pending credits to distribute on this endpoint, this
+ * is set by HTC when credit reports arrive. The credit
+	 * distribution function sets this to zero when it distributes
+ * the credits.
+ */
+ int cred_to_dist;
+
+ /*
+ * the number of credits that the current pending TX packet needs
+	 * to transmit. This is set by HTC when the endpoint needs credits in
+ * order to transmit.
+ */
+ int seek_cred;
+
+ /* size in bytes of each credit */
+ int cred_sz;
+
+	/* credits required for a maximum-sized message */
+ int cred_per_msg;
+
+ /* reserved for HTC use */
+ void *htc_rsvd;
+
+ /*
+	 * current depth of TX queue, i.e. messages waiting for credits.
+ * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
+ * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
+ * that has non-zero credits to recover.
+ */
+ int txq_depth;
+};
+
+/*
+ * credit distribution code that is passed into the distribution
+ * function; there are mandatory and optional codes that must be handled
+ */
+enum htc_credit_dist_reason {
+ HTC_CREDIT_DIST_SEND_COMPLETE = 0,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1,
+ HTC_CREDIT_DIST_SEEK_CREDITS,
+};
+
+struct htc_credit_state_info {
+ int total_avail_credits;
+ int cur_free_credits;
+ struct list_head lowestpri_ep_dist;
+};
+
+/* endpoint statistics */
+struct htc_endpoint_stats {
+ /*
+ * number of times the host set the credit-low flag in a send
+ * message on this endpoint
+ */
+ u32 cred_low_indicate;
+
+ u32 tx_issued;
+ u32 tx_pkt_bundled;
+ u32 tx_bundles;
+ u32 tx_dropped;
+
+ /* running count of total credit reports received for this endpoint */
+ u32 tx_cred_rpt;
+
+ /* credit reports received from this endpoint's RX packets */
+ u32 cred_rpt_from_rx;
+
+ /* credit reports received from RX packets of other endpoints */
+ u32 cred_rpt_from_other;
+
+ /* credit reports received from endpoint 0 RX packets */
+ u32 cred_rpt_ep0;
+
+ /* count of credits received via Rx packets on this endpoint */
+ u32 cred_from_rx;
+
+ /* count of credits received via another endpoint */
+ u32 cred_from_other;
+
+	/* count of credits received via endpoint 0 */
+ u32 cred_from_ep0;
+
+	/* count of consumed credits */
+ u32 cred_cosumd;
+
+ /* count of credits returned */
+ u32 cred_retnd;
+
+ u32 rx_pkts;
+
+ /* count of lookahead records found in Rx msg */
+ u32 rx_lkahds;
+
+ /* count of recv packets received in a bundle */
+ u32 rx_bundl;
+
+ /* count of number of bundled lookaheads */
+ u32 rx_bundle_lkahd;
+
+ /* count of the number of bundle indications from the HTC header */
+ u32 rx_bundle_from_hdr;
+
+ /* the number of times the recv allocation threshold was hit */
+ u32 rx_alloc_thresh_hit;
+
+ /* total number of bytes */
+	/* total number of bytes in packets that hit the rx alloc threshold */
+};
+
+struct htc_endpoint {
+ enum htc_endpoint_id eid;
+ u16 svc_id;
+ struct list_head txq;
+ struct list_head rx_bufq;
+ struct htc_endpoint_credit_dist cred_dist;
+ struct htc_ep_callbacks ep_cb;
+ int max_txq_depth;
+ int len_max;
+ int tx_proc_cnt;
+ int rx_proc_cnt;
+ struct htc_target *target;
+ u8 seqno;
+ u32 conn_flags;
+ struct htc_endpoint_stats ep_st;
+};
+
+struct htc_control_buffer {
+ struct htc_packet packet;
+ u8 *buf;
+};
+
+struct ath6kl_device;
+
+/* our HTC target state */
+struct htc_target {
+ struct htc_endpoint endpoint[ENDPOINT_MAX];
+ struct list_head cred_dist_list;
+ struct list_head free_ctrl_txbuf;
+ struct list_head free_ctrl_rxbuf;
+ struct htc_credit_state_info *cred_dist_cntxt;
+ int tgt_creds;
+ unsigned int tgt_cred_sz;
+ spinlock_t htc_lock;
+ spinlock_t rx_lock;
+ spinlock_t tx_lock;
+ struct ath6kl_device *dev;
+ u32 htc_flags;
+ u32 rx_st_flags;
+ enum htc_endpoint_id ep_waiting;
+ u8 htc_tgt_ver;
+
+ /* max messages per bundle for HTC */
+ int msg_per_bndl_max;
+
+ bool tx_bndl_enable;
+ int rx_bndl_enable;
+};
+
+void *htc_create(struct ath6kl *ar);
+void htc_set_credit_dist(struct htc_target *target,
+ struct htc_credit_state_info *cred_info,
+ u16 svc_pri_order[], int len);
+int htc_wait_target(struct htc_target *target);
+int htc_start(struct htc_target *target);
+int htc_conn_service(struct htc_target *target,
+ struct htc_service_connect_req *req,
+ struct htc_service_connect_resp *resp);
+int htc_tx(struct htc_target *target, struct htc_packet *packet);
+void htc_stop(struct htc_target *target);
+void htc_cleanup(struct htc_target *target);
+void htc_flush_txep(struct htc_target *target,
+ enum htc_endpoint_id endpoint, u16 tag);
+void htc_flush_rx_buf(struct htc_target *target);
+void htc_indicate_activity_change(struct htc_target *target,
+ enum htc_endpoint_id endpoint, bool active);
+int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint);
+int htc_add_rxbuf_multiple(struct htc_target *target, struct list_head *pktq);
+
+static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
+ u8 *buf, unsigned int len,
+ enum htc_endpoint_id eid, u16 tag)
+{
+ packet->pkt_cntxt = context;
+ packet->buf = buf;
+ packet->act_len = len;
+ packet->endpoint = eid;
+ packet->info.tx.tag = tag;
+}
+
+static inline void htc_rxpkt_reset(struct htc_packet *packet)
+{
+ packet->buf = packet->buf_start;
+ packet->act_len = 0;
+}
+
+static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context,
+ u8 *buf, unsigned long len,
+ enum htc_endpoint_id eid)
+{
+ packet->pkt_cntxt = context;
+ packet->buf = buf;
+ packet->buf_start = buf;
+ packet->buf_len = len;
+ packet->endpoint = eid;
+}
+
+static inline int get_queue_depth(struct list_head *queue)
+{
+ struct list_head *tmp_list;
+ int depth = 0;
+
+ list_for_each(tmp_list, queue)
+ depth++;
+
+ return depth;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/htc_hif.c
new file mode 100644
index 000000000000..1bcaaec579c5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "target.h"
+#include "hif-ops.h"
+#include "htc_hif.h"
+#include "debug.h"
+
+#define MAILBOX_FOR_BLOCK_SIZE 1
+
+#define ATH6KL_TIME_QUANTUM 10 /* in ms */
+
+static void ath6kl_add_io_pkt(struct ath6kl_device *dev,
+ struct htc_packet *packet)
+{
+ spin_lock_bh(&dev->lock);
+ list_add_tail(&packet->list, &dev->reg_io);
+ spin_unlock_bh(&dev->lock);
+}
+
+static struct htc_packet *ath6kl_get_io_pkt(struct ath6kl_device *dev)
+{
+ struct htc_packet *packet = NULL;
+
+ spin_lock_bh(&dev->lock);
+ if (!list_empty(&dev->reg_io)) {
+ packet = list_first_entry(&dev->reg_io,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ }
+ spin_unlock_bh(&dev->lock);
+
+ return packet;
+}
+
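+/*
+ * Copy between the contiguous bounce buffer (virt_dma_buf) and the
+ * individual scatter entries. This is only needed in virtual scatter
+ * mode, where the HIF layer cannot do real scatter-gather I/O (see
+ * ath6kldev_setup_virt_scat_sup() below).
+ */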
+static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+{
+ u8 *buf;
+ int i;
+
+ buf = req->virt_dma_buf;
+
+ for (i = 0; i < req->scat_entries; i++) {
+
+ if (from_dma)
+ memcpy(req->scat_list[i].buf, buf,
+ req->scat_list[i].len);
+ else
+ memcpy(buf, req->scat_list[i].buf,
+ req->scat_list[i].len);
+
+ buf += req->scat_list[i].len;
+ }
+
+ return 0;
+}
+
+int ath6kldev_rw_comp_handler(void *context, int status)
+{
+ struct htc_packet *packet = context;
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
+		   "ath6kldev_rw_comp_handler (pkt:0x%p, status: %d)\n",
+ packet, status);
+
+ packet->status = status;
+ packet->completion(packet->context, packet);
+
+ return 0;
+}
+
+static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+{
+ u32 dummy;
+ int status;
+
+ ath6kl_err("target debug interrupt\n");
+
+ ath6kl_target_failure(dev->ar);
+
+ /*
+ * read counter to clear the interrupt, the debug error interrupt is
+ * counter 0.
+ */
+ status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+ (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
+ if (status)
+ WARN_ON(1);
+
+ return status;
+}
+
+/* mailbox recv message polling */
+int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+ int timeout)
+{
+ struct ath6kl_irq_proc_registers *rg;
+ int status = 0, i;
+ u8 htc_mbox = 1 << HTC_MAILBOX;
+
+ for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
+ /* this is the standard HIF way, load the reg table */
+ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+ (u8 *) &dev->irq_proc_reg,
+ sizeof(dev->irq_proc_reg),
+ HIF_RD_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("failed to read reg table\n");
+ return status;
+ }
+
+ /* check for MBOX data and valid lookahead */
+ if (dev->irq_proc_reg.host_int_status & htc_mbox) {
+ if (dev->irq_proc_reg.rx_lkahd_valid &
+ htc_mbox) {
+ /*
+ * Mailbox has a message and the look ahead
+ * is valid.
+ */
+ rg = &dev->irq_proc_reg;
+ *lk_ahd =
+ le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+ break;
+ }
+ }
+
+ /* delay a little */
+ mdelay(ATH6KL_TIME_QUANTUM);
+ ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+ }
+
+ if (i == 0) {
+ ath6kl_err("timeout waiting for recv message\n");
+ status = -ETIME;
+ /* check if the target asserted */
+ if (dev->irq_proc_reg.counter_int_status &
+ ATH6KL_TARGET_DEBUG_INTR_MASK)
+ /*
+ * Target failure handler will be called in case of
+ * an assert.
+ */
+ ath6kldev_proc_dbg_intr(dev);
+ }
+
+ return status;
+}
+
+/*
+ * Disable packet reception (used in case the host runs out of buffers)
+ * using the interrupt enable registers through the host I/F
+ */
+int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+{
+ struct ath6kl_irq_enable_reg regs;
+ int status = 0;
+
+ /* take the lock to protect interrupt enable shadows */
+ spin_lock_bh(&dev->lock);
+
+ if (enable_rx)
+ dev->irq_en_reg.int_status_en |=
+ SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+ else
+ dev->irq_en_reg.int_status_en &=
+ ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+ spin_unlock_bh(&dev->lock);
+
+ status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en,
+ sizeof(struct ath6kl_irq_enable_reg),
+ HIF_WR_SYNC_BYTE_INC);
+
+ return status;
+}
+
+static void ath6kldev_rw_async_handler(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct ath6kl_device *dev = target->dev;
+ struct hif_scatter_req *req = packet->pkt_cntxt;
+
+ req->status = packet->status;
+
+ ath6kl_add_io_pkt(dev, packet);
+
+ req->complete(req);
+}
+
+static int ath6kldev_rw_scatter(struct ath6kl *ar, struct hif_scatter_req *req)
+{
+ struct ath6kl_device *dev = ar->htc_target->dev;
+ struct htc_packet *packet = NULL;
+ int status = 0;
+ u32 request = req->req;
+ u8 *virt_dma_buf;
+
+ if (!req->len)
+ return 0;
+
+ if (request & HIF_ASYNCHRONOUS) {
+ /* use an I/O packet to carry this request */
+ packet = ath6kl_get_io_pkt(dev);
+ if (!packet) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ packet->pkt_cntxt = req;
+ packet->completion = ath6kldev_rw_async_handler;
+ packet->context = ar->htc_target;
+ }
+
+ virt_dma_buf = req->virt_dma_buf;
+
+ if (request & HIF_ASYNCHRONOUS)
+ status = hif_write_async(dev->ar, req->addr, virt_dma_buf,
+ req->len, request, packet);
+ else
+ status = hif_read_write_sync(dev->ar, req->addr, virt_dma_buf,
+ req->len, request);
+
+out:
+ if (status)
+ if (request & HIF_ASYNCHRONOUS) {
+ if (packet != NULL)
+ ath6kl_add_io_pkt(dev, packet);
+ req->status = status;
+ req->complete(req);
+ status = 0;
+ }
+
+ return status;
+}
+
+int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read)
+{
+ int status = 0;
+
+ if (read) {
+ scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
+ scat_req->addr = dev->ar->mbox_info.htc_addr;
+ } else {
+ scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
+
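+		/*
+		 * Descriptive note: writes longer than the mailbox width are
+		 * directed at the extended mailbox address, which provides a
+		 * wider window for large bundled transfers.
+		 */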
+ scat_req->addr =
+ (scat_req->len > HIF_MBOX_WIDTH) ?
+ dev->ar->mbox_info.htc_ext_addr :
+ dev->ar->mbox_info.htc_addr;
+ }
+
+ ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
+ "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+ scat_req->scat_entries, scat_req->len,
+ scat_req->addr, !read ? "async" : "sync",
+ (read) ? "rd" : "wr");
+
+ if (!read && dev->virt_scat)
+ status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+
+ if (status) {
+ if (!read) {
+ scat_req->status = status;
+ scat_req->complete(scat_req);
+ return 0;
+ }
+ return status;
+ }
+
+ status = dev->hif_scat_info.rw_scat_func(dev->ar, scat_req);
+
+ if (read) {
+ /* in sync mode, we can touch the scatter request */
+ scat_req->status = status;
+ if (!status && dev->virt_scat)
+ scat_req->status =
+ ath6kldev_cp_scat_dma_buf(scat_req, true);
+ }
+
+ return status;
+}
+
+/*
+ * function to set up virtual scatter support if HIF
+ * layer has not implemented the interface.
+ */
+static int ath6kldev_setup_virt_scat_sup(struct ath6kl_device *dev)
+{
+ struct hif_scatter_req *scat_req;
+ int buf_sz, scat_req_sz, scat_list_sz;
+ int i, status = 0;
+ u8 *virt_dma_buf;
+
+ buf_sz = 2 * L1_CACHE_BYTES + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+
+ scat_list_sz = (ATH6KL_SCATTER_ENTRIES_PER_REQ - 1) *
+ sizeof(struct hif_scatter_item);
+ scat_req_sz = sizeof(*scat_req) + scat_list_sz;
+
+ for (i = 0; i < ATH6KL_SCATTER_REQS; i++) {
+ scat_req = kzalloc(scat_req_sz, GFP_KERNEL);
+
+ if (!scat_req) {
+ status = -ENOMEM;
+ break;
+ }
+
+ virt_dma_buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!virt_dma_buf) {
+ kfree(scat_req);
+ status = -ENOMEM;
+ break;
+ }
+
+ scat_req->virt_dma_buf =
+ (u8 *)L1_CACHE_ALIGN((unsigned long)virt_dma_buf);
+
+ /* we emulate a DMA bounce interface */
+ hif_scatter_req_add(dev->ar, scat_req);
+ }
+
+ if (status)
+ ath6kl_hif_cleanup_scatter(dev->ar);
+ else {
+ dev->hif_scat_info.rw_scat_func = ath6kldev_rw_scatter;
+ dev->hif_scat_info.max_scat_entries =
+ ATH6KL_SCATTER_ENTRIES_PER_REQ;
+ dev->hif_scat_info.max_xfer_szper_scatreq =
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+ dev->virt_scat = true;
+ }
+
+ return status;
+}
+
+int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_trans)
+{
+ int status;
+
+ status = ath6kl_hif_enable_scatter(dev->ar, &dev->hif_scat_info);
+
+ if (status) {
+ ath6kl_warn("hif does not support scatter requests (%d)\n",
+ status);
+
+ /* we can try to use a virtual DMA scatter mechanism */
+ status = ath6kldev_setup_virt_scat_sup(dev);
+ }
+
+ if (!status)
+ ath6kl_dbg(ATH6KL_DBG_ANY, "max scatter items:%d: maxlen:%d\n",
+ dev->hif_scat_info.max_scat_entries,
+ dev->hif_scat_info.max_xfer_szper_scatreq);
+
+ return status;
+}
+
+static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+{
+ u8 counter_int_status;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
+
+ counter_int_status = dev->irq_proc_reg.counter_int_status &
+ dev->irq_en_reg.cntr_int_status_en;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
+ counter_int_status);
+
+ /*
+ * NOTE: other modules like GMBOX may use the counter interrupt for
+ * credit flow control on other counters, we only need to check for
+ * the debug assertion counter interrupt.
+ */
+ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
+ return ath6kldev_proc_dbg_intr(dev);
+
+ return 0;
+}
+
+static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+{
+ int status;
+ u8 error_int_status;
+ u8 reg_buf[4];
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
+
+ error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
+ if (!error_int_status) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
+ error_int_status);
+
+ if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
+
+ if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
+ ath6kl_err("rx underflow\n");
+
+ if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
+ ath6kl_err("tx overflow\n");
+
+ /* Clear the interrupt */
+ dev->irq_proc_reg.error_int_status &= ~error_int_status;
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ reg_buf[0] = error_int_status;
+ reg_buf[1] = 0;
+ reg_buf[2] = 0;
+ reg_buf[3] = 0;
+
+ status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
+ reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
+
+ if (status)
+ WARN_ON(1);
+
+ return status;
+}
+
+static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+{
+ int status;
+ u8 cpu_int_status;
+ u8 reg_buf[4];
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
+
+ cpu_int_status = dev->irq_proc_reg.cpu_int_status &
+ dev->irq_en_reg.cpu_int_status_en;
+ if (!cpu_int_status) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
+ cpu_int_status);
+
+ /* Clear the interrupt */
+ dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
+
+ /*
+	 * Set up the register transfer buffer to hit the register 4 times,
+ * this is done to make the access 4-byte aligned to mitigate issues
+ * with host bus interconnects that restrict bus transfer lengths to
+ * be a multiple of 4-bytes.
+ */
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ reg_buf[0] = cpu_int_status;
+ /* the remaining are set to zero which have no-effect */
+ reg_buf[1] = 0;
+ reg_buf[2] = 0;
+ reg_buf[3] = 0;
+
+ status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
+ reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
+
+ if (status)
+ WARN_ON(1);
+
+ return status;
+}
+
+/* process pending interrupts synchronously */
+static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
+{
+ struct ath6kl_irq_proc_registers *rg;
+ int status = 0;
+ u8 host_int_status = 0;
+ u32 lk_ahd = 0;
+ u8 htc_mbox = 1 << HTC_MAILBOX;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
+
+ /*
+ * NOTE: HIF implementation guarantees that the context of this
+ * call allows us to perform SYNCHRONOUS I/O, that is we can block,
+ * sleep or call any API that can block or switch thread/task
+ * contexts. This is a fully schedulable context.
+ */
+
+ /*
+	 * Process pending interrupts only when int_status_en is set;
+	 * reading the registers otherwise may result in unnecessary bus
+	 * transactions, and the target may be unresponsive at the time.
+ */
+ if (dev->irq_en_reg.int_status_en) {
+ /*
+ * Read the first 28 bytes of the HTC register table. This
+ * will yield us the value of different int status
+ * registers and the lookahead registers.
+ *
+ * length = sizeof(int_status) + sizeof(cpu_int_status)
+ * + sizeof(error_int_status) +
+ * sizeof(counter_int_status) +
+ * sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
+ * + sizeof(hole) + sizeof(rx_lkahd) +
+ * sizeof(int_status_en) +
+ * sizeof(cpu_int_status_en) +
+ * sizeof(err_int_status_en) +
+ * sizeof(cntr_int_status_en);
+ */
+ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
+ (u8 *) &dev->irq_proc_reg,
+ sizeof(dev->irq_proc_reg),
+ HIF_RD_SYNC_BYTE_INC);
+ if (status)
+ goto out;
+
+ if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
+ ath6kl_dump_registers(dev, &dev->irq_proc_reg,
+ &dev->irq_en_reg);
+
+ /* Update only those registers that are enabled */
+ host_int_status = dev->irq_proc_reg.host_int_status &
+ dev->irq_en_reg.int_status_en;
+
+ /* Look at mbox status */
+ if (host_int_status & htc_mbox) {
+ /*
+			 * Mask out the pending mbox value; we use the lookahead
+			 * value as the real flag for mbox processing.
+ */
+ host_int_status &= ~htc_mbox;
+ if (dev->irq_proc_reg.rx_lkahd_valid &
+ htc_mbox) {
+ rg = &dev->irq_proc_reg;
+ lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
+ if (!lk_ahd)
+ ath6kl_err("lookAhead is zero!\n");
+ }
+ }
+ }
+
+ if (!host_int_status && !lk_ahd) {
+ *done = true;
+ goto out;
+ }
+
+ if (lk_ahd) {
+ int fetched = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
+ /*
+ * Mailbox Interrupt, the HTC layer may issue async
+ * requests to empty the mailbox. When emptying the recv
+ * mailbox we use the async handler above called from the
+ * completion routine of the callers read request. This can
+ * improve performance by reducing context switching when
+ * we rapidly pull packets.
+ */
+ status = dev->msg_pending(dev->htc_cnxt, &lk_ahd, &fetched);
+ if (status)
+ goto out;
+
+ if (!fetched)
+ /*
+ * HTC could not pull any messages out due to lack
+ * of resources.
+ */
+ dev->chk_irq_status_cnt = 0;
+ }
+
+ /* now handle the rest of them */
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "valid interrupt source(s) for other interrupts: 0x%x\n",
+ host_int_status);
+
+ if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
+ /* CPU Interrupt */
+ status = ath6kldev_proc_cpu_intr(dev);
+ if (status)
+ goto out;
+ }
+
+ if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
+ /* Error Interrupt */
+ status = ath6kldev_proc_err_intr(dev);
+ if (status)
+ goto out;
+ }
+
+ if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
+ /* Counter Interrupt */
+ status = ath6kldev_proc_counter_intr(dev);
+
+out:
+ /*
+ * An optimization to bypass reading the IRQ status registers
+	 * unnecessarily, which can re-wake the target: if upper layers
+	 * determine that we are in a low-throughput mode, we can rely on
+	 * taking another interrupt rather than re-checking the status
+	 * registers.
+	 *
+	 * NOTE: host interfaces that detect pending mbox messages at the
+	 * HIF layer cannot use this optimization due to possible side
+	 * effects; SPI requires the host to drain all messages from the
+	 * mailbox before exiting the ISR routine.
+ */
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+ "bypassing irq status re-check, forcing done\n");
+
+ *done = true;
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ,
+		   "proc_pending_irqs: (done:%d, status=%d)\n", *done, status);
+
+ return status;
+}
+
+/* interrupt handler, kicks off all interrupt processing */
+int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+{
+ struct ath6kl_device *dev = ar->htc_target->dev;
+ int status = 0;
+ bool done = false;
+
+ /*
+ * Reset counter used to flag a re-scan of IRQ status registers on
+ * the target.
+ */
+ dev->chk_irq_status_cnt = 0;
+
+ /*
+ * IRQ processing is synchronous, interrupt status registers can be
+ * re-read.
+ */
+ while (!done) {
+ status = proc_pending_irqs(dev, &done);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+{
+ struct ath6kl_irq_enable_reg regs;
+ int status;
+
+ spin_lock_bh(&dev->lock);
+
+ /* Enable all but ATH6KL CPU interrupts */
+ dev->irq_en_reg.int_status_en =
+ SM(INT_STATUS_ENABLE_ERROR, 0x01) |
+ SM(INT_STATUS_ENABLE_CPU, 0x01) |
+ SM(INT_STATUS_ENABLE_COUNTER, 0x01);
+
+ /*
+ * NOTE: There are some cases where HIF can do detection of
+ * pending mbox messages which is disabled now.
+ */
+ dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
+
+ /* Set up the CPU Interrupt status Register */
+ dev->irq_en_reg.cpu_int_status_en = 0;
+
+ /* Set up the Error Interrupt status Register */
+ dev->irq_en_reg.err_int_status_en =
+ SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
+ SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
+
+ /*
+ * Enable Counter interrupt status register to get fatal errors for
+ * debugging.
+ */
+ dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
+ ATH6KL_TARGET_DEBUG_INTR_MASK);
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+
+ spin_unlock_bh(&dev->lock);
+
+ status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en, sizeof(regs),
+ HIF_WR_SYNC_BYTE_INC);
+
+ if (status)
+ ath6kl_err("failed to update interrupt ctl reg err: %d\n",
+ status);
+
+ return status;
+}
+
+int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+{
+ struct ath6kl_irq_enable_reg regs;
+
+ spin_lock_bh(&dev->lock);
+ /* Disable all interrupts */
+ dev->irq_en_reg.int_status_en = 0;
+ dev->irq_en_reg.cpu_int_status_en = 0;
+ dev->irq_en_reg.err_int_status_en = 0;
+ dev->irq_en_reg.cntr_int_status_en = 0;
+ memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
+ spin_unlock_bh(&dev->lock);
+
+ return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
+ &regs.int_status_en, sizeof(regs),
+ HIF_WR_SYNC_BYTE_INC);
+}
+
+/* enable device interrupts */
+int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+{
+ int status = 0;
+
+ /*
+	 * Make sure interrupts are disabled before unmasking at the HIF
+ * layer. The rationale here is that between device insertion
+ * (where we clear the interrupts the first time) and when HTC
+ * is finally ready to handle interrupts, other software can perform
+ * target "soft" resets. The ATH6KL interrupt enables reset back to an
+ * "enabled" state when this happens.
+ */
+ ath6kldev_disable_intrs(dev);
+
+ /* unmask the host controller interrupts */
+ ath6kl_hif_irq_enable(dev->ar);
+ status = ath6kldev_enable_intrs(dev);
+
+ return status;
+}
+
+/* disable all device interrupts */
+int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+{
+ /*
+ * Mask the interrupt at the HIF layer to avoid any stray interrupt
+ * taken while we zero out our shadow registers in
+ * ath6kldev_disable_intrs().
+ */
+ ath6kl_hif_irq_disable(dev->ar);
+
+ return ath6kldev_disable_intrs(dev);
+}
+
+int ath6kldev_setup(struct ath6kl_device *dev)
+{
+ int status = 0;
+ int i;
+ struct htc_packet *packet;
+
+ /* initialize our free list of IO packets */
+ INIT_LIST_HEAD(&dev->reg_io);
+ spin_lock_init(&dev->lock);
+
+ /* carve up register I/O packets (these are for ASYNC register I/O ) */
+ for (i = 0; i < ATH6KL_MAX_REG_IO_BUFFERS; i++) {
+ packet = &dev->reg_io_buf[i].packet;
+ set_htc_rxpkt_info(packet, dev, dev->reg_io_buf[i].buf,
+ ATH6KL_REG_IO_BUFFER_SIZE, 0);
+ ath6kl_add_io_pkt(dev, packet);
+ }
+
+ /*
+ * NOTE: we actually get the block size of a mailbox other than 0,
+ * for SDIO the block size on mailbox 0 is artificially set to 1.
+ * So we use the block size that is set for the other 3 mailboxes.
+ */
+ dev->block_sz = dev->ar->mbox_info.block_size;
+
+ /* must be a power of 2 */
+ if ((dev->block_sz & (dev->block_sz - 1)) != 0) {
+ WARN_ON(1);
+ goto fail_setup;
+ }
+
+ /* assemble mask, used for padding to a block */
+ dev->block_mask = dev->block_sz - 1;
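+
+	/*
+	 * Worked example (illustrative only): with block_sz = 128 the mask
+	 * is 0x7F, so a message of len bytes is padded up to
+	 * (len + dev->block_mask) & ~dev->block_mask, i.e. the next
+	 * multiple of the block size.
+	 */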
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+ dev->block_sz, dev->ar->mbox_info.htc_addr);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "hif interrupt processing is sync only\n");
+
+ status = ath6kldev_disable_intrs(dev);
+
+fail_setup:
+ return status;
+
+}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
new file mode 100644
index 000000000000..d770d4ec612e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HTC_HIF_H
+#define HTC_HIF_H
+
+#include "htc.h"
+#include "hif.h"
+
+#define ATH6KL_MAILBOXES 4
+
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
+
+#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
+ INT_STATUS_ENABLE_CPU_MASK | \
+ INT_STATUS_ENABLE_COUNTER_MASK)
+
+#define ATH6KL_REG_IO_BUFFER_SIZE 32
+#define ATH6KL_MAX_REG_IO_BUFFERS 8
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
+#define ATH6KL_SCATTER_REQS 4
+
+#ifndef A_CACHE_LINE_PAD
+#define A_CACHE_LINE_PAD 128
+#endif
+#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
+#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
+
+struct ath6kl_irq_proc_registers {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lkahd_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lkahd[2];
+ __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+} __packed;
+
+/* buffers for ASYNC I/O */
+struct ath6kl_async_reg_io_buffer {
+ struct htc_packet packet;
+ u8 pad1[A_CACHE_LINE_PAD];
+ /* cache-line safe with pads around */
+ u8 buf[ATH6KL_REG_IO_BUFFER_SIZE];
+ u8 pad2[A_CACHE_LINE_PAD];
+};
+
+struct ath6kl_device {
+ spinlock_t lock;
+ u8 pad1[A_CACHE_LINE_PAD];
+ struct ath6kl_irq_proc_registers irq_proc_reg;
+ u8 pad2[A_CACHE_LINE_PAD];
+ struct ath6kl_irq_enable_reg irq_en_reg;
+ u8 pad3[A_CACHE_LINE_PAD];
+ u32 block_sz;
+ u32 block_mask;
+ struct htc_target *htc_cnxt;
+ struct list_head reg_io;
+ struct ath6kl_async_reg_io_buffer reg_io_buf[ATH6KL_MAX_REG_IO_BUFFERS];
+ int (*msg_pending) (struct htc_target *target, u32 lk_ahds[],
+ int *npkts_fetched);
+ struct hif_dev_scat_sup_info hif_scat_info;
+ bool virt_scat;
+ int max_rx_bndl_sz;
+ int max_tx_bndl_sz;
+ int chk_irq_status_cnt;
+ struct ath6kl *ar;
+};
+
+int ath6kldev_setup(struct ath6kl_device *dev);
+int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
+int ath6kldev_mask_intrs(struct ath6kl_device *dev);
+int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
+ u32 *lk_ahd, int timeout);
+int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kldev_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kldev_rw_comp_handler(void *context, int status);
+int ath6kldev_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_xfer);
+int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read);
+
+#endif /* HTC_HIF_H */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
new file mode 100644
index 000000000000..fe61871e9874
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -0,0 +1,1293 @@
+
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/mmc/sdio_func.h>
+#include "core.h"
+#include "cfg80211.h"
+#include "target.h"
+#include "debug.h"
+#include "hif-ops.h"
+
+unsigned int debug_mask;
+
+module_param(debug_mask, uint, 0644);
+
+/*
+ * Include definitions here that can be used to tune the WLAN module
+ * behavior. Different customers can tune the behavior as per their needs,
+ * here.
+ */
+
+/*
+ * This configuration item enables/disables keepalive support.
+ * Keepalive support: in the absence of any data traffic to the AP, null
+ * frames will be sent to the AP at a periodic interval to keep the
+ * association active. This configuration item defines that interval.
+ * Use a value of zero to disable keepalive support.
+ * Default: 60 seconds
+ */
+#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
+
+/*
+ * This configuration item sets the value of the disconnect timeout.
+ * Firmware delays sending the disconnect event to the host for this
+ * timeout after it gets disconnected from the current AP.
+ * If the firmware successfully roams within the disconnect timeout
+ * it sends a new connect event.
+ */
+#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
+
+#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
+
+enum addr_type {
+ DATASET_PATCH_ADDR,
+ APP_LOAD_ADDR,
+ APP_START_OVERRIDE_ADDR,
+};
+
+#define ATH6KL_DATA_OFFSET 64
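+
+/*
+ * Roughly speaking (descriptive only), the reservation below leaves
+ * ATH6KL_DATA_OFFSET + sizeof(struct htc_packet) bytes of headroom plus
+ * one cache line at the front, with another cache line of slack left at
+ * the back of the allocation.
+ */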
+struct sk_buff *ath6kl_buf_alloc(int size)
+{
+ struct sk_buff *skb;
+ u16 reserved;
+
+	/* Add cacheline space at front and back of buffer */
+ reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
+ sizeof(struct htc_packet);
+ skb = dev_alloc_skb(size + reserved);
+
+ if (skb)
+ skb_reserve(skb, reserved - L1_CACHE_BYTES);
+ return skb;
+}
+
+void ath6kl_init_profile_info(struct ath6kl *ar)
+{
+ ar->ssid_len = 0;
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+
+ ar->dot11_auth_mode = OPEN_AUTH;
+ ar->auth_mode = NONE_AUTH;
+ ar->prwise_crypto = NONE_CRYPT;
+ ar->prwise_crypto_len = 0;
+ ar->grp_crypto = NONE_CRYPT;
+ ar->grp_crpto_len = 0;
+ memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(ar->bssid, 0, sizeof(ar->bssid));
+ ar->bss_ch = 0;
+ ar->nw_type = ar->next_mode = INFRA_NETWORK;
+}
+
+static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
+{
+ switch (ar->nw_type) {
+ case INFRA_NETWORK:
+ return HI_OPTION_FW_MODE_BSS_STA;
+ case ADHOC_NETWORK:
+ return HI_OPTION_FW_MODE_IBSS;
+ case AP_NETWORK:
+ return HI_OPTION_FW_MODE_AP;
+ default:
+ ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
+ return 0xff;
+ }
+}
+
+static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
+ u32 item_offset)
+{
+ u32 addr = 0;
+
+ if (ar->target_type == TARGET_TYPE_AR6003)
+ addr = ATH6KL_HI_START_ADDR + item_offset;
+
+ return addr;
+}
+
+static int ath6kl_set_host_app_area(struct ath6kl *ar)
+{
+ u32 address, data;
+ struct host_app_area host_app_area;
+
+	/* Fetch the address of the host_app_area
+ * instance in the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
+ address = TARG_VTOP(address);
+
+ if (ath6kl_read_reg_diag(ar, &address, &data))
+ return -EIO;
+
+ address = TARG_VTOP(data);
+ host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+ if (ath6kl_access_datadiag(ar, address,
+ (u8 *)&host_app_area,
+ sizeof(struct host_app_area), false))
+ return -EIO;
+
+ return 0;
+}
+
+static inline void set_ac2_ep_map(struct ath6kl *ar,
+ u8 ac,
+ enum htc_endpoint_id ep)
+{
+ ar->ac2ep_map[ac] = ep;
+ ar->ep2ac_map[ep] = ac;
+}
+
+/* connect to a service */
+static int ath6kl_connectservice(struct ath6kl *ar,
+ struct htc_service_connect_req *con_req,
+ char *desc)
+{
+ int status;
+ struct htc_service_connect_resp response;
+
+ memset(&response, 0, sizeof(response));
+
+ status = htc_conn_service(ar->htc_target, con_req, &response);
+ if (status) {
+ ath6kl_err("failed to connect to %s service status:%d\n",
+ desc, status);
+ return status;
+ }
+
+ switch (con_req->svc_id) {
+ case WMI_CONTROL_SVC:
+ if (test_bit(WMI_ENABLED, &ar->flag))
+ ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
+ ar->ctrl_ep = response.endpoint;
+ break;
+ case WMI_DATA_BE_SVC:
+ set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
+ break;
+ case WMI_DATA_BK_SVC:
+ set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
+ break;
+ case WMI_DATA_VI_SVC:
+ set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
+ break;
+ case WMI_DATA_VO_SVC:
+ set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
+ break;
+ default:
+ ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath6kl_init_service_ep(struct ath6kl *ar)
+{
+ struct htc_service_connect_req connect;
+
+ memset(&connect, 0, sizeof(connect));
+
+ /* these fields are the same for all service endpoints */
+ connect.ep_cb.rx = ath6kl_rx;
+ connect.ep_cb.rx_refill = ath6kl_rx_refill;
+ connect.ep_cb.tx_full = ath6kl_tx_queue_full;
+
+ /*
+ * Set the max queue depth so that our ath6kl_tx_queue_full handler
+ * gets called.
+ */
+ connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
+ connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
+ if (!connect.ep_cb.rx_refill_thresh)
+ connect.ep_cb.rx_refill_thresh++;
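+	/*
+	 * The increment above simply guarantees a refill threshold of at
+	 * least one buffer, in case ATH6KL_MAX_RX_BUFFERS were ever smaller
+	 * than four.
+	 */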
+
+ /* connect to control service */
+ connect.svc_id = WMI_CONTROL_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
+ return -EIO;
+
+ connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;
+
+ /*
+	 * Limit the HTC message size on the send path; although we can
+ * receive A-MSDU frames of 4K, we will only send ethernet-sized
+ * (802.3) frames on the send path.
+ */
+ connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;
+
+ /*
+ * To reduce the amount of committed memory for larger A_MSDU
+ * frames, use the recv-alloc threshold mechanism for larger
+ * packets.
+ */
+ connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
+ connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;
+
+ /*
+ * For the remaining data services set the connection flag to
+ * reduce dribbling, if configured to do so.
+ */
+ connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
+ connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
+ connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;
+
+ connect.svc_id = WMI_DATA_BE_SVC;
+
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
+ return -EIO;
+
+	/* connect to background service, map this to WMI LOW_PRI */
+ connect.svc_id = WMI_DATA_BK_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
+ return -EIO;
+
+	/* connect to Video service, map this to HI PRI */
+ connect.svc_id = WMI_DATA_VI_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
+ return -EIO;
+
+ /*
+ * Connect to VO service, this is currently not mapped to a WMI
+ * priority stream due to historical reasons. WMI originally
+ * defined 3 priorities over 3 mailboxes We can change this when
+	 * defined 3 priorities over 3 mailboxes. We can change this when
+ * mailboxes.
+ */
+ connect.svc_id = WMI_DATA_VO_SVC;
+ if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
+ return -EIO;
+
+ return 0;
+}
+
+static void ath6kl_init_control_info(struct ath6kl *ar)
+{
+ u8 ctr;
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ath6kl_init_profile_info(ar);
+ ar->def_txkey_index = 0;
+ memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
+ ar->ch_hint = 0;
+ ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+ ar->listen_intvl_b = 0;
+ ar->tx_pwr = 0;
+ clear_bit(SKIP_SCAN, &ar->flag);
+ set_bit(WMM_ENABLED, &ar->flag);
+ ar->intra_bss = 1;
+ memset(&ar->sc_params, 0, sizeof(ar->sc_params));
+ ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
+ ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ spin_lock_init(&ar->mcastpsq_lock);
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+}
+
+/*
+ * Set HTC/Mbox operational parameters, this can only be called when the
+ * target is in the BMI phase.
+ */
+static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
+ u8 htc_ctrl_buf)
+{
+ int status;
+ u32 blk_size;
+
+ blk_size = ar->mbox_info.block_size;
+
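+	/*
+	 * Descriptive note: hi_mbox_io_block_sz carries the mailbox block
+	 * size in its lower 16 bits; a non-zero htc_ctrl_buf count is
+	 * packed into the upper 16 bits, while zero lets the target use its
+	 * default number of control buffers.
+	 */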
+ if (htc_ctrl_buf)
+ blk_size |= ((u32)htc_ctrl_buf) << 16;
+
+ /* set the host interest area for the block size */
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_mbox_io_block_sz)),
+ (u8 *)&blk_size,
+ 4);
+ if (status) {
+ ath6kl_err("bmi_write_memory for IO block size failed\n");
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
+ blk_size,
+ ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
+
+ if (mbox_isr_yield_val) {
+ /* set the host interest area for the mbox ISR yield limit */
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_mbox_isr_yield_limit)),
+ (u8 *)&mbox_isr_yield_val,
+ 4);
+ if (status) {
+ ath6kl_err("bmi_write_memory for yield limit failed\n");
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
+
+static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
+{
+ u32 address;
+ u32 regdump_loc = 0;
+ int status;
+ u32 regdump_val[REGISTER_DUMP_LEN_MAX];
+ u32 i;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return;
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+ address = TARG_VTOP(address);
+
+ /* read RAM location through diagnostic window */
+ status = ath6kl_read_reg_diag(ar, &address, &regdump_loc);
+
+ if (status || !regdump_loc) {
+ ath6kl_err("failed to get ptr to register dump area\n");
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
+ regdump_loc);
+
+ regdump_loc = TARG_VTOP(regdump_loc);
+
+ /* fetch register dump data */
+ status = ath6kl_access_datadiag(ar,
+ regdump_loc,
+ (u8 *)&regdump_val[0],
+ REG_DUMP_COUNT_AR6003 * (sizeof(u32)),
+ true);
+
+ if (status) {
+ ath6kl_err("failed to get register dump\n");
+ return;
+ }
+ ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
+
+ for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
+ ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
+ i, regdump_val[i]);
+
+}
+
+void ath6kl_target_failure(struct ath6kl *ar)
+{
+ ath6kl_err("target asserted\n");
+
+ /* try dumping target assertion information (if any) */
+ ath6kl_dump_target_assert_info(ar);
+
+}
+
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+{
+ int status = 0;
+
+ /*
+ * Configure the device for rx dot11 header rules. "0,0" are the
+ * default values. Required if checksum offload is needed. Set
+ * RxMetaVersion to 2.
+ */
+ if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ ar->rx_meta_ver, 0, 0)) {
+ ath6kl_err("unable to set the rx frame format\n");
+ status = -EIO;
+ }
+
+ if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
+ if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+ IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
+ ath6kl_err("unable to set power save fail event policy\n");
+ status = -EIO;
+ }
+
+ if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
+ if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+ WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
+ ath6kl_err("unable to set barker preamble policy\n");
+ status = -EIO;
+ }
+
+ if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+ WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
+ ath6kl_err("unable to set keep alive interval\n");
+ status = -EIO;
+ }
+
+ if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+ WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
+ ath6kl_err("unable to set disconnect timeout\n");
+ status = -EIO;
+ }
+
+ if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
+ if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+ ath6kl_err("unable to set txop bursting\n");
+ status = -EIO;
+ }
+
+ return status;
+}
+
+int ath6kl_configure_target(struct ath6kl *ar)
+{
+ u32 param, ram_reserved_size;
+ u8 fw_iftype;
+
+ fw_iftype = ath6kl_get_fw_iftype(ar);
+ if (fw_iftype == 0xff)
+ return -EINVAL;
+
+	/* Tell the target which HTC version is being used */
+ param = HTC_PROTOCOL_VERSION;
+ if (ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_app_host_interest)),
+ (u8 *)&param, 4) != 0) {
+ ath6kl_err("bmi_write_memory for htc version failed\n");
+ return -EIO;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ param = 0;
+
+ if (ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_option_flag)),
+ (u8 *)&param, 4) != 0) {
+ ath6kl_err("bmi_read_memory for setting fwmode failed\n");
+ return -EIO;
+ }
+
+ param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+ param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
+ param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
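+	/*
+	 * At this point param carries the original hi_option_flag contents
+	 * with one device selected, the firmware mode taken from fw_iftype,
+	 * and the MAC-address-method and bridge fields cleared.
+	 */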
+
+ if (ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_option_flag)),
+ (u8 *)&param,
+ 4) != 0) {
+ ath6kl_err("bmi_write_memory for setting fwmode failed\n");
+ return -EIO;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");
+
+ /*
+	 * Hardcode the address used for the extended board data.
+	 * Ideally this should be pre-allocated by the OS at boot time,
+	 * but since it is a new feature and board data is loaded
+	 * at init time, we have to work around this from the host.
+ * It is difficult to patch the firmware boot code,
+ * but possible in theory.
+ */
+
+ if (ar->target_type == TARGET_TYPE_AR6003) {
+ if (ar->version.target_ver == AR6003_REV2_VERSION) {
+ param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
+ ram_reserved_size = AR6003_REV2_RAM_RESERVE_SIZE;
+ } else {
+ param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
+ ram_reserved_size = AR6003_REV3_RAM_RESERVE_SIZE;
+ }
+
+ if (ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_ext_data)),
+ (u8 *)&param, 4) != 0) {
+ ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
+ return -EIO;
+ }
+ if (ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_end_ram_reserve_sz)),
+ (u8 *)&ram_reserved_size, 4) != 0) {
+ ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
+ return -EIO;
+ }
+ }
+
+ /* set the block size for the target */
+ if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
+ /* use default number of control buffers */
+ return -EIO;
+
+ return 0;
+}
+
+struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+{
+ struct net_device *dev;
+ struct ath6kl *ar;
+ struct wireless_dev *wdev;
+
+ wdev = ath6kl_cfg80211_init(sdev);
+ if (!wdev) {
+ ath6kl_err("ath6kl_cfg80211_init failed\n");
+ return NULL;
+ }
+
+ ar = wdev_priv(wdev);
+ ar->dev = sdev;
+ ar->wdev = wdev;
+ wdev->iftype = NL80211_IFTYPE_STATION;
+
+ dev = alloc_netdev(0, "wlan%d", ether_setup);
+ if (!dev) {
+ ath6kl_err("no memory for network device instance\n");
+ ath6kl_cfg80211_deinit(ar);
+ return NULL;
+ }
+
+ dev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = dev;
+ ar->sme_state = SME_DISCONNECTED;
+ ar->auto_auth_stage = AUTH_IDLE;
+
+ init_netdev(dev);
+
+ ar->net_dev = dev;
+ ar->wlan_state = WLAN_ENABLED;
+
+ ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+
+ spin_lock_init(&ar->lock);
+
+ ath6kl_init_control_info(ar);
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+
+ setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
+ (unsigned long) dev);
+
+ return ar;
+}
+
+int ath6kl_unavail_ev(struct ath6kl *ar)
+{
+ ath6kl_destroy(ar->net_dev, 1);
+
+ return 0;
+}
+
+/* firmware upload */
+static u32 ath6kl_get_load_address(u32 target_ver, enum addr_type type)
+{
+ WARN_ON(target_ver != AR6003_REV2_VERSION &&
+ target_ver != AR6003_REV3_VERSION);
+
+ switch (type) {
+ case DATASET_PATCH_ADDR:
+ return (target_ver == AR6003_REV2_VERSION) ?
+ AR6003_REV2_DATASET_PATCH_ADDRESS :
+ AR6003_REV3_DATASET_PATCH_ADDRESS;
+ case APP_LOAD_ADDR:
+ return (target_ver == AR6003_REV2_VERSION) ?
+ AR6003_REV2_APP_LOAD_ADDRESS :
+ 0x1234;
+ case APP_START_OVERRIDE_ADDR:
+ return (target_ver == AR6003_REV2_VERSION) ?
+ AR6003_REV2_APP_START_OVERRIDE :
+ AR6003_REV3_APP_START_OVERRIDE;
+ default:
+ return 0;
+ }
+}
+
+static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
+ u8 **fw, size_t *fw_len)
+{
+ const struct firmware *fw_entry;
+ int ret;
+
+ ret = request_firmware(&fw_entry, filename, ar->dev);
+ if (ret)
+ return ret;
+
+ *fw_len = fw_entry->size;
+ *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+
+ if (*fw == NULL)
+ ret = -ENOMEM;
+
+ release_firmware(fw_entry);
+
+ return ret;
+}
+
+static int ath6kl_fetch_board_file(struct ath6kl *ar)
+{
+ const char *filename;
+ int ret;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_BOARD_DATA_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_BOARD_DATA_FILE;
+ break;
+ }
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret == 0) {
+ /* managed to get proper board file */
+ return 0;
+ }
+
+ /* there was no proper board file, try to use default instead */
+ ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
+ filename, ret);
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
+ break;
+ }
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
+ &ar->fw_board_len);
+ if (ret) {
+ ath6kl_err("Failed to get default board file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+
+	ath6kl_warn("WARNING! No proper board file was found, using a default board file instead.\n");
+	ath6kl_warn("Most likely your hardware won't work as specified. Install the correct board file!\n");
+
+ return 0;
+}
+
+
+static int ath6kl_upload_board_file(struct ath6kl *ar)
+{
+ u32 board_address, board_ext_address, param;
+ int ret;
+
+ if (ar->fw_board == NULL) {
+ ret = ath6kl_fetch_board_file(ar);
+ if (ret)
+ return ret;
+ }
+
+ /* Determine where in Target RAM to write Board Data */
+ ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_data)),
+ (u8 *) &board_address, 4);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "board data download addr: 0x%x\n",
+ board_address);
+
+ /* determine where in target ram to write extended board data */
+ ath6kl_bmi_read(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_ext_data)),
+ (u8 *) &board_ext_address, 4);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "board file download addr: 0x%x\n",
+ board_ext_address);
+
+ if (board_ext_address == 0) {
+ ath6kl_err("Failed to get board file target address.\n");
+ return -EINVAL;
+ }
+
+ if (ar->fw_board_len == (AR6003_BOARD_DATA_SZ +
+ AR6003_BOARD_EXT_DATA_SZ)) {
+ /* write extended board data */
+ ret = ath6kl_bmi_write(ar, board_ext_address,
+ ar->fw_board + AR6003_BOARD_DATA_SZ,
+ AR6003_BOARD_EXT_DATA_SZ);
+
+ if (ret) {
+ ath6kl_err("Failed to write extended board data: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* record that extended board data is initialized */
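+		/* ext data size in the upper 16 bits, bit 0 = valid flag */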
+ param = (AR6003_BOARD_EXT_DATA_SZ << 16) | 1;
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_ext_data_config)),
+ (unsigned char *) &param, 4);
+ }
+
+ if (ar->fw_board_len < AR6003_BOARD_DATA_SZ) {
+ ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
+ AR6003_BOARD_DATA_SZ);
+
+ if (ret) {
+ ath6kl_err("Board file bmi write failed: %d\n", ret);
+ return ret;
+ }
+
+ /* record the fact that Board Data IS initialized */
+ param = 1;
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_board_data_initialized)),
+ (u8 *)&param, 4);
+
+ return ret;
+}
+
+static int ath6kl_upload_otp(struct ath6kl *ar)
+{
+ const char *filename;
+ u32 address, param;
+ int ret;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_OTP_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_OTP_FILE;
+ break;
+ }
+
+ if (ar->fw_otp == NULL) {
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
+ &ar->fw_otp_len);
+ if (ret) {
+ ath6kl_err("Failed to get OTP file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+ }
+
+ address = ath6kl_get_load_address(ar->version.target_ver,
+ APP_LOAD_ADDR);
+
+ ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
+ ar->fw_otp_len);
+ if (ret) {
+ ath6kl_err("Failed to upload OTP file: %d\n", ret);
+ return ret;
+ }
+
+ /* execute the OTP code */
+ param = 0;
+ address = ath6kl_get_load_address(ar->version.target_ver,
+ APP_START_OVERRIDE_ADDR);
+ ath6kl_bmi_execute(ar, address, &param);
+
+ return ret;
+}
+
+static int ath6kl_upload_firmware(struct ath6kl *ar)
+{
+ const char *filename;
+ u32 address;
+ int ret;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_FIRMWARE_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_FIRMWARE_FILE;
+ break;
+ }
+
+ if (ar->fw == NULL) {
+ ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
+ if (ret) {
+ ath6kl_err("Failed to get firmware file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+ }
+
+ address = ath6kl_get_load_address(ar->version.target_ver,
+ APP_LOAD_ADDR);
+
+ ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);
+
+ if (ret) {
+ ath6kl_err("Failed to write firmware: %d\n", ret);
+ return ret;
+ }
+
+ /* Set starting address for firmware */
+ address = ath6kl_get_load_address(ar->version.target_ver,
+ APP_START_OVERRIDE_ADDR);
+ ath6kl_bmi_set_app_start(ar, address);
+
+ return ret;
+}
+
+static int ath6kl_upload_patch(struct ath6kl *ar)
+{
+ const char *filename;
+ u32 address, param;
+ int ret;
+
+ switch (ar->version.target_ver) {
+ case AR6003_REV2_VERSION:
+ filename = AR6003_REV2_PATCH_FILE;
+ break;
+ default:
+ filename = AR6003_REV3_PATCH_FILE;
+ break;
+ }
+
+ if (ar->fw_patch == NULL) {
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+ &ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to get patch file %s: %d\n",
+ filename, ret);
+ return ret;
+ }
+ }
+
+ address = ath6kl_get_load_address(ar->version.target_ver,
+ DATASET_PATCH_ADDR);
+
+ ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to write patch file: %d\n", ret);
+ return ret;
+ }
+
+ param = address;
+ ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dset_list_head)),
+ (unsigned char *) &param, 4);
+
+ return 0;
+}
+
+static int ath6kl_init_upload(struct ath6kl *ar)
+{
+ u32 param, options, sleep, address;
+ int status = 0;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return -EINVAL;
+
+ /* temporarily disable system sleep */
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ status = ath6kl_bmi_reg_read(ar, address, &param);
+ if (status)
+ return status;
+
+ options = param;
+
+ param |= ATH6KL_OPTION_SLEEP_DISABLE;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ status = ath6kl_bmi_reg_read(ar, address, &param);
+ if (status)
+ return status;
+
+ sleep = param;
+
+ param |= SM(SYSTEM_SLEEP_DISABLE, 1);
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
+ options, sleep);
+
+ /* program analog PLL register */
+ status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
+ 0xF9104001);
+ if (status)
+ return status;
+
+ /* Run at 80/88MHz by default */
+ param = SM(CPU_CLOCK_STANDARD, 1);
+
+ address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ param = 0;
+ address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
+ param = SM(LPO_CAL_ENABLE, 1);
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+	/* workaround to avoid SDIO CRC error */
+ if (ar->version.target_ver == AR6003_REV2_VERSION) {
+ ath6kl_err("temporary war to avoid sdio crc error\n");
+
+ param = 0x20;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+ }
+
+ /* write EEPROM data to Target RAM */
+ status = ath6kl_upload_board_file(ar);
+ if (status)
+ return status;
+
+	/* transfer the One Time Programmable (OTP) data */
+ status = ath6kl_upload_otp(ar);
+ if (status)
+ return status;
+
+ /* Download Target firmware */
+ status = ath6kl_upload_firmware(ar);
+ if (status)
+ return status;
+
+ status = ath6kl_upload_patch(ar);
+ if (status)
+ return status;
+
+ /* Restore system sleep */
+ address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, sleep);
+ if (status)
+ return status;
+
+ address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
+ param = options | 0x20;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
+ /* Configure GPIO AR6003 UART */
+ param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbg_uart_txpin)),
+ (u8 *)&param, 4);
+
+ return status;
+}
+
+static int ath6kl_init(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status = 0;
+ s32 timeleft;
+
+ if (!ar)
+ return -EIO;
+
+	/* Do we need to finish the BMI phase? */
+ if (ath6kl_bmi_done(ar)) {
+ status = -EIO;
+ goto ath6kl_init_done;
+ }
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init((void *) ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ status = -EIO;
+ goto ath6kl_init_done;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
+
+ /*
+ * The reason we have to wait for the target here is that the
+ * driver layer has to init BMI in order to set the host block
+ * size.
+ */
+ if (htc_wait_target(ar->htc_target)) {
+ status = -EIO;
+ goto err_wmi_cleanup;
+ }
+
+ if (ath6kl_init_service_ep(ar)) {
+ status = -EIO;
+ goto err_cleanup_scatter;
+ }
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ /* setup credit distribution */
+ ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
+
+ ath6kl_cookie_init(ar);
+
+ /* start HTC */
+ status = htc_start(ar->htc_target);
+
+ if (status) {
+ ath6kl_cookie_cleanup(ar);
+ goto err_rxbuf_cleanup;
+ }
+
+	/* wait for the WMI ready event */
+ timeleft = wait_event_interruptible_timeout(ar->event_wq,
+ test_bit(WMI_READY,
+ &ar->flag),
+ WMI_TIMEOUT);
+
+ if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
+ ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
+ ATH6KL_ABI_VERSION, ar->version.abi_ver);
+ status = -EIO;
+ goto err_htc_stop;
+ }
+
+ if (!timeleft || signal_pending(current)) {
+ ath6kl_err("wmi is not ready or wait was interrupted\n");
+ status = -EIO;
+ goto err_htc_stop;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
+
+	/* communicate the wmi protocol version to the target */
+ if ((ath6kl_set_host_app_area(ar)) != 0)
+ ath6kl_err("unable to set the host app area\n");
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ status = ath6kl_target_config_wlan_params(ar);
+ if (!status)
+ goto ath6kl_init_done;
+
+err_htc_stop:
+ htc_stop(ar->htc_target);
+err_rxbuf_cleanup:
+ htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+err_cleanup_scatter:
+ ath6kl_hif_cleanup_scatter(ar);
+err_wmi_cleanup:
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+
+ath6kl_init_done:
+ return status;
+}
+
+int ath6kl_core_init(struct ath6kl *ar)
+{
+ int ret = 0;
+ struct ath6kl_bmi_target_info targ_info;
+
+ ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
+ if (!ar->ath6kl_wq)
+ return -ENOMEM;
+
+ ret = ath6kl_bmi_init(ar);
+ if (ret)
+ goto err_wq;
+
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ar->version.target_ver = le32_to_cpu(targ_info.version);
+ ar->target_type = le32_to_cpu(targ_info.type);
+ ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
+
+ ret = ath6kl_configure_target(ar);
+ if (ret)
+ goto err_bmi_cleanup;
+
+ ar->htc_target = htc_create(ar);
+
+ if (!ar->htc_target) {
+ ret = -ENOMEM;
+ goto err_bmi_cleanup;
+ }
+
+ ar->aggr_cntxt = aggr_init(ar->net_dev);
+ if (!ar->aggr_cntxt) {
+ ath6kl_err("failed to initialize aggr\n");
+ ret = -ENOMEM;
+ goto err_htc_cleanup;
+ }
+
+ ret = ath6kl_init_upload(ar);
+ if (ret)
+ goto err_htc_cleanup;
+
+ ret = ath6kl_init(ar->net_dev);
+ if (ret)
+ goto err_htc_cleanup;
+
+ /* This runs the init function if registered */
+ ret = register_netdev(ar->net_dev);
+ if (ret) {
+ ath6kl_err("register_netdev failed\n");
+ ath6kl_destroy(ar->net_dev, 0);
+ return ret;
+ }
+
+ set_bit(NETDEV_REGISTERED, &ar->flag);
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
+ __func__, ar->net_dev->name, ar->net_dev, ar);
+
+ return ret;
+
+err_htc_cleanup:
+ htc_cleanup(ar->htc_target);
+err_bmi_cleanup:
+ ath6kl_bmi_cleanup(ar);
+err_wq:
+ destroy_workqueue(ar->ath6kl_wq);
+ return ret;
+}
+
+void ath6kl_stop_txrx(struct ath6kl *ar)
+{
+ struct net_device *ndev = ar->net_dev;
+
+ if (!ndev)
+ return;
+
+ set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("down_interruptible failed\n");
+ return;
+ }
+
+ if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
+ ath6kl_stop_endpoint(ndev, false, true);
+
+ ar->wlan_state = WLAN_DISABLED;
+}
+
+/*
+ * We need to differentiate between the surprise and planned removal of the
+ * device because of the following considerations:
+ *
+ * - In case of surprise removal, the hcd already frees up the pending
+ *   requests for the device and hence there is no need to unregister the
+ *   function driver in order to get these requests back. For planned
+ *   removal, the function driver has to explicitly unregister itself to
+ *   have the hcd return all the pending requests before the data
+ *   structures for the devices are freed up. Note that as per the current
+ *   implementation, the function driver will end up releasing all the
+ *   devices since there is no API to selectively release a particular
+ *   device.
+ *
+ * - Certain commands issued to the target can be skipped for surprise
+ *   removal since they will not go through anyway.
+ */
+void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
+{
+ struct ath6kl *ar;
+
+ if (!dev || !ath6kl_priv(dev)) {
+ ath6kl_err("failed to get device structure\n");
+ return;
+ }
+
+ ar = ath6kl_priv(dev);
+
+ destroy_workqueue(ar->ath6kl_wq);
+
+ if (ar->htc_target)
+ htc_cleanup(ar->htc_target);
+
+ aggr_module_destroy(ar->aggr_cntxt);
+
+ ath6kl_cookie_cleanup(ar);
+
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+
+ ath6kl_bmi_cleanup(ar);
+
+ if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
+ unregister_netdev(dev);
+ clear_bit(NETDEV_REGISTERED, &ar->flag);
+ }
+
+ free_netdev(dev);
+
+ ath6kl_cfg80211_deinit(ar);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
new file mode 100644
index 000000000000..f325a23dfff0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -0,0 +1,1337 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif-ops.h"
+#include "cfg80211.h"
+#include "target.h"
+#include "debug.h"
+
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+{
+ struct ath6kl_sta *conn = NULL;
+ u8 i, max_conn;
+
+ max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+
+ for (i = 0; i < max_conn; i++) {
+ if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
+ conn = &ar->sta_list[i];
+ break;
+ }
+ }
+
+ return conn;
+}
+
+struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
+{
+ struct ath6kl_sta *conn = NULL;
+ u8 ctr;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].aid == aid) {
+ conn = &ar->sta_list[ctr];
+ break;
+ }
+ }
+ return conn;
+}
+
+static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
+ u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
+{
+ struct ath6kl_sta *sta;
+ u8 free_slot;
+
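+	/* AID values start at 1; the station list is indexed from 0 */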
+ free_slot = aid - 1;
+
+ sta = &ar->sta_list[free_slot];
+ memcpy(sta->mac, mac, ETH_ALEN);
+ memcpy(sta->wpa_ie, wpaie, ielen);
+ sta->aid = aid;
+ sta->keymgmt = keymgmt;
+ sta->ucipher = ucipher;
+ sta->auth = auth;
+
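+	/* mark this slot as in use in the connected-station bitmap */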
+ ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
+ ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
+}
+
+static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
+{
+ struct ath6kl_sta *sta = &ar->sta_list[i];
+
+ /* empty the queued pkts in the PS queue if any */
+ spin_lock_bh(&sta->psq_lock);
+ skb_queue_purge(&sta->psq);
+ spin_unlock_bh(&sta->psq_lock);
+
+ memset(&ar->ap_stats.sta[sta->aid - 1], 0,
+ sizeof(struct wmi_per_sta_stat));
+ memset(sta->mac, 0, ETH_ALEN);
+ memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
+ sta->aid = 0;
+ sta->sta_flags = 0;
+
+ ar->sta_list_index = ar->sta_list_index & ~(1 << i);
+
+}
+
+static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
+{
+ u8 i, removed = 0;
+
+ if (is_zero_ether_addr(mac))
+ return removed;
+
+ if (is_broadcast_ether_addr(mac)) {
+		ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all stations\n");
+
+ for (i = 0; i < AP_MAX_NUM_STA; i++) {
+ if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
+ ath6kl_sta_cleanup(ar, i);
+ removed = 1;
+ }
+ }
+ } else {
+ for (i = 0; i < AP_MAX_NUM_STA; i++) {
+ if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "deleting station %pM aid=%d reason=%d\n",
+ mac, ar->sta_list[i].aid, reason);
+ ath6kl_sta_cleanup(ar, i);
+ removed = 1;
+ break;
+ }
+ }
+ }
+
+ return removed;
+}
+
+enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
+{
+ struct ath6kl *ar = devt;
+ return ar->ac2ep_map[ac];
+}
+
+struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
+{
+ struct ath6kl_cookie *cookie;
+
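+	/* take a cookie from the head of the free list, if one is available */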
+ cookie = ar->cookie_list;
+ if (cookie != NULL) {
+ ar->cookie_list = cookie->arc_list_next;
+ ar->cookie_count--;
+ }
+
+ return cookie;
+}
+
+void ath6kl_cookie_init(struct ath6kl *ar)
+{
+ u32 i;
+
+ ar->cookie_list = NULL;
+ ar->cookie_count = 0;
+
+ memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));
+
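+	/* place every preallocated cookie on the free list */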
+ for (i = 0; i < MAX_COOKIE_NUM; i++)
+ ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
+}
+
+void ath6kl_cookie_cleanup(struct ath6kl *ar)
+{
+ ar->cookie_list = NULL;
+ ar->cookie_count = 0;
+}
+
+void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
+{
+	/* insert at the head of the free list */
+
+ if (!ar || !cookie)
+ return;
+
+ cookie->arc_list_next = ar->cookie_list;
+ ar->cookie_list = cookie;
+ ar->cookie_count++;
+}
+
+/* set the window address register (using 4-byte register access). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+ int status;
+ u8 addr_val[4];
+ s32 i;
+
+ /*
+ * Write bytes 1,2,3 of the register to set the upper address bytes,
+ * the LSB is written last to initiate the access cycle
+ */
+
+ for (i = 1; i <= 3; i++) {
+ /*
+ * Fill the buffer with the address byte value we want to
+ * hit 4 times.
+ */
+ memset(addr_val, ((u8 *)&addr)[i], 4);
+
+		/*
+		 * Hit each byte of the register address with a 4-byte
+		 * write operation to the same address; this is a harmless
+		 * operation.
+		 */
+ status = hif_read_write_sync(ar, reg_addr + i, addr_val,
+ 4, HIF_WR_SYNC_BYTE_FIX);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
+ addr, reg_addr);
+ return status;
+ }
+
+	/*
+	 * Write the address register again, this time writing the whole
+	 * 4-byte value. The effect here is that the LSB write causes the
+	 * cycle to start, while the extra write to bytes 1,2,3 has no
+	 * effect since we are writing the same values again.
+	 */
+ status = hif_read_write_sync(ar, reg_addr, (u8 *)(&addr),
+ 4, HIF_WR_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
+ addr, reg_addr);
+ return status;
+ }
+
+ return 0;
+}
+
+/*
+ * Read from the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
+{
+ int status;
+
+ /* set window register to start read cycle */
+ status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
+ *address);
+
+ if (status)
+ return status;
+
+ /* read the data */
+ status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
+ sizeof(u32), HIF_RD_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("failed to read from window data addr\n");
+ return status;
+ }
+
+ return status;
+}
+
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static int ath6kl_write_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
+{
+ int status;
+
+ /* set write data */
+ status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
+ sizeof(u32), HIF_WR_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("failed to write 0x%x to window data addr\n", *data);
+ return status;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+ *address);
+}
+
+int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
+ u8 *data, u32 length, bool read)
+{
+ u32 count;
+ int status = 0;
+
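+	/* the diagnostic window moves one 32-bit word per access */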
+ for (count = 0; count < length; count += 4, address += 4) {
+ if (read) {
+ status = ath6kl_read_reg_diag(ar, &address,
+ (u32 *) &data[count]);
+ if (status)
+ break;
+ } else {
+ status = ath6kl_write_reg_diag(ar, &address,
+ (u32 *) &data[count]);
+ if (status)
+ break;
+ }
+ }
+
+ return status;
+}
+
+static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+				bool wait_for_compltn, bool cold_reset)
+{
+ int status = 0;
+ u32 address;
+ u32 data;
+
+ if (target_type != TARGET_TYPE_AR6003)
+ return;
+
+ data = cold_reset ? RESET_CONTROL_COLD_RST : RESET_CONTROL_MBOX_RST;
+
+ address = RTC_BASE_ADDRESS;
+ status = ath6kl_write_reg_diag(ar, &address, &data);
+
+ if (status)
+ ath6kl_err("failed to reset target\n");
+}
+
+void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
+ bool get_dbglogs)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
+
+ netif_stop_queue(dev);
+
+ /* disable the target and the interrupts associated with it */
+ if (test_bit(WMI_READY, &ar->flag)) {
+ discon_issued = (test_bit(CONNECTED, &ar->flag) ||
+ test_bit(CONNECT_PEND, &ar->flag));
+ ath6kl_disconnect(ar);
+ if (!keep_profile)
+ ath6kl_init_profile_info(ar);
+
+ del_timer(&ar->disconnect_timer);
+
+ clear_bit(WMI_READY, &ar->flag);
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+
+		/*
+		 * After wmi_shutdown all WMI events will be dropped. We
+		 * need to clean up the buffers allocated in AP mode and
+		 * give the disconnect notification to the stack, which
+		 * usually happens in disconnect_event. Simulate the
+		 * disconnect event by calling the function directly.
+		 * Sometimes disconnect_event will be received when the
+		 * debug logs are collected.
+		 */
+ if (discon_issued)
+ ath6kl_disconnect_event(ar, DISCONNECT_CMD,
+ (ar->nw_type & AP_NETWORK) ?
+ bcast_mac : ar->bssid,
+ 0, NULL, 0);
+
+ ar->user_key_ctrl = 0;
+
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "%s: wmi is not ready 0x%p 0x%p\n",
+ __func__, ar, ar->wmi);
+
+ /* Shut down WMI if we have started it */
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "%s: shut down wmi\n", __func__);
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
+ }
+ }
+
+ if (ar->htc_target) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+ htc_stop(ar->htc_target);
+ }
+
+	/*
+	 * Try to reset the device if we can. The driver may have been
+	 * configured NOT to reset the target during a debug session.
+	 */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "attempting to reset target on instance destroy\n");
+ ath6kl_reset_device(ar, ar->target_type, true, true);
+}
+
+static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+{
+ u8 index;
+ u8 keyusage;
+
+ for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
+ if (ar->wep_key_list[index].key_len) {
+ keyusage = GROUP_USAGE;
+ if (index == ar->def_txkey_index)
+ keyusage |= TX_USAGE;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi,
+ index,
+ WEP_CRYPT,
+ keyusage,
+ ar->wep_key_list[index].key_len,
+ NULL,
+ ar->wep_key_list[index].key,
+ KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+ }
+}
+
+static void ath6kl_connect_ap_mode(struct ath6kl *ar, u16 channel, u8 *bssid,
+ u16 listen_int, u16 beacon_int,
+ u8 assoc_resp_len, u8 *assoc_info)
+{
+ struct net_device *dev = ar->net_dev;
+ struct station_info sinfo;
+ struct ath6kl_req_key *ik;
+ enum crypto_type keyType = NONE_CRYPT;
+
+ if (memcmp(dev->dev_addr, bssid, ETH_ALEN) == 0) {
+ ik = &ar->ap_mode_bkey;
+
+ switch (ar->auth_mode) {
+ case NONE_AUTH:
+ if (ar->prwise_crypto == WEP_CRYPT)
+ ath6kl_install_static_wep_keys(ar);
+ break;
+ case WPA_PSK_AUTH:
+ case WPA2_PSK_AUTH:
+ case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
+ switch (ik->ik_type) {
+ case ATH6KL_CIPHER_TKIP:
+ keyType = TKIP_CRYPT;
+ break;
+ case ATH6KL_CIPHER_AES_CCM:
+ keyType = AES_CRYPT;
+ break;
+ default:
+ goto skip_key;
+ }
+ ath6kl_wmi_addkey_cmd(ar->wmi, ik->ik_keyix, keyType,
+ GROUP_USAGE, ik->ik_keylen,
+ (u8 *)&ik->ik_keyrsc,
+ ik->ik_keydata,
+ KEY_OP_INIT_VAL, ik->ik_macaddr,
+ SYNC_BOTH_WMIFLAG);
+ break;
+ }
+skip_key:
+ set_bit(CONNECTED, &ar->flag);
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n",
+ bssid, channel);
+
+ ath6kl_add_new_sta(ar, bssid, channel, assoc_info, assoc_resp_len,
+ listen_int & 0xFF, beacon_int,
+ (listen_int >> 8) & 0xFF);
+
+ /* send event to application */
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ /* TODO: sinfo.generation */
+ /* TODO: need to deliver (Re)AssocReq IEs somehow.. change in
+ * cfg80211 needed, e.g., by adding those into sinfo
+ */
+ cfg80211_new_sta(ar->net_dev, bssid, &sinfo, GFP_KERNEL);
+
+ netif_wake_queue(ar->net_dev);
+
+ return;
+}
+
+/* Functions for Tx credit handling */
+void ath6k_credit_init(struct htc_credit_state_info *cred_info,
+ struct list_head *ep_list,
+ int tot_credits)
+{
+ struct htc_endpoint_credit_dist *cur_ep_dist;
+ int count;
+
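+	/* initially, all credits sit in the free pool */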
+ cred_info->cur_free_credits = tot_credits;
+ cred_info->total_avail_credits = tot_credits;
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+ if (tot_credits > 4)
+ if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+ (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+ ath6kl_deposit_credit_to_ep(cred_info,
+ cur_ep_dist,
+ cur_ep_dist->cred_min);
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
+ cur_ep_dist->cred_min);
+ /*
+ * Control service is always marked active, it
+ * never goes inactive EVER.
+ */
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+ /* this is the lowest priority data endpoint */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+		/*
+		 * Streams have to be created (explicit | implicit) for all
+		 * kinds of traffic. BE endpoints are also inactive in the
+		 * beginning. When BE traffic starts it creates implicit
+		 * streams that redistribute credits.
+		 *
+		 * Note: all other endpoints have minimums set but are
+		 * initially given NO credits. Credits will be distributed
+		 * as traffic activity demands.
+		 */
+ }
+
+ WARN_ON(cred_info->cur_free_credits <= 0);
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+ else {
+			/*
+			 * For the remaining data endpoints, we assume that
+			 * each cred_per_msg is the same. We use a simple
+			 * calculation here: take the remaining credits,
+			 * determine how many whole messages they can cover,
+			 * and then set each endpoint's normal value equal
+			 * to 3/4 of that amount.
+			 */
+ count = (cred_info->cur_free_credits /
+ cur_ep_dist->cred_per_msg)
+ * cur_ep_dist->cred_per_msg;
+ count = (count * 3) >> 2;
+ count = max(count, cur_ep_dist->cred_per_msg);
+ cur_ep_dist->cred_norm = count;
+
+ }
+ }
+}
+
+/* initialize and setup credit distribution */
+int ath6k_setup_credit_dist(void *htc_handle,
+ struct htc_credit_state_info *cred_info)
+{
+ u16 servicepriority[5];
+
+ memset(cred_info, 0, sizeof(struct htc_credit_state_info));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set priority list */
+ htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+ return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int limit)
+{
+ int credits;
+
+ ep_dist->cred_assngd = limit;
+
+ if (ep_dist->credits <= limit)
+ return;
+
+ credits = ep_dist->credits - limit;
+ ep_dist->credits -= credits;
+ cred_info->cur_free_credits += credits;
+}
+
+static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
+ struct list_head *epdist_list)
+{
+ struct htc_endpoint_credit_dist *cur_dist_list;
+
+ list_for_each_entry(cur_dist_list, epdist_list, list) {
+ if (cur_dist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_dist_list->cred_to_dist > 0) {
+ cur_dist_list->credits +=
+ cur_dist_list->cred_to_dist;
+ cur_dist_list->cred_to_dist = 0;
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_assngd)
+ ath6k_reduce_credits(cred_info,
+ cur_dist_list,
+ cur_dist_list->cred_assngd);
+
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_norm)
+ ath6k_reduce_credits(cred_info, cur_dist_list,
+ cur_dist_list->cred_norm);
+
+ if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_dist_list->txq_depth == 0)
+ ath6k_reduce_credits(cred_info,
+ cur_dist_list, 0);
+ }
+ }
+ }
+}
+
+/*
+ * HTC has an endpoint that needs credits, ep_dist is the endpoint in
+ * question.
+ */
+void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+ int credits = 0;
+ int need;
+
+ if (ep_dist->svc_id == WMI_CONTROL_SVC)
+ goto out;
+
+ if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+ (ep_dist->svc_id == WMI_DATA_VO_SVC))
+		if (ep_dist->cred_assngd >= ep_dist->cred_norm)
+ goto out;
+
+ /*
+ * For all other services, we follow a simple algorithm of:
+ *
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take
+ */
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+ if (credits >= ep_dist->seek_cred)
+ goto out;
+
+	/*
+	 * We don't have enough in the free pool, so try taking away from
+	 * lower priority services. The rules for taking away credits:
+	 *
+	 * 1. Only take from lower priority endpoints
+	 * 2. Only take what is allocated above the minimum (never
+	 *    starve an endpoint completely)
+	 * 3. Only take what you need.
+	 */
+
+ list_for_each_entry_reverse(curdist_list,
+ &cred_info->lowestpri_ep_dist,
+ list) {
+ if (curdist_list == ep_dist)
+ break;
+
+ need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+ if ((curdist_list->cred_assngd - need) >=
+ curdist_list->cred_min) {
+			/*
+			 * The current endpoint has been allocated more than
+			 * its minimum and has enough credits assigned above
+			 * its minimum to fulfill our need; take away just
+			 * enough to cover that need.
+			 */
+ ath6k_reduce_credits(cred_info, curdist_list,
+ curdist_list->cred_assngd - need);
+
+ if (cred_info->cur_free_credits >=
+ ep_dist->seek_cred)
+ break;
+ }
+
+ if (curdist_list->endpoint == ENDPOINT_0)
+ break;
+ }
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+ /* did we find some credits? */
+ if (credits)
+ ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
+
+ ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
+ struct list_head *ep_dist_list)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+
+ list_for_each_entry(curdist_list, ep_dist_list, list) {
+ if (curdist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
+ (curdist_list->svc_id == WMI_DATA_BE_SVC))
+ curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+ if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+ !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (curdist_list->txq_depth == 0)
+ ath6k_reduce_credits(info,
+ curdist_list, 0);
+ else
+ ath6k_reduce_credits(info,
+ curdist_list,
+ curdist_list->cred_min);
+ }
+ }
+}
+
+/*
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked, so this
+ * function shall NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
+ struct list_head *ep_dist_list,
+ enum htc_credit_dist_reason reason)
+{
+ switch (reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE:
+ ath6k_credit_update(cred_info, ep_dist_list);
+ break;
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+ ath6k_redistribute_credits(cred_info, ep_dist_list);
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+ WARN_ON(cred_info->cur_free_credits < 0);
+}
+
+void disconnect_timer_handler(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ ath6kl_init_profile_info(ar);
+ ath6kl_disconnect(ar);
+}
+
+void ath6kl_disconnect(struct ath6kl *ar)
+{
+ if (test_bit(CONNECTED, &ar->flag) ||
+ test_bit(CONNECT_PEND, &ar->flag)) {
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ /*
+ * Disconnect command is issued, clear the connect pending
+ * flag. The connected flag will be cleared in
+ * disconnect event notification.
+ */
+ clear_bit(CONNECT_PEND, &ar->flag);
+ }
+}
+
+/* WMI Event handlers */
+
+static const char *get_hw_id_string(u32 id)
+{
+ switch (id) {
+ case AR6003_REV1_VERSION:
+ return "1.0";
+ case AR6003_REV2_VERSION:
+ return "2.0";
+ case AR6003_REV3_VERSION:
+ return "2.1.1";
+ default:
+ return "unknown";
+ }
+}
+
+void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
+{
+ struct ath6kl *ar = devt;
+ struct net_device *dev = ar->net_dev;
+
+ memcpy(dev->dev_addr, datap, ETH_ALEN);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
+ __func__, dev->dev_addr);
+
+ ar->version.wlan_ver = sw_ver;
+ ar->version.abi_ver = abi_ver;
+
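+	/* decode the packed 32-bit firmware version into a dotted string */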
+ snprintf(ar->wdev->wiphy->fw_version,
+ sizeof(ar->wdev->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ (ar->version.wlan_ver & 0xf0000000) >> 28,
+ (ar->version.wlan_ver & 0x0f000000) >> 24,
+ (ar->version.wlan_ver & 0x00ff0000) >> 16,
+ (ar->version.wlan_ver & 0x0000ffff));
+
+ /* indicate to the waiting thread that the ready event was received */
+ set_bit(WMI_READY, &ar->flag);
+ wake_up(&ar->event_wq);
+
+ ath6kl_info("hw %s fw %s\n",
+ get_hw_id_string(ar->wdev->wiphy->hw_version),
+ ar->wdev->wiphy->fw_version);
+}
+
+void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+{
+ ath6kl_cfg80211_scan_complete_event(ar, status);
+
+ if (!ar->usr_bss_filter)
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+}
+
+void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+ u16 listen_int, u16 beacon_int,
+ enum network_type net_type, u8 beacon_ie_len,
+ u8 assoc_req_len, u8 assoc_resp_len,
+ u8 *assoc_info)
+{
+ unsigned long flags;
+
+ if (ar->nw_type == AP_NETWORK) {
+ ath6kl_connect_ap_mode(ar, channel, bssid, listen_int,
+ beacon_int, assoc_resp_len,
+ assoc_info);
+ return;
+ }
+
+ ath6kl_cfg80211_connect_event(ar, channel, bssid,
+ listen_int, beacon_int,
+ net_type, beacon_ie_len,
+ assoc_req_len, assoc_resp_len,
+ assoc_info);
+
+ memcpy(ar->bssid, bssid, sizeof(ar->bssid));
+ ar->bss_ch = channel;
+
+	if (ar->nw_type == INFRA_NETWORK)
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ netif_wake_queue(ar->net_dev);
+
+ /* Update connect & link status atomically */
+ spin_lock_irqsave(&ar->lock, flags);
+ set_bit(CONNECTED, &ar->flag);
+ clear_bit(CONNECT_PEND, &ar->flag);
+ netif_carrier_on(ar->net_dev);
+ spin_unlock_irqrestore(&ar->lock, flags);
+
+ aggr_reset_state(ar->aggr_cntxt);
+ ar->reconnect_flag = 0;
+
+ if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+ memset(ar->node_map, 0, sizeof(ar->node_map));
+ ar->node_num = 0;
+ ar->next_ep_id = ENDPOINT_2;
+ }
+
+ if (!ar->usr_bss_filter)
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+}
+
+void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+{
+ struct ath6kl_sta *sta;
+ u8 tsc[6];
+
+	/*
+	 * In the AP case, keyid will have the aid of the STA which sent
+	 * the pkt with the MIC error. Use this aid to get the MAC & send
+	 * it to hostapd.
+	 */
+ if (ar->nw_type == AP_NETWORK) {
+ sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
+ if (!sta)
+ return;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "ap tkip mic error received from aid=%d\n", keyid);
+
+ memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
+ cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+ NL80211_KEYTYPE_PAIRWISE, keyid,
+ tsc, GFP_KERNEL);
+ } else
+ ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+
+}
+
+static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+{
+ struct wmi_target_stats *tgt_stats =
+ (struct wmi_target_stats *) ptr;
+ struct target_stats *stats = &ar->target_stats;
+ struct tkip_ccmp_stats *ccmp_stats;
+ struct bss *conn_bss = NULL;
+ struct cserv_stats *c_stats;
+ u8 ac;
+
+ if (len < sizeof(*tgt_stats))
+ return;
+
+ /* update the RSSI of the connected bss */
+ if (test_bit(CONNECTED, &ar->flag)) {
+ conn_bss = ath6kl_wmi_find_node(ar->wmi, ar->bssid);
+ if (conn_bss) {
+ c_stats = &tgt_stats->cserv_stats;
+ conn_bss->ni_rssi =
+ a_sle16_to_cpu(c_stats->cs_ave_beacon_rssi);
+ conn_bss->ni_snr =
+ tgt_stats->cserv_stats.cs_ave_beacon_snr;
+ ath6kl_wmi_node_return(ar->wmi, conn_bss);
+ }
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");
+
+ stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
+ stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
+ stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
+ stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
+ stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
+ stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
+ stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
+ stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
+ stats->tx_rts_success_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);
+
+ for (ac = 0; ac < WMM_NUM_AC; ac++)
+ stats->tx_pkt_per_ac[ac] +=
+ le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);
+
+ stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
+ stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
+ stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
+ stats->tx_mult_retry_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
+ stats->tx_rts_fail_cnt +=
+ le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
+ stats->tx_ucast_rate =
+ ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));
+
+ stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
+ stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
+ stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
+ stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
+ stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
+ stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
+ stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
+ stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
+ stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
+ stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
+ stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
+ stats->rx_key_cache_miss +=
+ le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
+ stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
+ stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
+ stats->rx_ucast_rate =
+ ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));
+
+ ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;
+
+ stats->tkip_local_mic_fail +=
+ le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
+ stats->tkip_cnter_measures_invoked +=
+ le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
+ stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);
+
+ stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
+ stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);
+
+ stats->pwr_save_fail_cnt +=
+ le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
+ stats->noise_floor_calib =
+ a_sle32_to_cpu(tgt_stats->noise_floor_calib);
+
+ stats->cs_bmiss_cnt +=
+ le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
+ stats->cs_low_rssi_cnt +=
+ le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
+ stats->cs_connect_cnt +=
+ le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
+ stats->cs_discon_cnt +=
+ le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);
+
+ stats->cs_ave_beacon_rssi =
+ a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);
+
+ stats->cs_last_roam_msec =
+ tgt_stats->cserv_stats.cs_last_roam_msec;
+ stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
+ stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);
+
+ stats->lq_val = le32_to_cpu(tgt_stats->lq_val);
+
+ stats->wow_pkt_dropped +=
+ le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
+ stats->wow_host_pkt_wakeups +=
+ tgt_stats->wow_stats.wow_host_pkt_wakeups;
+ stats->wow_host_evt_wakeups +=
+ tgt_stats->wow_stats.wow_host_evt_wakeups;
+ stats->wow_evt_discarded +=
+ le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
+
+ if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
+ clear_bit(STATS_UPDATE_PEND, &ar->flag);
+ wake_up(&ar->event_wq);
+ }
+}
+
+static void ath6kl_add_le32(__le32 *var, __le32 val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
+}
+
+void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+{
+ struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+ struct wmi_ap_mode_stat *ap = &ar->ap_stats;
+ struct wmi_per_sta_stat *st_ap, *st_p;
+ u8 ac;
+
+ if (ar->nw_type == AP_NETWORK) {
+ if (len < sizeof(*p))
+ return;
+
+ for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
+ st_ap = &ap->sta[ac];
+ st_p = &p->sta[ac];
+
+ ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
+ ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
+ ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
+ ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
+ ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
+ ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
+ ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
+ ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
+ }
+
+ } else {
+ ath6kl_update_target_stats(ar, ptr, len);
+ }
+}
+
+void ath6kl_wakeup_event(void *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *) dev;
+
+ wake_up(&ar->event_wq);
+}
+
+void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
+{
+ struct ath6kl *ar = (struct ath6kl *) devt;
+
+ ar->tx_pwr = tx_pwr;
+ wake_up(&ar->event_wq);
+}
+
+void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+{
+ struct ath6kl_sta *conn;
+ struct sk_buff *skb;
+ bool psq_empty = false;
+
+ conn = ath6kl_find_sta_by_aid(ar, aid);
+
+ if (!conn)
+ return;
+	/*
+	 * Send out a packet queued on the PS queue. When the PS queue
+	 * becomes empty, update the PVB (partial virtual bitmap) for
+	 * this station.
+	 */
+ spin_lock_bh(&conn->psq_lock);
+ psq_empty = skb_queue_empty(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (psq_empty)
+ /* TODO: Send out a NULL data frame */
+ return;
+
+ spin_lock_bh(&conn->psq_lock);
+ skb = skb_dequeue(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ conn->sta_flags |= STA_PS_POLLED;
+ ath6kl_data_tx(skb, ar->net_dev);
+ conn->sta_flags &= ~STA_PS_POLLED;
+
+ spin_lock_bh(&conn->psq_lock);
+ psq_empty = skb_queue_empty(&conn->psq);
+ spin_unlock_bh(&conn->psq_lock);
+
+ if (psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+}
+
+void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+{
+ bool mcastq_empty = false;
+ struct sk_buff *skb;
+
+	/*
+	 * If there are no associated STAs, ignore the DTIM expiry event.
+	 * There can be potential race conditions where the last associated
+	 * STA disconnects and, before the host can clear the 'Indicate
+	 * DTIM' request to the firmware, the firmware has already
+	 * indicated a DTIM expiry event. The race is between the 'clear
+	 * DTIM expiry cmd' going from the host to the firmware and the
+	 * DTIM expiry event coming from the firmware to the host.
+	 */
+ if (!ar->sta_list_index)
+ return;
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ mcastq_empty = skb_queue_empty(&ar->mcastpsq);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ if (mcastq_empty)
+ return;
+
+ /* set the STA flag to dtim_expired for the frame to go out */
+ set_bit(DTIM_EXPIRED, &ar->flag);
+
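+	/* flush the multicast PS queue, dropping the lock around each transmit */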
+ spin_lock_bh(&ar->mcastpsq_lock);
+ while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ ath6kl_data_tx(skb, ar->net_dev);
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ }
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ clear_bit(DTIM_EXPIRED, &ar->flag);
+
+ /* clear the LSB of the BitMapCtl field of the TIM IE */
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+}
+
+void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+ u8 assoc_resp_len, u8 *assoc_info,
+ u16 prot_reason_status)
+{
+ struct bss *wmi_ssid_node = NULL;
+ unsigned long flags;
+
+ if (ar->nw_type == AP_NETWORK) {
+ if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
+ return;
+
+ /* if no more associated STAs, empty the mcast PS q */
+ if (ar->sta_list_index == 0) {
+ spin_lock_bh(&ar->mcastpsq_lock);
+ skb_queue_purge(&ar->mcastpsq);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ /* clear the LSB of the TIM IE's BitMapCtl field */
+ if (test_bit(WMI_READY, &ar->flag))
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ }
+
+ if (!is_broadcast_ether_addr(bssid)) {
+ /* send event to application */
+ cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+ }
+
+ clear_bit(CONNECTED, &ar->flag);
+ return;
+ }
+
+ ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+ assoc_resp_len, assoc_info,
+ prot_reason_status);
+
+ aggr_reset_state(ar->aggr_cntxt);
+
+ del_timer(&ar->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
+ "disconnect reason is %d\n", reason);
+
+	/*
+	 * Only if the event is due to a disconnect cmd from the host will
+	 * the target stop trying to connect. Under any other condition,
+	 * the target keeps trying to connect.
+	 */
+ if (reason == DISCONNECT_CMD) {
+ if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ } else {
+ set_bit(CONNECT_PEND, &ar->flag);
+ if (((reason == ASSOC_FAILED) &&
+ (prot_reason_status == 0x11)) ||
+ ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
+ && (ar->reconnect_flag == 1))) {
+ set_bit(CONNECTED, &ar->flag);
+ return;
+ }
+ }
+
+ if ((reason == NO_NETWORK_AVAIL) && test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_wmi_node_free(ar->wmi, bssid);
+
+		/*
+		 * In case any other nodes with the same SSID are present,
+		 * remove them, since those nodes are also not available now.
+		 */
+ do {
+			/*
+			 * Find the nodes based on SSID and remove them.
+			 *
+			 * Note: this does not work for hidden SSIDs.
+			 */
+ wmi_ssid_node = ath6kl_wmi_find_ssid_node(ar->wmi,
+ ar->ssid,
+ ar->ssid_len,
+ false,
+ true);
+
+ if (wmi_ssid_node)
+ ath6kl_wmi_node_free(ar->wmi,
+ wmi_ssid_node->ni_macaddr);
+
+ } while (wmi_ssid_node);
+ }
+
+ /* update connect & link status atomically */
+ spin_lock_irqsave(&ar->lock, flags);
+ clear_bit(CONNECTED, &ar->flag);
+ netif_carrier_off(ar->net_dev);
+ spin_unlock_irqrestore(&ar->lock, flags);
+
+ if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
+ ar->reconnect_flag = 0;
+
+ if (reason != CSERV_DISCONNECT)
+ ar->user_key_ctrl = 0;
+
+ netif_stop_queue(ar->net_dev);
+ memset(ar->bssid, 0, sizeof(ar->bssid));
+ ar->bss_ch = 0;
+
+ ath6kl_tx_data_cleanup(ar);
+}
+
+static int ath6kl_open(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar->lock, flags);
+
+ ar->wlan_state = WLAN_ENABLED;
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ } else
+ netif_carrier_off(dev);
+
+ spin_unlock_irqrestore(&ar->lock, flags);
+
+ return 0;
+}
+
+static int ath6kl_close(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ netif_stop_queue(dev);
+
+ ath6kl_disconnect(ar);
+
+ if (test_bit(WMI_READY, &ar->flag)) {
+ if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0))
+ return -EIO;
+
+ ar->wlan_state = WLAN_DISABLED;
+ }
+
+ ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+
+ return 0;
+}
+
+static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ return &ar->net_stats;
+}
+
+static const struct net_device_ops ath6kl_netdev_ops = {
+ .ndo_open = ath6kl_open,
+ .ndo_stop = ath6kl_close,
+ .ndo_start_xmit = ath6kl_data_tx,
+ .ndo_get_stats = ath6kl_get_stats,
+};
+
+void init_netdev(struct net_device *dev)
+{
+ dev->netdev_ops = &ath6kl_netdev_ops;
+ dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
+
+ dev->needed_headroom = ETH_HLEN;
+ dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
+ sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
+ + WMI_MAX_TX_META_SZ;
+
+ return;
+}
diff --git a/drivers/net/wireless/ath/ath6kl/node.c b/drivers/net/wireless/ath/ath6kl/node.c
new file mode 100644
index 000000000000..b0f9ba2e463c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/node.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+#include "wmi.h"
+#include "debug.h"
+
+struct bss *wlan_node_alloc(int wh_size)
+{
+ struct bss *ni;
+
+ ni = kzalloc(sizeof(struct bss), GFP_ATOMIC);
+
+ if ((ni != NULL) && wh_size) {
+ ni->ni_buf = kmalloc(wh_size, GFP_ATOMIC);
+ if (ni->ni_buf == NULL) {
+ kfree(ni);
+ return NULL;
+ }
+ }
+
+ return ni;
+}
+
+void wlan_node_free(struct bss *ni)
+{
+ kfree(ni->ni_buf);
+ kfree(ni);
+}
+
+void wlan_setup_node(struct ath6kl_node_table *nt, struct bss *ni,
+ const u8 *mac_addr)
+{
+ int hash;
+
+ memcpy(ni->ni_macaddr, mac_addr, ETH_ALEN);
+ hash = ATH6KL_NODE_HASH(mac_addr);
+ ni->ni_refcnt = 1;
+
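+	/* record the creation time and inactivity count used for node ageing */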
+ ni->ni_tstamp = jiffies_to_msecs(jiffies);
+ ni->ni_actcnt = WLAN_NODE_INACT_CNT;
+
+ spin_lock_bh(&nt->nt_nodelock);
+
+ /* insert at the end of the node list */
+ ni->ni_list_next = NULL;
+ ni->ni_list_prev = nt->nt_node_last;
+ if (nt->nt_node_last != NULL)
+ nt->nt_node_last->ni_list_next = ni;
+
+ nt->nt_node_last = ni;
+ if (nt->nt_node_first == NULL)
+ nt->nt_node_first = ni;
+
+ /* insert into the hash list */
+ ni->ni_hash_next = nt->nt_hash[hash];
+ if (ni->ni_hash_next != NULL)
+ nt->nt_hash[hash]->ni_hash_prev = ni;
+
+ ni->ni_hash_prev = NULL;
+ nt->nt_hash[hash] = ni;
+
+ spin_unlock_bh(&nt->nt_nodelock);
+}
+
+struct bss *wlan_find_node(struct ath6kl_node_table *nt,
+ const u8 *mac_addr)
+{
+ struct bss *ni, *found_ni = NULL;
+ int hash;
+
+ spin_lock_bh(&nt->nt_nodelock);
+
+ hash = ATH6KL_NODE_HASH(mac_addr);
+ for (ni = nt->nt_hash[hash]; ni; ni = ni->ni_hash_next) {
+ if (memcmp(ni->ni_macaddr, mac_addr, ETH_ALEN) == 0) {
+ ni->ni_refcnt++;
+ found_ni = ni;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&nt->nt_nodelock);
+
+ return found_ni;
+}
+
+void wlan_node_reclaim(struct ath6kl_node_table *nt, struct bss *ni)
+{
+ int hash;
+
+ spin_lock_bh(&nt->nt_nodelock);
+
+ if (ni->ni_list_prev == NULL)
+ /* fix list head */
+ nt->nt_node_first = ni->ni_list_next;
+ else
+ ni->ni_list_prev->ni_list_next = ni->ni_list_next;
+
+ if (ni->ni_list_next == NULL)
+ /* fix list tail */
+ nt->nt_node_last = ni->ni_list_prev;
+ else
+ ni->ni_list_next->ni_list_prev = ni->ni_list_prev;
+
+ if (ni->ni_hash_prev == NULL) {
+ /* first in list so fix the list head */
+ hash = ATH6KL_NODE_HASH(ni->ni_macaddr);
+ nt->nt_hash[hash] = ni->ni_hash_next;
+ } else {
+ ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
+ }
+
+ if (ni->ni_hash_next != NULL)
+ ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;
+
+ wlan_node_free(ni);
+
+ spin_unlock_bh(&nt->nt_nodelock);
+}
+
+static void wlan_node_dec_free(struct bss *ni)
+{
+ if ((ni->ni_refcnt--) == 1)
+ wlan_node_free(ni);
+}
+
+void wlan_free_allnodes(struct ath6kl_node_table *nt)
+{
+ struct bss *ni;
+
+ while ((ni = nt->nt_node_first) != NULL)
+ wlan_node_reclaim(nt, ni);
+}
+
+void wlan_iterate_nodes(struct ath6kl_node_table *nt,
+ void (*f) (void *arg, struct bss *), void *arg)
+{
+ struct bss *ni;
+
+ spin_lock_bh(&nt->nt_nodelock);
+ for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
+ ni->ni_refcnt++;
+ (*f) (arg, ni);
+ wlan_node_dec_free(ni);
+ }
+ spin_unlock_bh(&nt->nt_nodelock);
+}
+
+void wlan_node_table_init(void *wmi, struct ath6kl_node_table *nt)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_NODE, "node table = 0x%lx\n",
+ (unsigned long)nt);
+
+ memset(nt, 0, sizeof(struct ath6kl_node_table));
+
+ spin_lock_init(&nt->nt_nodelock);
+
+ nt->nt_wmi = wmi;
+ nt->nt_node_age = WLAN_NODE_INACT_TIMEOUT_MSEC;
+}
+
+void wlan_refresh_inactive_nodes(struct ath6kl_node_table *nt)
+{
+ struct bss *bss;
+ u8 my_bssid[ETH_ALEN];
+ u32 now;
+
+ ath6kl_wmi_get_current_bssid(nt->nt_wmi, my_bssid);
+
+ now = jiffies_to_msecs(jiffies);
+ bss = nt->nt_node_first;
+ while (bss != NULL) {
+ struct bss *next_bss = bss->ni_list_next;
+
+ /* refresh all nodes except the current bss */
+ if (memcmp(my_bssid, bss->ni_macaddr, sizeof(my_bssid)) != 0) {
+ /*
+ * Reclaiming frees the node, hence the next pointer
+ * was saved above before this check.
+ */
+ if (((now - bss->ni_tstamp) > nt->nt_node_age)
+ || --bss->ni_actcnt == 0) {
+ wlan_node_reclaim(nt, bss);
+ }
+ }
+ bss = next_bss;
+ }
+}
+
+void wlan_node_table_cleanup(struct ath6kl_node_table *nt)
+{
+ wlan_free_allnodes(nt);
+}
+
+struct bss *wlan_find_ssid_node(struct ath6kl_node_table *nt, u8 *ssid,
+ u32 ssid_len, bool is_wpa2, bool match_ssid)
+{
+ struct bss *ni, *found_ni = NULL;
+ u8 *ie_ssid;
+
+ spin_lock_bh(&nt->nt_nodelock);
+
+ for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
+
+ ie_ssid = ni->ni_cie.ie_ssid;
+
+ if ((ie_ssid[1] <= IEEE80211_MAX_SSID_LEN) &&
+ (memcmp(ssid, &ie_ssid[2], ssid_len) == 0)) {
+
+ if (match_ssid ||
+ (is_wpa2 && ni->ni_cie.ie_rsn != NULL) ||
+ (!is_wpa2 && ni->ni_cie.ie_wpa != NULL)) {
+ ni->ni_refcnt++;
+ found_ni = ni;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&nt->nt_nodelock);
+
+ return found_ni;
+}
+
+void wlan_node_return(struct ath6kl_node_table *nt, struct bss *ni)
+{
+ spin_lock_bh(&nt->nt_nodelock);
+ wlan_node_dec_free(ni);
+ spin_unlock_bh(&nt->nt_nodelock);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
new file mode 100644
index 000000000000..b38732aaf41a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -0,0 +1,853 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include "htc_hif.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+
+struct ath6kl_sdio {
+ struct sdio_func *func;
+
+ spinlock_t lock;
+
+ /* free list */
+ struct list_head bus_req_freeq;
+
+ /* available bus requests */
+ struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
+
+ struct ath6kl *ar;
+ u8 *dma_buffer;
+
+ /* scatter request list head */
+ struct list_head scat_req;
+
+ spinlock_t scat_lock;
+ bool is_disabled;
+ atomic_t irq_handling;
+ const struct sdio_device_id *id;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ spinlock_t wr_async_lock;
+};
+
+#define CMD53_ARG_READ 0
+#define CMD53_ARG_WRITE 1
+#define CMD53_ARG_BLOCK_BASIS 1
+#define CMD53_ARG_FIXED_ADDRESS 0
+#define CMD53_ARG_INCR_ADDRESS 1
+
+static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
+{
+ return ar->hif_priv;
+}
+
+/*
+ * Check whether a DMA buffer is WORD-aligned and DMA-able. Most host
+ * controllers assume the buffer is DMA'able and will bug-check
+ * otherwise (i.e. buffers on the stack). The virt_addr_valid() check
+ * fails for stack memory.
+ */
+static inline bool buf_needs_bounce(u8 *buf)
+{
+ return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
+}
+
+static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
+{
+ struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
+
+ /* EP1 has an extended range */
+ mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
+ mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
+ mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
+ mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
+ mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
+}
+
+static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
+ u8 mode, u8 opcode, u32 addr,
+ u16 blksz)
+{
+ *arg = (((rw & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((mode & 1) << 27) |
+ ((opcode & 1) << 26) |
+ ((addr & 0x1FFFF) << 9) |
+ (blksz & 0x1FF));
+}
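+
+/*
+ * Worked example of the CMD53 argument encoding above (bit 31 = R/W,
+ * bits 30:28 = function number, bit 27 = block mode, bit 26 = op code
+ * (incrementing address), bits 25:9 = register address, bits 8:0 =
+ * byte/block count): a block-basis write of 4 blocks to function 1 at
+ * address 0x800 encodes as
+ *
+ *   ath6kl_sdio_set_cmd53_arg(&arg, CMD53_ARG_WRITE, 1,
+ *                             CMD53_ARG_BLOCK_BASIS,
+ *                             CMD53_ARG_INCR_ADDRESS, 0x800, 4);
+ *
+ * which yields arg == 0x9C100004.
+ */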
+
+static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ const u8 func = 0;
+
+ *arg = ((write & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((raw & 1) << 27) |
+ (1 << 26) |
+ ((address & 0x1FFFF) << 9) |
+ (1 << 8) |
+ (val & 0xFF);
+}
+
+static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
+{
+ struct bus_request *bus_req;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ return NULL;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct bus_request, list);
+ list_del(&bus_req->list);
+
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
+
+ return bus_req;
+}
+
+static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *bus_req)
+{
+ unsigned long flag;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+}
+
+static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
+ struct hif_scatter_req_priv *s_req_priv,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+ data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
+ (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
+ data->blksz, data->blocks, scat_req->len,
+ scat_req->scat_entries);
+
+ data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
+ MMC_DATA_READ;
+
+ /* fill SG entries */
+ sg = s_req_priv->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
+ /* assemble SG list */
+ for (i = 0; i < scat_req->scat_entries; i++, sg++) {
+ if ((unsigned long)scat_req->scat_list[i].buf & 0x3)
+ /*
+ * Some scatter engines can handle unaligned
+ * buffers, print this as informational only.
+ */
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "(%s) scatter buffer is unaligned 0x%p\n",
+ scat_req->req & HIF_WRITE ? "WR" : "RD",
+ scat_req->scat_list[i].buf);
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+ i, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+
+ sg_set_buf(sg, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+ }
+
+ /* set scatter-gather table for request */
+ data->sg = s_req_priv->sgentries;
+ data->sg_len = scat_req->scat_entries;
+}
+
+static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ struct mmc_request mmc_req;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct hif_scatter_req *scat_req;
+ u8 opcode, rw;
+ int status;
+
+ scat_req = req->scat_req;
+
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ ath6kl_sdio_setup_scat_data(scat_req, scat_req->req_priv, &data);
+
+ opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
+ CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
+
+ rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
+
+ /* Fixup the address so that the last byte will fall on MBOX EOM */
+ if (scat_req->req & HIF_WRITE) {
+ if (scat_req->addr == HIF_MBOX_BASE_ADDR)
+ scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
+ else
+ /* Uses extended address range */
+ scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
+ }
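+
+ /*
+ * For illustration, assuming the request targets the extended
+ * mailbox: a 256-byte scatter write ends up at
+ * addr = HIF_MBOX0_EXT_BASE_ADDR + HIF_MBOX0_EXT_WIDTH - 256, so
+ * its final byte lands exactly on the mailbox end-of-message (EOM)
+ * boundary, which the target uses to detect a complete message.
+ */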
+
+ /* set command argument */
+ ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
+ CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &cmd;
+ mmc_req.data = &data;
+
+ mmc_set_data_timeout(&data, ar_sdio->func->card);
+ /* synchronous call to process request */
+ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+
+ status = cmd.error ? cmd.error : data.error;
+ scat_req->status = status;
+
+ if (scat_req->status)
+ ath6kl_err("Scatter write request failed:%d\n",
+ scat_req->status);
+
+ if (scat_req->req & HIF_ASYNCHRONOUS)
+ scat_req->complete(scat_req);
+
+ return status;
+}
+
+
+/* callback to issue a read-write scatter request */
+static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req_priv *req_priv = scat_req->req_priv;
+ u32 request = scat_req->req;
+ int status = 0;
+ unsigned long flags;
+
+ if (!scat_req->len)
+ return -EINVAL;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: total len: %d scatter entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (request & HIF_SYNCHRONOUS) {
+ sdio_claim_host(ar_sdio->func);
+ status = ath6kl_sdio_scat_rw(ar_sdio, req_priv->busrequest);
+ sdio_release_host(ar_sdio->func);
+ } else {
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&req_priv->busrequest->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+ }
+
+ return status;
+}
+
+/* clean up scatter support */
+static void ath6kl_sdio_cleanup_scat_resource(struct ath6kl_sdio *ar_sdio)
+{
+ struct hif_scatter_req *s_req, *tmp_req;
+ unsigned long flag;
+
+ /* empty the free list */
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
+ list_del(&s_req->list);
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ if (s_req->req_priv && s_req->req_priv->busrequest)
+ ath6kl_sdio_free_bus_req(ar_sdio,
+ s_req->req_priv->busrequest);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req->req_priv);
+ kfree(s_req);
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ }
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+}
+
+/* setup of HIF scatter resources */
+static int ath6kl_sdio_setup_scat_resource(struct ath6kl_sdio *ar_sdio,
+ struct hif_dev_scat_sup_info *pinfo)
+{
+ struct hif_scatter_req *s_req;
+ struct bus_request *bus_req;
+ int i, scat_req_sz, scat_list_sz;
+
+ /* check if host supports scatter and it meets our requirements */
+ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ ath6kl_err("hif-scatter: host only supports scatter of : %d entries, need: %d\n",
+ ar_sdio->func->card->host->max_segs,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+ return -EINVAL;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ);
+
+ scat_list_sz = (MAX_SCATTER_ENTRIES_PER_REQ - 1) *
+ sizeof(struct hif_scatter_item);
+ scat_req_sz = sizeof(*s_req) + scat_list_sz;
+
+ for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
+ /* allocate the scatter request */
+ s_req = kzalloc(scat_req_sz, GFP_KERNEL);
+ if (!s_req)
+ goto fail_setup_scat;
+
+ /* allocate the private request blob */
+ s_req->req_priv = kzalloc(sizeof(*s_req->req_priv), GFP_KERNEL);
+
+ if (!s_req->req_priv) {
+ kfree(s_req);
+ goto fail_setup_scat;
+ }
+
+ /* allocate a bus request for this scatter request */
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+ if (!bus_req) {
+ kfree(s_req->req_priv);
+ kfree(s_req);
+ goto fail_setup_scat;
+ }
+
+ /* assign the scatter request to this bus request */
+ bus_req->scat_req = s_req;
+ s_req->req_priv->busrequest = bus_req;
+ /* add it to the scatter pool */
+ hif_scatter_req_add(ar_sdio->ar, s_req);
+ }
+
+ /* set scatter function pointers */
+ pinfo->rw_scat_func = ath6kl_sdio_async_rw_scatter;
+ pinfo->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+ pinfo->max_xfer_szper_scatreq = MAX_SCATTER_REQ_TRANSFER_SIZE;
+
+ return 0;
+
+fail_setup_scat:
+ ath6kl_err("hif-scatter: failed to alloc scatter resources !\n");
+ ath6kl_sdio_cleanup_scat_resource(ar_sdio);
+
+ return -ENOMEM;
+}
+
+static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u8 *tbuf = NULL;
+ int ret;
+ bool bounced = false;
+
+ if (request & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ if (buf_needs_bounce(buf)) {
+ if (!ar_sdio->dma_buffer)
+ return -ENOMEM;
+ tbuf = ar_sdio->dma_buffer;
+ memcpy(tbuf, buf, len);
+ bounced = true;
+ } else
+ tbuf = buf;
+
+ sdio_claim_host(ar_sdio->func);
+ if (request & HIF_WRITE) {
+ if (addr >= HIF_MBOX_BASE_ADDR &&
+ addr <= HIF_MBOX_END_ADDR)
+ addr += (HIF_MBOX_WIDTH - len);
+
+ if (addr == HIF_MBOX0_EXT_BASE_ADDR)
+ addr += HIF_MBOX0_EXT_WIDTH - len;
+
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_writesb(ar_sdio->func, addr, tbuf, len);
+ else
+ ret = sdio_memcpy_toio(ar_sdio->func, addr, tbuf, len);
+ } else {
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_readsb(ar_sdio->func, tbuf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(ar_sdio->func, tbuf,
+ addr, len);
+ if (bounced)
+ memcpy(buf, tbuf, len);
+ }
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ if (req->scat_req)
+ ath6kl_sdio_scat_rw(ar_sdio, req);
+ else {
+ void *context;
+ int status;
+
+ status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
+ req->buffer, req->length,
+ req->request);
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kldev_rw_comp_handler(context, status);
+ }
+}
+
+static void ath6kl_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath6kl_sdio *ar_sdio;
+ unsigned long flags;
+ struct bus_request *req, *tmp_req;
+
+ ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
+ sdio_claim_host(ar_sdio->func);
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ __ath6kl_sdio_write_async(ar_sdio, req);
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ }
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_handler(struct sdio_func *func)
+{
+ int status;
+ struct ath6kl_sdio *ar_sdio;
+
+ ar_sdio = sdio_get_drvdata(func);
+ atomic_set(&ar_sdio->irq_handling, 1);
+
+ /*
+ * Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ sdio_claim_host(ar_sdio->func);
+ atomic_set(&ar_sdio->irq_handling, 0);
+ WARN_ON(status && status != -ECANCELED);
+}
+
+static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+{
+ struct sdio_func *func = ar_sdio->func;
+ int ret = 0;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath6kl_err("Unable to enable sdio func: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /*
+ * Wait for hardware to initialise. It should take a lot less than
+ * 10 ms but let's be conservative here.
+ */
+ msleep(10);
+
+ ar_sdio->is_disabled = false;
+
+ return ret;
+}
+
+static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+{
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return 0;
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ return ret;
+
+ ar_sdio->is_disabled = true;
+
+ return ret;
+}
+
+static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *bus_req;
+ unsigned long flags;
+
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+
+ if (!bus_req)
+ return -ENOMEM;
+
+ bus_req->address = address;
+ bus_req->buffer = buffer;
+ bus_req->length = length;
+ bus_req->request = request;
+ bus_req->packet = packet;
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
+ if (ret)
+ ath6kl_err("Failed to claim sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Mask our function IRQ */
+ while (atomic_read(&ar_sdio->irq_handling)) {
+ sdio_release_host(ar_sdio->func);
+ schedule_timeout(HZ / 10);
+ sdio_claim_host(ar_sdio->func);
+ }
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath6kl_err("Failed to release sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *node = NULL;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ if (!list_empty(&ar_sdio->scat_req)) {
+ node = list_first_entry(&ar_sdio->scat_req,
+ struct hif_scatter_req, list);
+ list_del(&node->list);
+ }
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ return node;
+}
+
+static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ list_add_tail(&s_req->list, &ar_sdio->scat_req);
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+}
+
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar,
+ struct hif_dev_scat_sup_info *info)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ ret = ath6kl_sdio_setup_scat_resource(ar_sdio, info);
+
+ return ret;
+}
+
+static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+
+ ath6kl_sdio_cleanup_scat_resource(ar_sdio);
+}
+
+static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
+ .read_write_sync = ath6kl_sdio_read_write_sync,
+ .write_async = ath6kl_sdio_write_async,
+ .irq_enable = ath6kl_sdio_irq_enable,
+ .irq_disable = ath6kl_sdio_irq_disable,
+ .scatter_req_get = ath6kl_sdio_scatter_req_get,
+ .scatter_req_add = ath6kl_sdio_scatter_req_add,
+ .enable_scatter = ath6kl_sdio_enable_scatter,
+ .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
+};
+
+static int ath6kl_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret;
+ struct ath6kl_sdio *ar_sdio;
+ struct ath6kl *ar;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
+ __func__, func->num, func->vendor,
+ func->device, func->max_blksize, func->cur_blksize);
+
+ ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
+ if (!ar_sdio)
+ return -ENOMEM;
+
+ ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!ar_sdio->dma_buffer) {
+ ret = -ENOMEM;
+ goto err_hif;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->id = id;
+ ar_sdio->is_disabled = true;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->scat_lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+
+ INIT_LIST_HEAD(&ar_sdio->scat_req);
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
+
+ for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
+ ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
+
+ ar = ath6kl_core_alloc(&ar_sdio->func->dev);
+ if (!ar) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ ar_sdio->ar = ar;
+ ar->hif_priv = ar_sdio;
+ ar->hif_ops = &ath6kl_sdio_ops;
+
+ ath6kl_sdio_set_mbox_info(ar);
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ sdio_release_host(func);
+ goto err_dma;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ sdio_release_host(func);
+
+ ret = ath6kl_sdio_power_on(ar_sdio);
+ if (ret)
+ goto err_dma;
+
+ sdio_claim_host(func);
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ sdio_release_host(func);
+ goto err_off;
+ }
+
+ sdio_release_host(func);
+
+ ret = ath6kl_core_init(ar);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core\n");
+ goto err_off;
+ }
+
+ return ret;
+
+err_off:
+ ath6kl_sdio_power_off(ar_sdio);
+err_dma:
+ kfree(ar_sdio->dma_buffer);
+err_hif:
+ kfree(ar_sdio);
+
+ return ret;
+}
+
+static void ath6kl_sdio_remove(struct sdio_func *func)
+{
+ struct ath6kl_sdio *ar_sdio;
+
+ ar_sdio = sdio_get_drvdata(func);
+
+ ath6kl_stop_txrx(ar_sdio->ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ ath6kl_unavail_ev(ar_sdio->ar);
+
+ ath6kl_sdio_power_off(ar_sdio);
+
+ kfree(ar_sdio->dma_buffer);
+ kfree(ar_sdio);
+}
+
+static const struct sdio_device_id ath6kl_sdio_devices[] = {
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
+
+static struct sdio_driver ath6kl_sdio_driver = {
+ .name = "ath6kl_sdio",
+ .id_table = ath6kl_sdio_devices,
+ .probe = ath6kl_sdio_probe,
+ .remove = ath6kl_sdio_remove,
+};
+
+static int __init ath6kl_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath6kl_sdio_driver);
+ if (ret)
+ ath6kl_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath6kl_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath6kl_sdio_driver);
+}
+
+module_init(ath6kl_sdio_init);
+module_exit(ath6kl_sdio_exit);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
new file mode 100644
index 000000000000..519a013c9991
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2004-2010 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef TARGET_H
+#define TARGET_H
+
+#define AR6003_BOARD_DATA_SZ 1024
+#define AR6003_BOARD_EXT_DATA_SZ 768
+
+#define RESET_CONTROL_ADDRESS 0x00000000
+#define RESET_CONTROL_COLD_RST 0x00000100
+#define RESET_CONTROL_MBOX_RST 0x00000004
+
+#define CPU_CLOCK_STANDARD_S 0
+#define CPU_CLOCK_STANDARD 0x00000003
+#define CPU_CLOCK_ADDRESS 0x00000020
+
+#define CLOCK_CONTROL_ADDRESS 0x00000028
+#define CLOCK_CONTROL_LF_CLK32_S 2
+#define CLOCK_CONTROL_LF_CLK32 0x00000004
+
+#define SYSTEM_SLEEP_ADDRESS 0x000000c4
+#define SYSTEM_SLEEP_DISABLE_S 0
+#define SYSTEM_SLEEP_DISABLE 0x00000001
+
+#define LPO_CAL_ADDRESS 0x000000e0
+#define LPO_CAL_ENABLE_S 20
+#define LPO_CAL_ENABLE 0x00100000
+
+#define GPIO_PIN10_ADDRESS 0x00000050
+#define GPIO_PIN11_ADDRESS 0x00000054
+#define GPIO_PIN12_ADDRESS 0x00000058
+#define GPIO_PIN13_ADDRESS 0x0000005c
+
+#define HOST_INT_STATUS_ADDRESS 0x00000400
+#define HOST_INT_STATUS_ERROR_S 7
+#define HOST_INT_STATUS_ERROR 0x00000080
+
+#define HOST_INT_STATUS_CPU_S 6
+#define HOST_INT_STATUS_CPU 0x00000040
+
+#define HOST_INT_STATUS_COUNTER_S 4
+#define HOST_INT_STATUS_COUNTER 0x00000010
+
+#define CPU_INT_STATUS_ADDRESS 0x00000401
+
+#define ERROR_INT_STATUS_ADDRESS 0x00000402
+#define ERROR_INT_STATUS_WAKEUP_S 2
+#define ERROR_INT_STATUS_WAKEUP 0x00000004
+
+#define ERROR_INT_STATUS_RX_UNDERFLOW_S 1
+#define ERROR_INT_STATUS_RX_UNDERFLOW 0x00000002
+
+#define ERROR_INT_STATUS_TX_OVERFLOW_S 0
+#define ERROR_INT_STATUS_TX_OVERFLOW 0x00000001
+
+#define COUNTER_INT_STATUS_ADDRESS 0x00000403
+#define COUNTER_INT_STATUS_COUNTER_S 0
+#define COUNTER_INT_STATUS_COUNTER 0x000000ff
+
+#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
+
+#define INT_STATUS_ENABLE_ADDRESS 0x00000418
+#define INT_STATUS_ENABLE_ERROR_S 7
+#define INT_STATUS_ENABLE_ERROR 0x00000080
+
+#define INT_STATUS_ENABLE_CPU_S 6
+#define INT_STATUS_ENABLE_CPU 0x00000040
+
+#define INT_STATUS_ENABLE_INT_S 5
+#define INT_STATUS_ENABLE_INT 0x00000020
+#define INT_STATUS_ENABLE_COUNTER_S 4
+#define INT_STATUS_ENABLE_COUNTER 0x00000010
+
+#define INT_STATUS_ENABLE_MBOX_DATA_S 0
+#define INT_STATUS_ENABLE_MBOX_DATA 0x0000000f
+
+#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
+#define CPU_INT_STATUS_ENABLE_BIT_S 0
+#define CPU_INT_STATUS_ENABLE_BIT 0x000000ff
+
+#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_S 1
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW 0x00000002
+
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_S 0
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW 0x00000001
+
+#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
+#define COUNTER_INT_STATUS_ENABLE_BIT_S 0
+#define COUNTER_INT_STATUS_ENABLE_BIT 0x000000ff
+
+#define COUNT_ADDRESS 0x00000420
+
+#define COUNT_DEC_ADDRESS 0x00000440
+
+#define WINDOW_DATA_ADDRESS 0x00000474
+#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
+#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
+#define CPU_DBG_SEL_ADDRESS 0x00000483
+#define CPU_DBG_ADDRESS 0x00000484
+
+#define LOCAL_SCRATCH_ADDRESS 0x000000c0
+#define ATH6KL_OPTION_SLEEP_DISABLE 0x08
+
+#define RTC_BASE_ADDRESS 0x00004000
+#define GPIO_BASE_ADDRESS 0x00014000
+#define MBOX_BASE_ADDRESS 0x00018000
+#define ANALOG_INTF_BASE_ADDRESS 0x0001c000
+
+/* real name of the register is unknown */
+#define ATH6KL_ANALOG_PLL_REGISTER (ANALOG_INTF_BASE_ADDRESS + 0x284)
+
+#define SM(f, v) (((v) << f##_S) & f)
+#define MS(f, v) (((v) & f) >> f##_S)
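+
+/*
+ * For illustration, SM() builds a register value by shifting a field
+ * value into place and masking it, while MS() extracts it back, using
+ * the FIELD / FIELD_S pairs defined above. For example:
+ *
+ *   SM(LPO_CAL_ENABLE, 1)          == 0x00100000
+ *   MS(LPO_CAL_ENABLE, 0x00100000) == 1
+ *   SM(CPU_CLOCK_STANDARD, 2)      == 0x00000002
+ */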
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end).
+ */
+#define ATH6KL_HI_START_ADDR 0x00540600
+
+/*
+ * These are items that the Host may need to access
+ * via BMI or via the Diagnostic Window. The position
+ * of items in this structure must remain constant
+ * across firmware revisions!
+ *
+ * Types for each item must be fixed size across target and host platforms.
+ * The structure is used only to calculate offset for each register with
+ * HI_ITEM() macro, no values are stored to it.
+ *
+ * More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused1; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to ATH6KL_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /*
+ * Flash configuration overrides, used only
+ * when firmware is not executing from flash.
+ * (When using flash, modify the global variables
+ * with equivalent names.)
+ */
+ u32 hi_bank0_addr_value; /* 0x44 */
+ u32 hi_bank0_read_value; /* 0x48 */
+ u32 hi_bank0_write_value; /* 0x4c */
+ u32 hi_bank0_config_value; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_tbl; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+ /*
+ * NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts
+ * pin
+ */
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+ /*
+ * NOTE: byte [0] = RESET pin (bit 7 is polarity),
+ * bytes[1]..bytes[3] are for future use
+ */
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+
+ /* Pointer to extended board data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+
+ /*
+ * hi_board_ext_data_config:
+ * Bit [0]     : valid
+ * Bits [31:16]: size
+ */
+ /*
+ * hi_reset_flag controls what is done across a target reset,
+ * e.g. restoring app_start after a warm reset, or preserving
+ * the host interest area, ROM data, literals etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /*
+ * 0xbc - [31:0]: idle timeout in ms
+ */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic -
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
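+
+/*
+ * HI_ITEM() yields the byte offset of a host_interest member, which the
+ * host adds to the host interest base address (ATH6KL_HI_START_ADDR)
+ * when reading or writing the item over BMI or the diagnostic window.
+ * For example, HI_ITEM(hi_option_flag) is 0x10 and
+ * HI_ITEM(hi_board_data) is 0x54, matching the offsets noted beside
+ * each field above.
+ */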
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+#define HI_OPTION_FW_MODE_IBSS 0x0
+#define HI_OPTION_FW_MODE_BSS_STA 0x1
+#define HI_OPTION_FW_MODE_AP 0x2
+
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+ * Fw Mode/SubMode Mask (2 bits per field):
+ *
+ * | SUB     | SUB     | SUB     | SUB     |         |         |         |         |
+ * | MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0] |
+ */
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+
+/* Convert a Target virtual address into a Target physical address */
+#define TARG_VTOP(vaddr) ((vaddr) & 0x001fffff)
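+
+/*
+ * TARG_VTOP() simply masks an address down to its low 21 bits, e.g.
+ * TARG_VTOP(0x00945d00) == 0x00145d00.
+ */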
+
+#define AR6003_REV2_APP_START_OVERRIDE 0x944C00
+#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
+#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
+#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
+#define AR6003_REV2_RAM_RESERVE_SIZE 6912
+
+#define AR6003_REV3_APP_START_OVERRIDE 0x945d00
+#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
+#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
+#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
+#define AR6003_REV3_RAM_RESERVE_SIZE 512
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
new file mode 100644
index 000000000000..615b46d388f6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -0,0 +1,1452 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
+ u32 *map_no)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ethhdr *eth_hdr;
+ u32 i, ep_map = -1;
+ u8 *datap;
+
+ *map_no = 0;
+ datap = skb->data;
+ eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
+
+ if (is_multicast_ether_addr(eth_hdr->h_dest))
+ return ENDPOINT_2;
+
+ for (i = 0; i < ar->node_num; i++) {
+ if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
+ ETH_ALEN) == 0) {
+ *map_no = i + 1;
+ ar->node_map[i].tx_pend++;
+ return ar->node_map[i].ep_id;
+ }
+
+ if ((ep_map == -1) && !ar->node_map[i].tx_pend)
+ ep_map = i;
+ }
+
+ if (ep_map == -1) {
+ ep_map = ar->node_num;
+ ar->node_num++;
+ if (ar->node_num > MAX_NODE_NUM)
+ return ENDPOINT_UNUSED;
+ }
+
+ memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
+
+ for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
+ if (!ar->tx_pending[i]) {
+ ar->node_map[ep_map].ep_id = i;
+ break;
+ }
+
+ /*
+ * No free endpoint is available, start redistribution on
+ * the inuse endpoints.
+ */
+ if (i == ENDPOINT_5) {
+ ar->node_map[ep_map].ep_id = ar->next_ep_id;
+ ar->next_ep_id++;
+ if (ar->next_ep_id > ENDPOINT_5)
+ ar->next_ep_id = ENDPOINT_2;
+ }
+ }
+
+ *map_no = ep_map + 1;
+ ar->node_map[ep_map].tx_pend++;
+
+ return ar->node_map[ep_map].ep_id;
+}
+
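+/*
+ * AP mode power-save pre-processing for a frame about to be
+ * transmitted. Returns true when the skb has been consumed here
+ * (queued for a sleeping STA, queued on the multicast PS queue, or
+ * dropped because no STA entry exists), in which case the caller must
+ * not send it. *more_data is set when further buffered frames remain,
+ * so the caller can set the MoreData bit in the frame it is about to
+ * send.
+ */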
+static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+ bool *more_data)
+{
+ struct ethhdr *datap = (struct ethhdr *) skb->data;
+ struct ath6kl_sta *conn = NULL;
+ bool ps_queued = false, is_psq_empty = false;
+
+ if (is_multicast_ether_addr(datap->h_dest)) {
+ u8 ctr = 0;
+ bool q_mcast = false;
+
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
+ q_mcast = true;
+ break;
+ }
+ }
+
+ if (q_mcast) {
+ /*
+ * If this transmit is not because of a DTIM expiry,
+ * queue it.
+ */
+ if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+ bool is_mcastq_empty = false;
+
+ spin_lock_bh(&ar->mcastpsq_lock);
+ is_mcastq_empty =
+ skb_queue_empty(&ar->mcastpsq);
+ skb_queue_tail(&ar->mcastpsq, skb);
+ spin_unlock_bh(&ar->mcastpsq_lock);
+
+ /*
+ * If this is the first Mcast pkt getting
+ * queued indicate to the target to set the
+ * BitmapControl LSB of the TIM IE.
+ */
+ if (is_mcastq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ MCAST_AID, 1);
+
+ ps_queued = true;
+ } else {
+ /*
+ * This transmit is because of Dtim expiry.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&ar->mcastpsq_lock);
+ if (!skb_queue_empty(&ar->mcastpsq))
+ *more_data = true;
+ spin_unlock_bh(&ar->mcastpsq_lock);
+ }
+ }
+ } else {
+ conn = ath6kl_find_sta(ar, datap->h_dest);
+ if (!conn) {
+ dev_kfree_skb(skb);
+
+ /* Inform the caller that the skb is consumed */
+ return true;
+ }
+
+ if (conn->sta_flags & STA_PS_SLEEP) {
+ if (!(conn->sta_flags & STA_PS_POLLED)) {
+ /* Queue the frames if the STA is sleeping */
+ spin_lock_bh(&conn->psq_lock);
+ is_psq_empty = skb_queue_empty(&conn->psq);
+ skb_queue_tail(&conn->psq, skb);
+ spin_unlock_bh(&conn->psq_lock);
+
+ /*
+ * If this is the first pkt getting queued
+ * for this STA, update the PVB for this
+ * STA.
+ */
+ if (is_psq_empty)
+ ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ conn->aid, 1);
+
+ ps_queued = true;
+ } else {
+ /*
+ * This tx is because of a PsPoll.
+ * Determine if MoreData bit has to be set.
+ */
+ spin_lock_bh(&conn->psq_lock);
+ if (!skb_queue_empty(&conn->psq))
+ *more_data = true;
+ spin_unlock_bh(&conn->psq_lock);
+ }
+ }
+ }
+
+ return ps_queued;
+}
+
+/* Tx functions */
+
+int ath6kl_control_tx(void *devt, struct sk_buff *skb,
+ enum htc_endpoint_id eid)
+{
+ struct ath6kl *ar = devt;
+ int status = 0;
+ struct ath6kl_cookie *cookie = NULL;
+
+ spin_lock_bh(&ar->lock);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
+ skb, skb->len, eid);
+
+ if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
+ /*
+ * Control endpoint is full, don't allocate resources, we
+ * are just going to drop this packet.
+ */
+ cookie = NULL;
+ ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
+ skb, skb->len);
+ } else
+ cookie = ath6kl_alloc_cookie(ar);
+
+ if (cookie == NULL) {
+ spin_unlock_bh(&ar->lock);
+ status = -ENOMEM;
+ goto fail_ctrl_tx;
+ }
+
+ ar->tx_pending[eid]++;
+
+ if (eid != ar->ctrl_ep)
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ cookie->skb = skb;
+ cookie->map_no = 0;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
+ eid, ATH6KL_CONTROL_PKT_TAG);
+
+ /*
+ * This interface is asynchronous, if there is an error, cleanup
+ * will happen in the TX completion callback.
+ */
+ htc_tx(ar->htc_target, &cookie->htc_pkt);
+
+ return 0;
+
+fail_ctrl_tx:
+ dev_kfree_skb(skb);
+ return status;
+}
+
+int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_cookie *cookie = NULL;
+ enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+ u32 map_no = 0;
+ u16 htc_tag = ATH6KL_DATA_PKT_TAG;
+ u8 ac = 99; /* initialize to unmapped ac */
+ bool chk_adhoc_ps_mapping = false, more_data = false;
+ struct wmi_tx_meta_v2 meta_v2;
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
+ skb, skb->data, skb->len);
+
+ /* If target is not associated */
+ if (!test_bit(CONNECTED, &ar->flag)) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (!test_bit(WMI_READY, &ar->flag))
+ goto fail_tx;
+
+ /* AP mode Power saving processing */
+ if (ar->nw_type == AP_NETWORK) {
+ if (ath6kl_powersave_ap(ar, skb, &more_data))
+ return 0;
+ }
+
+ if (test_bit(WMI_ENABLED, &ar->flag)) {
+ memset(&meta_v2, 0, sizeof(meta_v2));
+
+ if (skb_headroom(skb) < dev->needed_headroom) {
+ WARN_ON(1);
+ goto fail_tx;
+ }
+
+ if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
+ ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
+ goto fail_tx;
+ }
+
+ if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
+ more_data, 0, 0, NULL)) {
+ ath6kl_err("wmi_data_hdr_add failed\n");
+ goto fail_tx;
+ }
+
+ if ((ar->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+ chk_adhoc_ps_mapping = true;
+ else {
+ /* get the stream mapping */
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
+ 0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+ if (ret)
+ goto fail_tx;
+ }
+ } else
+ goto fail_tx;
+
+ spin_lock_bh(&ar->lock);
+
+ if (chk_adhoc_ps_mapping)
+ eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
+ else
+ eid = ar->ac2ep_map[ac];
+
+ if (eid == 0 || eid == ENDPOINT_UNUSED) {
+ ath6kl_err("eid %d is not mapped!\n", eid);
+ spin_unlock_bh(&ar->lock);
+ goto fail_tx;
+ }
+
+ /* allocate resource for this packet */
+ cookie = ath6kl_alloc_cookie(ar);
+
+ if (!cookie) {
+ spin_unlock_bh(&ar->lock);
+ goto fail_tx;
+ }
+
+ /* update counts while the lock is held */
+ ar->tx_pending[eid]++;
+ ar->total_tx_data_pend++;
+
+ spin_unlock_bh(&ar->lock);
+
+ cookie->skb = skb;
+ cookie->map_no = map_no;
+ set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
+ eid, htc_tag);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);
+
+ /*
+ * HTC interface is asynchronous, if this fails, cleanup will
+ * happen in the ath6kl_tx_complete callback.
+ */
+ htc_tx(ar->htc_target, &cookie->htc_pkt);
+
+ return 0;
+
+fail_tx:
+ dev_kfree_skb(skb);
+
+ ar->net_stats.tx_dropped++;
+ ar->net_stats.tx_aborted_errors++;
+
+ return 0;
+}
+
+/* indicate tx activity or inactivity on a WMI stream */
+void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
+{
+ struct ath6kl *ar = devt;
+ enum htc_endpoint_id eid;
+ int i;
+
+ eid = ar->ac2ep_map[traffic_class];
+
+ if (!test_bit(WMI_ENABLED, &ar->flag))
+ goto notify_htc;
+
+ spin_lock_bh(&ar->lock);
+
+ ar->ac_stream_active[traffic_class] = active;
+
+ if (active) {
+ /*
+ * Keep track of the active stream with the highest
+ * priority.
+ */
+ if (ar->ac_stream_pri_map[traffic_class] >
+ ar->hiac_stream_active_pri)
+ /* set the new highest active priority */
+ ar->hiac_stream_active_pri =
+ ar->ac_stream_pri_map[traffic_class];
+
+ } else {
+ /*
+ * We may have to search for the next active stream
+ * that is the highest priority.
+ */
+ if (ar->hiac_stream_active_pri ==
+ ar->ac_stream_pri_map[traffic_class]) {
+ /*
+ * The highest priority stream just went inactive
+ * reset and search for the "next" highest "active"
+ * priority stream.
+ */
+ ar->hiac_stream_active_pri = 0;
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (ar->ac_stream_active[i] &&
+ (ar->ac_stream_pri_map[i] >
+ ar->hiac_stream_active_pri))
+ /*
+ * Set the new highest active
+ * priority.
+ */
+ ar->hiac_stream_active_pri =
+ ar->ac_stream_pri_map[i];
+ }
+ }
+ }
+
+ spin_unlock_bh(&ar->lock);
+
+notify_htc:
+ /* notify HTC, this may cause credit distribution changes */
+ htc_indicate_activity_change(ar->htc_target, eid, active);
+}
+
+enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
+ struct htc_packet *packet)
+{
+ struct ath6kl *ar = target->dev->ar;
+ enum htc_endpoint_id endpoint = packet->endpoint;
+
+ if (endpoint == ar->ctrl_ep) {
+ /*
+ * Under normal WMI operation the control endpoint should
+ * never fill up; if it does, something is running rampant
+ * and the host is exhausting the WMI queue with too many
+ * commands. The only exception to this is during testing
+ * using endpointping.
+ */
+ spin_lock_bh(&ar->lock);
+ set_bit(WMI_CTRL_EP_FULL, &ar->flag);
+ spin_unlock_bh(&ar->lock);
+ ath6kl_err("wmi ctrl ep is full\n");
+ return HTC_SEND_FULL_KEEP;
+ }
+
+ if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
+ return HTC_SEND_FULL_KEEP;
+
+ if (ar->nw_type == ADHOC_NETWORK)
+ /*
+ * In adhoc mode, we cannot differentiate traffic
+ * priorities so there is no need to continue, however we
+ * should stop the network.
+ */
+ goto stop_net_queues;
+
+ /*
+ * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
+ * the highest active stream.
+ */
+ if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
+ ar->hiac_stream_active_pri &&
+ ar->cookie_count <= MAX_HI_COOKIE_NUM)
+ /*
+ * Give preference to the highest priority stream by
+ * dropping the packets which overflowed.
+ */
+ return HTC_SEND_FULL_DROP;
+
+stop_net_queues:
+ spin_lock_bh(&ar->lock);
+ set_bit(NETQ_STOPPED, &ar->flag);
+ spin_unlock_bh(&ar->lock);
+ netif_stop_queue(ar->net_dev);
+
+ return HTC_SEND_FULL_KEEP;
+}
+
+/* TODO this needs to be looked at */
+static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+ enum htc_endpoint_id eid, u32 map_no)
+{
+ u32 i;
+
+ if (ar->nw_type != ADHOC_NETWORK)
+ return;
+
+ if (!ar->ibss_ps_enable)
+ return;
+
+ if (eid == ar->ctrl_ep)
+ return;
+
+ if (map_no == 0)
+ return;
+
+ map_no--;
+ ar->node_map[map_no].tx_pend--;
+
+ if (ar->node_map[map_no].tx_pend)
+ return;
+
+ if (map_no != (ar->node_num - 1))
+ return;
+
+ for (i = ar->node_num; i > 0; i--) {
+ if (ar->node_map[i - 1].tx_pend)
+ break;
+
+ memset(&ar->node_map[i - 1], 0,
+ sizeof(struct ath6kl_node_mapping));
+ ar->node_num--;
+ }
+}
+
+void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
+{
+ struct ath6kl *ar = context;
+ struct sk_buff_head skb_queue;
+ struct htc_packet *packet;
+ struct sk_buff *skb;
+ struct ath6kl_cookie *ath6kl_cookie;
+ u32 map_no = 0;
+ int status;
+ enum htc_endpoint_id eid;
+ bool wake_event = false;
+ bool flushing = false;
+
+ skb_queue_head_init(&skb_queue);
+
+ /* lock the driver as we update internal state */
+ spin_lock_bh(&ar->lock);
+
+ /* reap completed packets */
+ while (!list_empty(packet_queue)) {
+
+ packet = list_first_entry(packet_queue, struct htc_packet,
+ list);
+ list_del(&packet->list);
+
+ ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
+ if (!ath6kl_cookie)
+ goto fatal;
+
+ status = packet->status;
+ skb = ath6kl_cookie->skb;
+ eid = packet->endpoint;
+ map_no = ath6kl_cookie->map_no;
+
+ if (!skb || !skb->data)
+ goto fatal;
+
+ packet->buf = skb->data;
+
+ __skb_queue_tail(&skb_queue, skb);
+
+ if (!status && (packet->act_len != skb->len))
+ goto fatal;
+
+ ar->tx_pending[eid]--;
+
+ if (eid != ar->ctrl_ep)
+ ar->total_tx_data_pend--;
+
+ if (eid == ar->ctrl_ep) {
+ if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
+ clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
+
+ if (ar->tx_pending[eid] == 0)
+ wake_event = true;
+ }
+
+ if (status) {
+ if (status == -ECANCELED)
+ /* a packet was flushed */
+ flushing = true;
+
+ ar->net_stats.tx_errors++;
+
+ if (status != -ENOSPC)
+ ath6kl_err("tx error, status: 0x%x\n", status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
+ __func__, skb, packet->buf, packet->act_len,
+ eid, "error!");
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
+ "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
+ __func__, skb, packet->buf, packet->act_len,
+ eid, "OK");
+
+ flushing = false;
+ ar->net_stats.tx_packets++;
+ ar->net_stats.tx_bytes += skb->len;
+ }
+
+ ath6kl_tx_clear_node_map(ar, eid, map_no);
+
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+
+ if (test_bit(NETQ_STOPPED, &ar->flag))
+ clear_bit(NETQ_STOPPED, &ar->flag);
+ }
+
+ spin_unlock_bh(&ar->lock);
+
+ __skb_queue_purge(&skb_queue);
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ if (!flushing)
+ netif_wake_queue(ar->net_dev);
+ }
+
+ if (wake_event)
+ wake_up(&ar->event_wq);
+
+ return;
+
+fatal:
+ WARN_ON(1);
+ spin_unlock_bh(&ar->lock);
+ return;
+}
+
+void ath6kl_tx_data_cleanup(struct ath6kl *ar)
+{
+ int i;
+
+ /* flush all the data (non-control) streams */
+ for (i = 0; i < WMM_NUM_AC; i++)
+ htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
+ ATH6KL_DATA_PKT_TAG);
+}
+
+/* Rx functions */
+
+static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ skb->dev = dev;
+
+ if (!(skb->dev->flags & IFF_UP)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif_rx_ni(skb);
+}
+
+static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
+{
+ struct sk_buff *skb;
+
+ while (num) {
+ skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
+ if (!skb) {
+ ath6kl_err("netbuf allocation failed\n");
+ return;
+ }
+ skb_queue_tail(q, skb);
+ num--;
+ }
+}
+
+static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
+{
+ struct sk_buff *skb = NULL;
+
+ if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
+ ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
+
+ skb = skb_dequeue(&p_aggr->free_q);
+
+ return skb;
+}
+
+void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff *skb;
+ int rx_buf;
+ int n_buf_refill;
+ struct htc_packet *packet;
+ struct list_head queue;
+
+ n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
+ htc_get_rxbuf_num(ar->htc_target, endpoint);
+
+ if (n_buf_refill <= 0)
+ return;
+
+ INIT_LIST_HEAD(&queue);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
+ "%s: providing htc with %d buffers at eid=%d\n",
+ __func__, n_buf_refill, endpoint);
+
+ for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
+ skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
+ if (!skb)
+ break;
+
+ packet = (struct htc_packet *) skb->head;
+ set_htc_rxpkt_info(packet, skb, skb->data,
+ ATH6KL_BUFFER_SIZE, endpoint);
+ list_add_tail(&packet->list, &queue);
+ }
+
+ if (!list_empty(&queue))
+ htc_add_rxbuf_multiple(ar->htc_target, &queue);
+}
+
+void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
+{
+ struct htc_packet *packet;
+ struct sk_buff *skb;
+
+ while (count) {
+ skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
+ if (!skb)
+ return;
+
+ packet = (struct htc_packet *) skb->head;
+ set_htc_rxpkt_info(packet, skb, skb->data,
+ ATH6KL_AMSDU_BUFFER_SIZE, 0);
+ spin_lock_bh(&ar->lock);
+ list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
+ spin_unlock_bh(&ar->lock);
+ count--;
+ }
+}
+
+/*
+ * Callback to allocate a receive buffer for a pending packet. We use a
+ * pre-allocated list of buffers of maximum AMSDU size (4K).
+ */
+struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
+ enum htc_endpoint_id endpoint,
+ int len)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct htc_packet *packet = NULL;
+ struct list_head *pkt_pos;
+ int refill_cnt = 0, depth = 0;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
+ __func__, endpoint, len);
+
+ if ((len <= ATH6KL_BUFFER_SIZE) ||
+ (len > ATH6KL_AMSDU_BUFFER_SIZE))
+ return NULL;
+
+ spin_lock_bh(&ar->lock);
+
+ if (list_empty(&ar->amsdu_rx_buffer_queue)) {
+ spin_unlock_bh(&ar->lock);
+ refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
+ goto refill_buf;
+ }
+
+ packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
+ struct htc_packet, list);
+ list_del(&packet->list);
+ list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
+ depth++;
+
+ refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
+ spin_unlock_bh(&ar->lock);
+
+ /* set actual endpoint ID */
+ packet->endpoint = endpoint;
+
+refill_buf:
+ if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
+ ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
+
+ return packet;
+}
+
+static void aggr_slice_amsdu(struct aggr_info *p_aggr,
+ struct rxtid *rxtid, struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ struct ethhdr *hdr;
+ u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
+ u8 *framep;
+
+ mac_hdr_len = sizeof(struct ethhdr);
+ framep = skb->data + mac_hdr_len;
+ amsdu_len = skb->len - mac_hdr_len;
+
+ while (amsdu_len > mac_hdr_len) {
+ hdr = (struct ethhdr *) framep;
+ payload_8023_len = ntohs(hdr->h_proto);
+
+ if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
+ payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
+ ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
+ payload_8023_len);
+ break;
+ }
+
+ frame_8023_len = payload_8023_len + mac_hdr_len;
+ new_skb = aggr_get_free_skb(p_aggr);
+ if (!new_skb) {
+ ath6kl_err("no buffer available\n");
+ break;
+ }
+
+ memcpy(new_skb->data, framep, frame_8023_len);
+ skb_put(new_skb, frame_8023_len);
+ if (ath6kl_wmi_dot3_2_dix(new_skb)) {
+ ath6kl_err("dot3_2_dix error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+
+ skb_queue_tail(&rxtid->q, new_skb);
+
+ /* Is this the last subframe within this aggregate ? */
+ if ((amsdu_len - frame_8023_len) == 0)
+ break;
+
+ /*
+ * Add the A-MSDU subframe padding: each subframe is padded
+ * so that the next one starts on a 4-byte boundary.
+ */
+ frame_8023_len = ALIGN(frame_8023_len, 4);
+
+ framep += frame_8023_len;
+ amsdu_len -= frame_8023_len;
+ }
+
+ dev_kfree_skb(skb);
+}
+
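+/*
+ * Flush buffered frames for a TID from the reorder hold queue to the
+ * network stack. With order == 1 only the in-order frames at the head
+ * of the window are delivered, stopping at the first hole; with
+ * order == 0 everything up to seq_no (or the whole current window when
+ * seq_no is 0) is flushed and missing frames are counted as holes.
+ */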
+static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
+ u16 seq_no, u8 order)
+{
+ struct sk_buff *skb;
+ struct rxtid *rxtid;
+ struct skb_hold_q *node;
+ u16 idx, idx_end, seq_end;
+ struct rxtid_stats *stats;
+
+ if (!p_aggr)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+ stats = &p_aggr->stat[tid];
+
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+
+ /*
+ * idx_end is typically the last possible frame in the window,
+ * but it changes to the given seq_no when a BAR comes in. If
+ * seq_no is non-zero, we dequeue up to it and stop.
+ * Note: the last sequence number in the current window occupies
+ * the same index position as the one just before the window
+ * start.
+ * An important point: if win_sz is 7, then for a sequence
+ * number space of 4096 there will be holes when the sequence
+ * number wraps around. The target should choose win_sz
+ * judiciously with this in mind; since TID_WINDOW_SZ is
+ * 2 x win_sz, window sizes of 2, 4, 8 and 16 work fine.
+ * Frames must be dequeued from "idx" to "idx_end", inclusive.
+ */
+ seq_end = seq_no ? seq_no : rxtid->seq_next;
+ idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
+
+ spin_lock_bh(&rxtid->lock);
+
+ do {
+ node = &rxtid->hold_q[idx];
+ if ((order == 1) && (!node->skb))
+ break;
+
+ if (node->skb) {
+ if (node->is_amsdu)
+ aggr_slice_amsdu(p_aggr, rxtid, node->skb);
+ else
+ skb_queue_tail(&rxtid->q, node->skb);
+ node->skb = NULL;
+ } else
+ stats->num_hole++;
+
+ rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
+ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
+ } while (idx != idx_end);
+
+ spin_unlock_bh(&rxtid->lock);
+
+ stats->num_delivered += skb_queue_len(&rxtid->q);
+
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
+}
+
+static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
+ u16 seq_no,
+ bool is_amsdu, struct sk_buff *frame)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ struct sk_buff *skb;
+ struct skb_hold_q *node;
+ u16 idx, st, cur, end;
+ bool is_queued = false;
+ u16 extended_end;
+
+ rxtid = &agg_info->rx_tid[tid];
+ stats = &agg_info->stat[tid];
+
+ stats->num_into_aggr++;
+
+ if (!rxtid->aggr) {
+ if (is_amsdu) {
+ aggr_slice_amsdu(agg_info, rxtid, frame);
+ is_queued = true;
+ stats->num_amsdu++;
+ while ((skb = skb_dequeue(&rxtid->q)))
+ ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
+ skb);
+ }
+ return is_queued;
+ }
+
+ /* Check the incoming sequence no, if it's in the window */
+ st = rxtid->seq_next;
+ cur = seq_no;
+ end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
+
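+	/*
+	 * seq_no falls outside the current window [st, end]. If it is
+	 * also outside the extended window (up to end + hold_q_sz - 1),
+	 * flush the whole hold queue and restart the window so that
+	 * seq_no becomes its last slot; otherwise only slide the window
+	 * forward and release the frames that drop out of it.
+	 */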
+ if (((st < end) && (cur < st || cur > end)) ||
+ ((st > end) && (cur > end) && (cur < st))) {
+ extended_end = (end + rxtid->hold_q_sz - 1) &
+ ATH6KL_MAX_SEQ_NO;
+
+ if (((end < extended_end) &&
+ (cur < end || cur > extended_end)) ||
+ ((end > extended_end) && (cur > extended_end) &&
+ (cur < end))) {
+ aggr_deque_frms(agg_info, tid, 0, 0);
+ if (cur >= rxtid->hold_q_sz - 1)
+ rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
+ else
+ rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+ } else {
+ /*
+ * Dequeue only those frames that are outside the
+ * new shifted window.
+ */
+ if (cur >= rxtid->hold_q_sz - 1)
+ st = cur - (rxtid->hold_q_sz - 1);
+ else
+ st = ATH6KL_MAX_SEQ_NO -
+ (rxtid->hold_q_sz - 2 - cur);
+
+ aggr_deque_frms(agg_info, tid, st, 0);
+ }
+
+ stats->num_oow++;
+ }
+
+ idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
+
+ node = &rxtid->hold_q[idx];
+
+ spin_lock_bh(&rxtid->lock);
+
+	/*
+	 * Is the current frame a duplicate, or something beyond our
+	 * window (hold_q, which is already 2x the window size)?
+	 *
+	 * 1. Duplicate is easy - drop the incoming frame.
+	 * 2. Not falling in the current sliding window:
+	 * 2a. Is frame_seq_no preceding the current tid seq_no?
+	 * -> Drop the frame; perhaps the sender did not get our ACK.
+	 * This is taken care of above.
+	 * 2b. Is frame_seq_no beyond window (st, TID_WINDOW_SZ)?
+	 * -> Also taken care of above, by moving the window forward.
+	 */
+ dev_kfree_skb(node->skb);
+ stats->num_dups++;
+
+ node->skb = frame;
+ is_queued = true;
+ node->is_amsdu = is_amsdu;
+ node->seq_no = seq_no;
+
+ if (node->is_amsdu)
+ stats->num_amsdu++;
+ else
+ stats->num_mpdu++;
+
+ spin_unlock_bh(&rxtid->lock);
+
+ aggr_deque_frms(agg_info, tid, 0, 1);
+
+ if (agg_info->timer_scheduled)
+ rxtid->progress = true;
+ else
+ for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ if (rxtid->hold_q[idx].skb) {
+ /*
+ * There is a frame in the queue and no
+ * timer so start a timer to ensure that
+ * the frame doesn't remain stuck
+ * forever.
+ */
+ agg_info->timer_scheduled = true;
+ mod_timer(&agg_info->timer,
+ (jiffies +
+ HZ * (AGGR_RX_TIMEOUT) / 1000));
+ rxtid->progress = false;
+ rxtid->timer_mon = true;
+ break;
+ }
+ }
+
+ return is_queued;
+}
+
+void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
+{
+ struct ath6kl *ar = target->dev->ar;
+ struct sk_buff *skb = packet->pkt_cntxt;
+ struct wmi_rx_meta_v2 *meta;
+ struct wmi_data_hdr *dhdr;
+ int min_hdr_len;
+ u8 meta_type, dot11_hdr = 0;
+ int status = packet->status;
+ enum htc_endpoint_id ept = packet->endpoint;
+ bool is_amsdu, prev_ps, ps_state = false;
+ struct ath6kl_sta *conn = NULL;
+ struct sk_buff *skb1 = NULL;
+ struct ethhdr *datap = NULL;
+ u16 seq_no, offset;
+ u8 tid;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
+ "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
+ __func__, ar, ept, skb, packet->buf,
+ packet->act_len, status);
+
+ if (status || !(skb->data + HTC_HDR_LENGTH)) {
+ ar->net_stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * Take lock to protect buffer counts and adaptive power throughput
+ * state.
+ */
+ spin_lock_bh(&ar->lock);
+
+ ar->net_stats.rx_packets++;
+ ar->net_stats.rx_bytes += packet->act_len;
+
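+	/*
+	 * The payload is preceded by the HTC header: extend the skb over
+	 * the received bytes, then strip the HTC header off the front.
+	 */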
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);
+
+ spin_unlock_bh(&ar->lock);
+
+ skb->dev = ar->net_dev;
+
+ if (!test_bit(WMI_ENABLED, &ar->flag)) {
+ if (EPPING_ALIGNMENT_PAD > 0)
+ skb_pull(skb, EPPING_ALIGNMENT_PAD);
+ ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ return;
+ }
+
+ if (ept == ar->ctrl_ep) {
+ ath6kl_wmi_control_rx(ar->wmi, skb);
+ return;
+ }
+
+ min_hdr_len = sizeof(struct ethhdr);
+ min_hdr_len += sizeof(struct wmi_data_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr);
+
+ dhdr = (struct wmi_data_hdr *) skb->data;
+
+ /*
+ * In the case of AP mode we may receive NULL data frames
+ * that do not have LLC hdr. They are 16 bytes in size.
+ * Allow these frames in the AP mode.
+ */
+ if (ar->nw_type != AP_NETWORK &&
+ ((packet->act_len < min_hdr_len) ||
+ (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
+ ath6kl_info("frame len is too short or too long\n");
+ ar->net_stats.rx_errors++;
+ ar->net_stats.rx_length_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* Get the Power save state of the STA */
+ if (ar->nw_type == AP_NETWORK) {
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+
+ ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
+ WMI_DATA_HDR_PS_MASK);
+
+ offset = sizeof(struct wmi_data_hdr);
+
+ switch (meta_type) {
+ case 0:
+ break;
+ case WMI_META_VERSION_1:
+ offset += sizeof(struct wmi_rx_meta_v1);
+ break;
+ case WMI_META_VERSION_2:
+ offset += sizeof(struct wmi_rx_meta_v2);
+ break;
+ default:
+ break;
+ }
+
+ datap = (struct ethhdr *) (skb->data + offset);
+ conn = ath6kl_find_sta(ar, datap->h_source);
+
+ if (!conn) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+		/*
+		 * If there is a change in the PS state of the STA,
+		 * take the appropriate steps:
+		 *
+		 * 1. If Sleep-->Awake, flush the psq for the STA and
+		 * clear the PVB for the STA.
+		 * 2. If Awake-->Sleep, start queueing frames for
+		 * the STA.
+		 */
+ prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
+
+ if (ps_state)
+ conn->sta_flags |= STA_PS_SLEEP;
+ else
+ conn->sta_flags &= ~STA_PS_SLEEP;
+
+ if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
+ if (!(conn->sta_flags & STA_PS_SLEEP)) {
+ struct sk_buff *skbuff = NULL;
+
+ spin_lock_bh(&conn->psq_lock);
+ while ((skbuff = skb_dequeue(&conn->psq))
+ != NULL) {
+ spin_unlock_bh(&conn->psq_lock);
+ ath6kl_data_tx(skbuff, ar->net_dev);
+ spin_lock_bh(&conn->psq_lock);
+ }
+ spin_unlock_bh(&conn->psq_lock);
+ /* Clear the PVB for this STA */
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ }
+ }
+
+ /* drop NULL data frames here */
+ if ((packet->act_len < min_hdr_len) ||
+ (packet->act_len >
+ WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
+
+ is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
+ tid = wmi_data_hdr_get_up(dhdr);
+ seq_no = wmi_data_hdr_get_seqno(dhdr);
+ meta_type = wmi_data_hdr_get_meta(dhdr);
+ dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
+
+ ath6kl_wmi_data_hdr_remove(ar->wmi, skb);
+
+ switch (meta_type) {
+ case WMI_META_VERSION_1:
+ skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
+ break;
+ case WMI_META_VERSION_2:
+ meta = (struct wmi_rx_meta_v2 *) skb->data;
+ if (meta->csum_flags & 0x1) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum) meta->csum;
+ }
+ skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
+ break;
+ default:
+ break;
+ }
+
+ if (dot11_hdr)
+ status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
+ else if (!is_amsdu)
+ status = ath6kl_wmi_dot3_2_dix(skb);
+
+ if (status) {
+ /*
+ * Drop frames that could not be processed (lack of
+ * memory, etc.)
+ */
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (!(ar->net_dev->flags & IFF_UP)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ if (ar->nw_type == AP_NETWORK) {
+ datap = (struct ethhdr *) skb->data;
+ if (is_multicast_ether_addr(datap->h_dest))
+ /*
+ * Bcast/Mcast frames should be sent to the
+ * OS stack as well as on the air.
+ */
+ skb1 = skb_copy(skb, GFP_ATOMIC);
+ else {
+			/*
+			 * Search for a connected STA with dstMac as
+			 * the MAC address. If found, send the frame
+			 * to it over the air, else send the frame up
+			 * the stack.
+			 */
+ struct ath6kl_sta *conn = NULL;
+ conn = ath6kl_find_sta(ar, datap->h_dest);
+
+ if (conn && ar->intra_bss) {
+ skb1 = skb;
+ skb = NULL;
+ } else if (conn && !ar->intra_bss) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ }
+ }
+ if (skb1)
+ ath6kl_data_tx(skb1, ar->net_dev);
+ }
+
+ if (!aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+ is_amsdu, skb))
+ ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+}
+
+static void aggr_timeout(unsigned long arg)
+{
+ u8 i, j;
+ struct aggr_info *p_aggr = (struct aggr_info *) arg;
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &p_aggr->rx_tid[i];
+ stats = &p_aggr->stat[i];
+
+ if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
+ continue;
+
+ stats->num_timeouts++;
+ ath6kl_err("aggr timeout (st %d end %d)\n",
+ rxtid->seq_next,
+ ((rxtid->seq_next + rxtid->hold_q_sz-1) &
+ ATH6KL_MAX_SEQ_NO));
+ aggr_deque_frms(p_aggr, i, 0, 0);
+ }
+
+ p_aggr->timer_scheduled = false;
+
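+	/* Re-arm the timer only if some TID still holds undelivered frames. */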
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &p_aggr->rx_tid[i];
+
+ if (rxtid->aggr && rxtid->hold_q) {
+ for (j = 0; j < rxtid->hold_q_sz; j++) {
+ if (rxtid->hold_q[j].skb) {
+ p_aggr->timer_scheduled = true;
+ rxtid->timer_mon = true;
+ rxtid->progress = false;
+ break;
+ }
+ }
+
+ if (j >= rxtid->hold_q_sz)
+ rxtid->timer_mon = false;
+ }
+ }
+
+ if (p_aggr->timer_scheduled)
+ mod_timer(&p_aggr->timer,
+ jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
+}
+
+static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
+{
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+
+ if (!p_aggr || tid >= NUM_OF_TIDS)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+ stats = &p_aggr->stat[tid];
+
+ if (rxtid->aggr)
+ aggr_deque_frms(p_aggr, tid, 0, 0);
+
+ rxtid->aggr = false;
+ rxtid->progress = false;
+ rxtid->timer_mon = false;
+ rxtid->win_sz = 0;
+ rxtid->seq_next = 0;
+ rxtid->hold_q_sz = 0;
+
+ kfree(rxtid->hold_q);
+ rxtid->hold_q = NULL;
+
+ memset(stats, 0, sizeof(struct rxtid_stats));
+}
+
+void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+{
+ struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct rxtid *rxtid;
+ struct rxtid_stats *stats;
+ u16 hold_q_size;
+
+ if (!p_aggr)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+ stats = &p_aggr->stat[tid];
+
+ if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
+ __func__, win_sz, tid);
+
+ if (rxtid->aggr)
+ aggr_delete_tid_state(p_aggr, tid);
+
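+	/*
+	 * The hold queue is sized to twice the negotiated window
+	 * (TID_WINDOW_SZ); frames up to one window beyond the current
+	 * one can still be held before the window is slid forward.
+	 */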
+ rxtid->seq_next = seq_no;
+ hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
+ rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
+ if (!rxtid->hold_q)
+ return;
+
+ rxtid->win_sz = win_sz;
+ rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
+ if (!skb_queue_empty(&rxtid->q))
+ return;
+
+ rxtid->aggr = true;
+}
+
+struct aggr_info *aggr_init(struct net_device *dev)
+{
+ struct aggr_info *p_aggr = NULL;
+ struct rxtid *rxtid;
+ u8 i;
+
+ p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
+ if (!p_aggr) {
+ ath6kl_err("failed to alloc memory for aggr_node\n");
+ return NULL;
+ }
+
+ p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
+ p_aggr->dev = dev;
+ init_timer(&p_aggr->timer);
+ p_aggr->timer.function = aggr_timeout;
+ p_aggr->timer.data = (unsigned long) p_aggr;
+
+ p_aggr->timer_scheduled = false;
+ skb_queue_head_init(&p_aggr->free_q);
+
+ ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &p_aggr->rx_tid[i];
+ rxtid->aggr = false;
+ rxtid->progress = false;
+ rxtid->timer_mon = false;
+ skb_queue_head_init(&rxtid->q);
+ spin_lock_init(&rxtid->lock);
+ }
+
+ return p_aggr;
+}
+
+void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+{
+ struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct rxtid *rxtid;
+
+ if (!p_aggr)
+ return;
+
+ rxtid = &p_aggr->rx_tid[tid];
+
+ if (rxtid->aggr)
+ aggr_delete_tid_state(p_aggr, tid);
+}
+
+void aggr_reset_state(struct aggr_info *aggr_info)
+{
+ u8 tid;
+
+ for (tid = 0; tid < NUM_OF_TIDS; tid++)
+ aggr_delete_tid_state(aggr_info, tid);
+}
+
+/* clean up our amsdu buffer list */
+void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
+{
+ struct htc_packet *packet, *tmp_pkt;
+
+ spin_lock_bh(&ar->lock);
+ if (list_empty(&ar->amsdu_rx_buffer_queue)) {
+ spin_unlock_bh(&ar->lock);
+ return;
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
+ list) {
+ list_del(&packet->list);
+ spin_unlock_bh(&ar->lock);
+ dev_kfree_skb(packet->pkt_cntxt);
+ spin_lock_bh(&ar->lock);
+ }
+
+ spin_unlock_bh(&ar->lock);
+}
+
+void aggr_module_destroy(struct aggr_info *aggr_info)
+{
+ struct rxtid *rxtid;
+ u8 i, k;
+
+ if (!aggr_info)
+ return;
+
+ if (aggr_info->timer_scheduled) {
+ del_timer(&aggr_info->timer);
+ aggr_info->timer_scheduled = false;
+ }
+
+ for (i = 0; i < NUM_OF_TIDS; i++) {
+ rxtid = &aggr_info->rx_tid[i];
+ if (rxtid->hold_q) {
+ for (k = 0; k < rxtid->hold_q_sz; k++)
+ dev_kfree_skb(rxtid->hold_q[k].skb);
+ kfree(rxtid->hold_q);
+ }
+
+ skb_queue_purge(&rxtid->q);
+ }
+
+ skb_queue_purge(&aggr_info->free_q);
+ kfree(aggr_info);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
new file mode 100644
index 000000000000..a52d7d201fbd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -0,0 +1,2762 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include "core.h"
+#include "debug.h"
+
+static int ath6kl_wmi_sync_point(struct wmi *wmi);
+
+static const s32 wmi_rate_tbl[][2] = {
+ /* {W/O SGI, with SGI} */
+ {1000, 1000},
+ {2000, 2000},
+ {5500, 5500},
+ {11000, 11000},
+ {6000, 6000},
+ {9000, 9000},
+ {12000, 12000},
+ {18000, 18000},
+ {24000, 24000},
+ {36000, 36000},
+ {48000, 48000},
+ {54000, 54000},
+ {6500, 7200},
+ {13000, 14400},
+ {19500, 21700},
+ {26000, 28900},
+ {39000, 43300},
+ {52000, 57800},
+ {58500, 65000},
+ {65000, 72200},
+ {13500, 15000},
+ {27000, 30000},
+ {40500, 45000},
+ {54000, 60000},
+ {81000, 90000},
+ {108000, 120000},
+ {121500, 135000},
+ {135000, 150000},
+ {0, 0}
+};
+
+/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
+static const u8 up_to_ac[] = {
+ WMM_AC_BE,
+ WMM_AC_BK,
+ WMM_AC_BK,
+ WMM_AC_BE,
+ WMM_AC_VI,
+ WMM_AC_VI,
+ WMM_AC_VO,
+ WMM_AC_VO,
+};
+
+void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
+{
+ if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
+ return;
+
+ wmi->ep_id = ep_id;
+}
+
+enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
+{
+ return wmi->ep_id;
+}
+
+/* Performs DIX to 802.3 encapsulation for transmit packets.
+ * Assumes the entire DIX header is contiguous and that there is
+ * enough room in the buffer for an 802.3 MAC header and LLC+SNAP headers.
+ */
+int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr *eth_hdr;
+ size_t new_len;
+ __be16 type;
+ u8 *datap;
+ u16 size;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr);
+ if (skb_headroom(skb) < size)
+ return -ENOMEM;
+
+ eth_hdr = (struct ethhdr *) skb->data;
+ type = eth_hdr->h_proto;
+
+ if (!is_ethertype(be16_to_cpu(type))) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "%s: pkt is already in 802.3 format\n", __func__);
+ return 0;
+ }
+
+ new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);
+
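+	/*
+	 * Grow the headroom by the LLC/SNAP size, rewrite the Ethernet
+	 * header with the 802.3 length field in place of the ethertype,
+	 * and carry the original ethertype in the SNAP header.
+	 */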
+ skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ datap = skb->data;
+
+ eth_hdr->h_proto = cpu_to_be16(new_len);
+
+ memcpy(datap, eth_hdr, sizeof(*eth_hdr));
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr));
+ llc_hdr->dsap = 0xAA;
+ llc_hdr->ssap = 0xAA;
+ llc_hdr->cntl = 0x03;
+ llc_hdr->org_code[0] = 0x0;
+ llc_hdr->org_code[1] = 0x0;
+ llc_hdr->org_code[2] = 0x0;
+ llc_hdr->eth_type = type;
+
+ return 0;
+}
+
+static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 *version, void *tx_meta_info)
+{
+ struct wmi_tx_meta_v1 *v1;
+ struct wmi_tx_meta_v2 *v2;
+
+ if (WARN_ON(skb == NULL || version == NULL))
+ return -EINVAL;
+
+ switch (*version) {
+ case WMI_META_VERSION_1:
+ skb_push(skb, WMI_MAX_TX_META_SZ);
+ v1 = (struct wmi_tx_meta_v1 *) skb->data;
+ v1->pkt_id = 0;
+ v1->rate_plcy_id = 0;
+ *version = WMI_META_VERSION_1;
+ break;
+ case WMI_META_VERSION_2:
+ skb_push(skb, WMI_MAX_TX_META_SZ);
+ v2 = (struct wmi_tx_meta_v2 *) skb->data;
+ memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info,
+ sizeof(struct wmi_tx_meta_v2));
+ break;
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 msg_type, bool more_data,
+ enum wmi_data_hdr_data_type data_type,
+ u8 meta_ver, void *tx_meta_info)
+{
+ struct wmi_data_hdr *data_hdr;
+ int ret;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
+ if (ret)
+ return ret;
+
+ skb_push(skb, sizeof(struct wmi_data_hdr));
+
+ data_hdr = (struct wmi_data_hdr *)skb->data;
+ memset(data_hdr, 0, sizeof(struct wmi_data_hdr));
+
+ data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
+ data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;
+
+ if (more_data)
+ data_hdr->info |=
+ WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
+
+ data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
+ data_hdr->info3 = 0;
+
+ return 0;
+}
+
+static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
+{
+ struct iphdr *ip_hdr = (struct iphdr *) pkt;
+ u8 ip_pri;
+
+ /*
+ * Determine IPTOS priority
+ *
+ * IP-TOS - 8bits
+ * : DSCP(6-bits) ECN(2-bits)
+ * : DSCP - P2 P1 P0 X X X
+ * where (P2 P1 P0) form 802.1D
+ */
+ ip_pri = ip_hdr->tos >> 5;
+ ip_pri &= 0x7;
+
+ if ((layer2_pri & 0x7) > ip_pri)
+ return (u8) layer2_pri & 0x7;
+ else
+ return ip_pri;
+}
+
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+ u32 layer2_priority, bool wmm_enabled,
+ u8 *ac)
+{
+ struct wmi_data_hdr *data_hdr;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct wmi_create_pstream_cmd cmd;
+ u32 meta_size, hdr_size;
+ u16 ip_type = IP_ETHERTYPE;
+ u8 stream_exist, usr_pri;
+ u8 traffic_class = WMM_AC_BE;
+ u8 *datap;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+ data_hdr = (struct wmi_data_hdr *) datap;
+
+ meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
+ WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0;
+
+ if (!wmm_enabled) {
+ /* If WMM is disabled all traffic goes as BE traffic */
+ usr_pri = 0;
+ } else {
+ hdr_size = sizeof(struct ethhdr);
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap +
+ sizeof(struct
+ wmi_data_hdr) +
+ meta_size + hdr_size);
+
+ if (llc_hdr->eth_type == htons(ip_type)) {
+ /*
+ * Extract the endpoint info from the TOS field
+ * in the IP header.
+ */
+ usr_pri =
+ ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
+ sizeof(struct ath6kl_llc_snap_hdr),
+ layer2_priority);
+ } else
+ usr_pri = layer2_priority & 0x7;
+ }
+
+ /* workaround for WMM S5 */
+ if ((wmi->traffic_class == WMM_AC_VI) &&
+ ((usr_pri == 5) || (usr_pri == 4)))
+ usr_pri = 1;
+
+ /* Convert user priority to traffic class */
+ traffic_class = up_to_ac[usr_pri & 0x7];
+
+ wmi_data_hdr_set_up(data_hdr, usr_pri);
+
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
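+	/*
+	 * Create the fat pipe (pstream) for this AC the first time
+	 * traffic shows up on it; later frames on the same AC reuse it.
+	 */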
+ if (!(stream_exist & (1 << traffic_class))) {
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.traffic_class = traffic_class;
+ cmd.user_pri = usr_pri;
+ cmd.inactivity_int =
+ cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
+ /* Implicit streams are created with TSID 0xFF */
+ cmd.tsid = WMI_IMPLICIT_PSTREAM;
+ ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+ }
+
+ *ac = traffic_class;
+
+ return 0;
+}
+
+int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct ieee80211_hdr_3addr *pwh, wh;
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr eth_hdr;
+ u32 hdr_size;
+ u8 *datap;
+ __le16 sub_type;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+ pwh = (struct ieee80211_hdr_3addr *) datap;
+
+ sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
+
+ memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr));
+
+ /* Strip off the 802.11 header */
+ if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
+ sizeof(u32));
+ skb_pull(skb, hdr_size);
+ } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+ skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+
+ datap = skb->data;
+ llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
+
+ eth_hdr.h_proto = llc_hdr->eth_type;
+ memset(eth_hdr.h_dest, 0, sizeof(eth_hdr.h_dest));
+ memset(eth_hdr.h_source, 0, sizeof(eth_hdr.h_source));
+
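+	/*
+	 * Recover the source/destination MAC addresses from the 802.11
+	 * address fields according to the To-DS/From-DS bits.
+	 */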
+ switch ((le16_to_cpu(wh.frame_control)) &
+ (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+ case 0:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_TODS:
+ memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_FROMDS:
+ memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
+ memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN);
+ break;
+ case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+ break;
+ }
+
+ skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ skb_push(skb, sizeof(eth_hdr));
+
+ datap = skb->data;
+
+ memcpy(datap, &eth_hdr, sizeof(eth_hdr));
+
+ return 0;
+}
+
+/*
+ * Performs 802.3 to DIX conversion for received packets.
+ * Assumes the entire 802.3 header is contiguous.
+ */
+int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
+{
+ struct ath6kl_llc_snap_hdr *llc_hdr;
+ struct ethhdr eth_hdr;
+ u8 *datap;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ datap = skb->data;
+
+ memcpy(&eth_hdr, datap, sizeof(eth_hdr));
+
+ llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr));
+ eth_hdr.h_proto = llc_hdr->eth_type;
+
+ skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
+ datap = skb->data;
+
+ memcpy(datap, &eth_hdr, sizeof(eth_hdr));
+
+ return 0;
+}
+
+int ath6kl_wmi_data_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
+{
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(struct wmi_data_hdr));
+
+ return 0;
+}
+
+void ath6kl_wmi_iterate_nodes(struct wmi *wmi,
+ void (*f) (void *arg, struct bss *),
+ void *arg)
+{
+ wlan_iterate_nodes(&wmi->scan_table, f, arg);
+}
+
+static void ath6kl_wmi_convert_bssinfo_hdr2_to_hdr(struct sk_buff *skb,
+ u8 *datap)
+{
+ struct wmi_bss_info_hdr2 bih2;
+ struct wmi_bss_info_hdr *bih;
+
+ memcpy(&bih2, datap, sizeof(struct wmi_bss_info_hdr2));
+
+ skb_push(skb, 4);
+ bih = (struct wmi_bss_info_hdr *) skb->data;
+
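+	/*
+	 * hdr2 carries only SNR; derive RSSI as SNR - 95, treating
+	 * -95 dBm as the assumed noise floor.
+	 */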
+ bih->ch = bih2.ch;
+ bih->frame_type = bih2.frame_type;
+ bih->snr = bih2.snr;
+ bih->rssi = a_cpu_to_sle16(bih2.snr - 95);
+ bih->ie_mask = cpu_to_le32(le16_to_cpu(bih2.ie_mask));
+ memcpy(bih->bssid, bih2.bssid, ETH_ALEN);
+}
+
+static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
+{
+ struct tx_complete_msg_v1 *msg_v1;
+ struct wmi_tx_complete_event *evt;
+ int index;
+ u16 size;
+
+ evt = (struct wmi_tx_complete_event *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
+ evt->num_msg, evt->msg_len, evt->msg_type);
+
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_WMI))
+ return 0;
+
+ for (index = 0; index < evt->num_msg; index++) {
+ size = sizeof(struct wmi_tx_complete_event) +
+ (index * sizeof(struct tx_complete_msg_v1));
+ msg_v1 = (struct tx_complete_msg_v1 *)(datap + size);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n",
+ msg_v1->status, msg_v1->pkt_id,
+ msg_v1->rate_idx, msg_v1->ack_failures);
+ }
+
+ return 0;
+}
+
+static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
+{
+ struct sk_buff *skb;
+
+ skb = ath6kl_buf_alloc(size);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, size);
+ if (size)
+ memset(skb->data, 0, size);
+
+ return skb;
+}
+
+/* Send a "simple" wmi command -- one with no arguments */
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(0);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
+
+ if (len < sizeof(struct wmi_ready_event_2))
+ return -EINVAL;
+
+ wmi->ready = true;
+ ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
+ le32_to_cpu(ev->sw_version),
+ le32_to_cpu(ev->abi_version));
+
+ return 0;
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_connect_event *ev;
+ u8 *pie, *peie;
+
+ if (len < sizeof(struct wmi_connect_event))
+ return -EINVAL;
+
+ ev = (struct wmi_connect_event *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM\n",
+ __func__, ev->ch, ev->bssid);
+
+ memcpy(wmi->bssid, ev->bssid, ETH_ALEN);
+
+ /* Start of assoc rsp IEs */
+ pie = ev->assoc_info + ev->beacon_ie_len +
+ ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */
+
+ /* End of assoc rsp IEs */
+ peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
+ ev->assoc_resp_len;
+
+ while (pie < peie) {
+ switch (*pie) {
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
+ pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
+				/* WMM OUI (00:50:F2) */
+ if (pie[1] > 5
+ && pie[6] == WMM_PARAM_OUI_SUBTYPE)
+ wmi->is_wmm_enabled = true;
+ }
+ break;
+ }
+
+ if (wmi->is_wmm_enabled)
+ break;
+
+ pie += pie[1] + 2;
+ }
+
+ ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->ch), ev->bssid,
+ le16_to_cpu(ev->listen_intvl),
+ le16_to_cpu(ev->beacon_intvl),
+ le32_to_cpu(ev->nw_type),
+ ev->beacon_ie_len, ev->assoc_req_len,
+ ev->assoc_resp_len, ev->assoc_info);
+
+ return 0;
+}
+
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+	struct wmi_disconnect_event *ev;
+
+	wmi->traffic_class = 100;
+
+ if (len < sizeof(struct wmi_disconnect_event))
+ return -EINVAL;
+
+ ev = (struct wmi_disconnect_event *) datap;
+ memset(wmi->bssid, 0, sizeof(wmi->bssid));
+
+ wmi->is_wmm_enabled = false;
+ wmi->pair_crypto_type = NONE_CRYPT;
+ wmi->grp_crypto_type = NONE_CRYPT;
+
+ ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+ ev->bssid, ev->assoc_resp_len, ev->assoc_info,
+ le16_to_cpu(ev->proto_reason_status));
+
+ return 0;
+}
+
+static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_peer_node_event *ev;
+
+ if (len < sizeof(struct wmi_peer_node_event))
+ return -EINVAL;
+
+ ev = (struct wmi_peer_node_event *) datap;
+
+ if (ev->event_code == PEER_NODE_JOIN_EVENT)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n",
+ ev->peer_mac_addr);
+ else if (ev->event_code == PEER_NODE_LEAVE_EVENT)
+ ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n",
+ ev->peer_mac_addr);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_tkip_micerr_event *ev;
+
+ if (len < sizeof(struct wmi_tkip_micerr_event))
+ return -EINVAL;
+
+ ev = (struct wmi_tkip_micerr_event *) datap;
+
+ ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+
+ return 0;
+}
+
+static int ath6kl_wlan_parse_beacon(u8 *buf, int frame_len,
+ struct ath6kl_common_ie *cie)
+{
+ u8 *frm, *efrm;
+ u8 elemid_ssid = false;
+
+ frm = buf;
+ efrm = (u8 *) (frm + frame_len);
+
+ /*
+ * beacon/probe response frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ * [tlv] supported rates
+ * [tlv] country information
+ * [tlv] parameter set (FH/DS)
+ * [tlv] erp information
+ * [tlv] extended supported rates
+ * [tlv] WMM
+ * [tlv] WPA or RSN
+ * [tlv] Atheros Advanced Capabilities
+ */
+ if ((efrm - frm) < 12)
+ return -EINVAL;
+
+ memset(cie, 0, sizeof(*cie));
+
+ cie->ie_tstamp = frm;
+ frm += 8;
+ cie->ie_beaconInt = *(u16 *) frm;
+ frm += 2;
+ cie->ie_capInfo = *(u16 *) frm;
+ frm += 2;
+ cie->ie_chan = 0;
+
+ while (frm < efrm) {
+ switch (*frm) {
+ case WLAN_EID_SSID:
+ if (!elemid_ssid) {
+ cie->ie_ssid = frm;
+ elemid_ssid = true;
+ }
+ break;
+ case WLAN_EID_SUPP_RATES:
+ cie->ie_rates = frm;
+ break;
+ case WLAN_EID_COUNTRY:
+ cie->ie_country = frm;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ break;
+ case WLAN_EID_DS_PARAMS:
+ cie->ie_chan = frm[2];
+ break;
+ case WLAN_EID_TIM:
+ cie->ie_tim = frm;
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ cie->ie_xrates = frm;
+ break;
+ case WLAN_EID_ERP_INFO:
+ if (frm[1] != 1)
+ return -EINVAL;
+
+ cie->ie_erp = frm[2];
+ break;
+ case WLAN_EID_RSN:
+ cie->ie_rsn = frm;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ cie->ie_htcap = frm;
+ break;
+ case WLAN_EID_HT_INFORMATION:
+ cie->ie_htop = frm;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (frm[1] > 3 && frm[2] == 0x00 && frm[3] == 0x50 &&
+ frm[4] == 0xf2) {
+				/* OUI type (00:50:F2) */
+
+ if (frm[5] == WPA_OUI_TYPE) {
+					/* WPA OUI */
+ cie->ie_wpa = frm;
+ } else if (frm[5] == WMM_OUI_TYPE) {
+					/* WMM OUI */
+ cie->ie_wmm = frm;
+ } else if (frm[5] == WSC_OUT_TYPE) {
+					/* WSC OUI */
+ cie->ie_wsc = frm;
+ }
+
+ } else if (frm[1] > 3 && frm[2] == 0x00
+ && frm[3] == 0x03 && frm[4] == 0x7f
+ && frm[5] == ATH_OUI_TYPE) {
+ /* Atheros OUI (00:03:7f) */
+ cie->ie_ath = frm;
+ }
+ break;
+ default:
+ break;
+ }
+ frm += frm[1] + 2;
+ }
+
+ if ((cie->ie_rates == NULL)
+ || (cie->ie_rates[1] > ATH6KL_RATE_MAXSIZE))
+ return -EINVAL;
+
+ if ((cie->ie_ssid == NULL)
+ || (cie->ie_ssid[1] > IEEE80211_MAX_SSID_LEN))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct bss *bss = NULL;
+ struct wmi_bss_info_hdr *bih;
+ u8 cached_ssid_len = 0;
+ u8 cached_ssid[IEEE80211_MAX_SSID_LEN] = { 0 };
+ u8 beacon_ssid_len = 0;
+ u8 *buf, *ie_ssid;
+ u8 *ni_buf;
+ int buf_len;
+
+ int ret;
+
+ if (len <= sizeof(struct wmi_bss_info_hdr))
+ return -EINVAL;
+
+ bih = (struct wmi_bss_info_hdr *) datap;
+ bss = wlan_find_node(&wmi->scan_table, bih->bssid);
+
+ if (a_sle16_to_cpu(bih->rssi) > 0) {
+ if (bss == NULL)
+ return 0;
+ else
+ bih->rssi = a_cpu_to_sle16(bss->ni_rssi);
+ }
+
+ buf = datap + sizeof(struct wmi_bss_info_hdr);
+ len -= sizeof(struct wmi_bss_info_hdr);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "bss info evt - ch %u, rssi %02x, bssid \"%pM\"\n",
+ bih->ch, a_sle16_to_cpu(bih->rssi), bih->bssid);
+
+ if (bss != NULL) {
+ /*
+ * Free up the node. We are about to allocate a new node.
+ * In case of hidden AP, beacon will not have ssid,
+ * but a directed probe response will have it,
+ * so cache the probe-resp-ssid if already present.
+ */
+ if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE)) {
+ ie_ssid = bss->ni_cie.ie_ssid;
+ if (ie_ssid && (ie_ssid[1] <= IEEE80211_MAX_SSID_LEN) &&
+ (ie_ssid[2] != 0)) {
+ cached_ssid_len = ie_ssid[1];
+ memcpy(cached_ssid, ie_ssid + 2,
+ cached_ssid_len);
+ }
+ }
+
+		/*
+		 * Use the current average RSSI of the associated AP, based
+		 * on the assumptions that:
+		 * 1. Most OSes with a GUI will update RSSI via
+		 * ath6kl_wmi_get_stats_cmd() periodically.
+		 * 2. ath6kl_wmi_get_stats_cmd() will be called when calling
+		 * ath6kl_wmi_startscan_cmd().
+		 * The averaged RSSI gives the end user a better impression
+		 * than the instantaneous value from the scan result. It also
+		 * keeps the RSSI shown in the GUI consistent between the
+		 * scan result and the RSSI signal icon.
+		 */
+ if (memcmp(wmi->bssid, bih->bssid, ETH_ALEN) == 0) {
+ bih->rssi = a_cpu_to_sle16(bss->ni_rssi);
+ bih->snr = bss->ni_snr;
+ }
+
+ wlan_node_reclaim(&wmi->scan_table, bss);
+ }
+
+ /*
+ * beacon/probe response frame format
+ * [8] time stamp
+ * [2] beacon interval
+ * [2] capability information
+ * [tlv] ssid
+ */
+ beacon_ssid_len = buf[SSID_IE_LEN_INDEX];
+
+ /*
+ * If ssid is cached for this hidden AP, then change
+ * buffer len accordingly.
+ */
+ if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE) &&
+ (cached_ssid_len != 0) &&
+ (beacon_ssid_len == 0 || (cached_ssid_len > beacon_ssid_len &&
+ buf[SSID_IE_LEN_INDEX + 1] == 0))) {
+
+ len += (cached_ssid_len - beacon_ssid_len);
+ }
+
+ bss = wlan_node_alloc(len);
+ if (!bss)
+ return -ENOMEM;
+
+ bss->ni_snr = bih->snr;
+ bss->ni_rssi = a_sle16_to_cpu(bih->rssi);
+
+ if (WARN_ON(!bss->ni_buf))
+ return -EINVAL;
+
+ /*
+ * In case of hidden AP, beacon will not have ssid,
+ * but a directed probe response will have it,
+ * so place the cached-ssid(probe-resp) in the bss info.
+ */
+ if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE) &&
+ (cached_ssid_len != 0) &&
+ (beacon_ssid_len == 0 || (beacon_ssid_len &&
+ buf[SSID_IE_LEN_INDEX + 1] == 0))) {
+ ni_buf = bss->ni_buf;
+ buf_len = len;
+
+ /*
+ * Copy the first 14 bytes:
+ * time-stamp(8), beacon-interval(2),
+ * cap-info(2), ssid-id(1), ssid-len(1).
+ */
+ memcpy(ni_buf, buf, SSID_IE_LEN_INDEX + 1);
+
+ ni_buf[SSID_IE_LEN_INDEX] = cached_ssid_len;
+ ni_buf += (SSID_IE_LEN_INDEX + 1);
+
+ buf += (SSID_IE_LEN_INDEX + 1);
+ buf_len -= (SSID_IE_LEN_INDEX + 1);
+
+ memcpy(ni_buf, cached_ssid, cached_ssid_len);
+ ni_buf += cached_ssid_len;
+
+ buf += beacon_ssid_len;
+ buf_len -= beacon_ssid_len;
+
+ if (cached_ssid_len > beacon_ssid_len)
+ buf_len -= (cached_ssid_len - beacon_ssid_len);
+
+ memcpy(ni_buf, buf, buf_len);
+ } else
+ memcpy(bss->ni_buf, buf, len);
+
+ bss->ni_framelen = len;
+
+ ret = ath6kl_wlan_parse_beacon(bss->ni_buf, len, &bss->ni_cie);
+ if (ret) {
+ wlan_node_free(bss);
+ return -EINVAL;
+ }
+
+ /*
+ * Update the frequency in ie_chan, overwriting of channel number
+ * which is done in ath6kl_wlan_parse_beacon
+ */
+ bss->ni_cie.ie_chan = le16_to_cpu(bih->ch);
+ wlan_setup_node(&wmi->scan_table, bss, bih->bssid);
+
+ return 0;
+}
+
+static int ath6kl_wmi_opt_frame_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct bss *bss;
+ struct wmi_opt_rx_info_hdr *bih;
+ u8 *buf;
+
+ if (len <= sizeof(struct wmi_opt_rx_info_hdr))
+ return -EINVAL;
+
+ bih = (struct wmi_opt_rx_info_hdr *) datap;
+ buf = datap + sizeof(struct wmi_opt_rx_info_hdr);
+ len -= sizeof(struct wmi_opt_rx_info_hdr);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "opt frame event %2.2x:%2.2x\n",
+ bih->bssid[4], bih->bssid[5]);
+
+ bss = wlan_find_node(&wmi->scan_table, bih->bssid);
+ if (bss != NULL) {
+ /* Free up the node. We are about to allocate a new node. */
+ wlan_node_reclaim(&wmi->scan_table, bss);
+ }
+
+ bss = wlan_node_alloc(len);
+ if (!bss)
+ return -ENOMEM;
+
+ bss->ni_snr = bih->snr;
+ bss->ni_cie.ie_chan = le16_to_cpu(bih->ch);
+
+ if (WARN_ON(!bss->ni_buf))
+ return -EINVAL;
+
+ memcpy(bss->ni_buf, buf, len);
+ wlan_setup_node(&wmi->scan_table, bss, bih->bssid);
+
+ return 0;
+}
+
+/* Inactivity timeout of a fatpipe(pstream) at the target */
+static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_pstream_timeout_event *ev;
+
+ if (len < sizeof(struct wmi_pstream_timeout_event))
+ return -EINVAL;
+
+ ev = (struct wmi_pstream_timeout_event *) datap;
+
+	/*
+	 * When the pstream (fat pipe == AC) times out, it means there were
+	 * no thin streams within this pstream and it got implicitly created
+	 * due to data flow on this AC. We start the inactivity timer only
+	 * for implicitly created pstreams. Just reset the host state.
+	 */
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[ev->traffic_class] = 0;
+ wmi->fat_pipe_exist &= ~(1 << ev->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+
+ /* Indicate inactivity to driver layer for this fatpipe (pstream) */
+ ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false);
+
+ return 0;
+}
+
+static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_bit_rate_reply *reply;
+ s32 rate;
+ u32 sgi, index;
+
+ if (len < sizeof(struct wmi_bit_rate_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_bit_rate_reply *) datap;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
+
+ if (reply->rate_index == (s8) RATE_AUTO) {
+ rate = RATE_AUTO;
+ } else {
+ index = reply->rate_index & 0x7f;
+ sgi = (reply->rate_index & 0x80) ? 1 : 0;
+ rate = wmi_rate_tbl[index][sgi];
+ }
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_fix_rates_reply))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_channel_list_reply))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_tx_pwr_reply *reply;
+
+ if (len < sizeof(struct wmi_tx_pwr_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_tx_pwr_reply *) datap;
+ ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM);
+
+ return 0;
+}
+
+static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ if (len < sizeof(struct wmi_get_keepalive_cmd))
+ return -EINVAL;
+
+ ath6kl_wakeup_event(wmi->parent_dev);
+
+ return 0;
+}
+
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_scan_complete_event *ev;
+
+ ev = (struct wmi_scan_complete_event *) datap;
+
+ if (a_sle32_to_cpu(ev->status) == 0)
+ wlan_refresh_inactive_nodes(&wmi->scan_table);
+
+ ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+ wmi->is_probe_ssid = false;
+
+ return 0;
+}
+
+/*
+ * The target is reporting a programming error. This is for
+ * developer aid only. The target only checks a few common violations,
+ * and it is the responsibility of the host to do all error checking.
+ * The behavior of the target after a wmi error event is undefined.
+ * A reset is recommended.
+ */
+static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ const char *type = "unknown error";
+	struct wmi_cmd_error_event *ev;
+
+	ev = (struct wmi_cmd_error_event *) datap;
+
+ switch (ev->err_code) {
+ case INVALID_PARAM:
+ type = "invalid parameter";
+ break;
+ case ILLEGAL_STATE:
+ type = "invalid state";
+ break;
+ case INTERNAL_ERROR:
+ type = "internal error";
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n",
+ ev->cmd_id, type);
+
+ return 0;
+}
+
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+
+ return 0;
+}
+
+static u8 ath6kl_wmi_get_upper_threshold(s16 rssi,
+ struct sq_threshold_params *sq_thresh,
+ u32 size)
+{
+ u32 index;
+ u8 threshold = (u8) sq_thresh->upper_threshold[size - 1];
+
+ /* The list is already in sorted order. Get the next lower value */
+ for (index = 0; index < size; index++) {
+ if (rssi < sq_thresh->upper_threshold[index]) {
+ threshold = (u8) sq_thresh->upper_threshold[index];
+ break;
+ }
+ }
+
+ return threshold;
+}
+
+static u8 ath6kl_wmi_get_lower_threshold(s16 rssi,
+ struct sq_threshold_params *sq_thresh,
+ u32 size)
+{
+ u32 index;
+ u8 threshold = (u8) sq_thresh->lower_threshold[size - 1];
+
+ /* The list is already in sorted order. Get the next lower value */
+ for (index = 0; index < size; index++) {
+ if (rssi > sq_thresh->lower_threshold[index]) {
+ threshold = (u8) sq_thresh->lower_threshold[index];
+ break;
+ }
+ }
+
+ return threshold;
+}
+
+static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
+ struct wmi_rssi_threshold_params_cmd *rssi_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_rssi_threshold_params_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
+ memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
+
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_rssi_threshold_event *reply;
+ struct wmi_rssi_threshold_params_cmd cmd;
+ struct sq_threshold_params *sq_thresh;
+ enum wmi_rssi_threshold_val new_threshold;
+ u8 upper_rssi_threshold, lower_rssi_threshold;
+ s16 rssi;
+ int ret;
+
+ if (len < sizeof(struct wmi_rssi_threshold_event))
+ return -EINVAL;
+
+ reply = (struct wmi_rssi_threshold_event *) datap;
+ new_threshold = (enum wmi_rssi_threshold_val) reply->range;
+ rssi = a_sle16_to_cpu(reply->rssi);
+
+ sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI];
+
+ /*
+ * Identify the threshold breached and communicate that to the app.
+ * After that install a new set of thresholds based on the signal
+ * quality reported by the target
+ */
+ if (new_threshold) {
+ /* Upper threshold breached */
+ if (rssi < sq_thresh->upper_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious upper rssi threshold event: %d\n",
+ rssi);
+ } else if ((rssi < sq_thresh->upper_threshold[1]) &&
+ (rssi >= sq_thresh->upper_threshold[0])) {
+ new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[2]) &&
+ (rssi >= sq_thresh->upper_threshold[1])) {
+ new_threshold = WMI_RSSI_THRESHOLD2_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[3]) &&
+ (rssi >= sq_thresh->upper_threshold[2])) {
+ new_threshold = WMI_RSSI_THRESHOLD3_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[4]) &&
+ (rssi >= sq_thresh->upper_threshold[3])) {
+ new_threshold = WMI_RSSI_THRESHOLD4_ABOVE;
+ } else if ((rssi < sq_thresh->upper_threshold[5]) &&
+ (rssi >= sq_thresh->upper_threshold[4])) {
+ new_threshold = WMI_RSSI_THRESHOLD5_ABOVE;
+ } else if (rssi >= sq_thresh->upper_threshold[5]) {
+ new_threshold = WMI_RSSI_THRESHOLD6_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (rssi > sq_thresh->lower_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious lower rssi threshold event: %d %d\n",
+ rssi, sq_thresh->lower_threshold[0]);
+ } else if ((rssi > sq_thresh->lower_threshold[1]) &&
+ (rssi <= sq_thresh->lower_threshold[0])) {
+ new_threshold = WMI_RSSI_THRESHOLD6_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[2]) &&
+ (rssi <= sq_thresh->lower_threshold[1])) {
+ new_threshold = WMI_RSSI_THRESHOLD5_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[3]) &&
+ (rssi <= sq_thresh->lower_threshold[2])) {
+ new_threshold = WMI_RSSI_THRESHOLD4_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[4]) &&
+ (rssi <= sq_thresh->lower_threshold[3])) {
+ new_threshold = WMI_RSSI_THRESHOLD3_BELOW;
+ } else if ((rssi > sq_thresh->lower_threshold[5]) &&
+ (rssi <= sq_thresh->lower_threshold[4])) {
+ new_threshold = WMI_RSSI_THRESHOLD2_BELOW;
+ } else if (rssi <= sq_thresh->lower_threshold[5]) {
+ new_threshold = WMI_RSSI_THRESHOLD1_BELOW;
+ }
+ }
+
+ /* Calculate and install the next set of thresholds */
+ lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold);
+ cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold);
+ cmd.weight = sq_thresh->weight;
+ cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
+
+ ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd);
+ if (ret) {
+ ath6kl_err("unable to configure rssi thresholds\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_cac_event *reply;
+ struct ieee80211_tspec_ie *ts;
+ u16 active_tsids, tsinfo;
+ u8 tsid, index;
+ u8 ts_id;
+
+ if (len < sizeof(struct wmi_cac_event))
+ return -EINVAL;
+
+ reply = (struct wmi_cac_event *) datap;
+
+ if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
+ (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
+
+ ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
+ tsinfo = le16_to_cpu(ts->tsinfo);
+ tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
+ IEEE80211_WMM_IE_TSPEC_TID_MASK;
+
+ ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+ } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
+ /*
+ * Following assumes that there is only one outstanding
+ * ADDTS request when this event is received
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[reply->ac];
+ spin_unlock_bh(&wmi->lock);
+
+ for (index = 0; index < sizeof(active_tsids) * 8; index++) {
+ if ((active_tsids >> index) & 1)
+ break;
+ }
+ if (index < (sizeof(active_tsids) * 8))
+ ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+ }
+
+	/*
+	 * Clear the active tsids and add the missing handling
+	 * for the delete QoS stream indication from the AP.
+	 */
+ else if (reply->cac_indication == CAC_INDICATION_DELETE) {
+
+ ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
+ tsinfo = le16_to_cpu(ts->tsinfo);
+ ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
+ IEEE80211_WMM_IE_TSPEC_TID_MASK);
+
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
+ active_tsids = wmi->stream_exist_for_ac[reply->ac];
+ spin_unlock_bh(&wmi->lock);
+
+ /* Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!active_tsids) {
+ ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
+ false);
+ wmi->fat_pipe_exist &= ~(1 << reply->ac);
+ }
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
+ struct wmi_snr_threshold_params_cmd *snr_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_snr_threshold_params_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
+ memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
+
+ return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
+ int len)
+{
+ struct wmi_snr_threshold_event *reply;
+ struct sq_threshold_params *sq_thresh;
+ struct wmi_snr_threshold_params_cmd cmd;
+ enum wmi_snr_threshold_val new_threshold;
+ u8 upper_snr_threshold, lower_snr_threshold;
+ s16 snr;
+ int ret;
+
+ if (len < sizeof(struct wmi_snr_threshold_event))
+ return -EINVAL;
+
+ reply = (struct wmi_snr_threshold_event *) datap;
+
+ new_threshold = (enum wmi_snr_threshold_val) reply->range;
+ snr = reply->snr;
+
+ sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR];
+
+ /*
+ * Identify the threshold breached and communicate that to the app.
+ * After that install a new set of thresholds based on the signal
+ * quality reported by the target.
+ */
+ if (new_threshold) {
+ /* Upper threshold breached */
+ if (snr < sq_thresh->upper_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious upper snr threshold event: %d\n",
+ snr);
+ } else if ((snr < sq_thresh->upper_threshold[1]) &&
+ (snr >= sq_thresh->upper_threshold[0])) {
+ new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
+ } else if ((snr < sq_thresh->upper_threshold[2]) &&
+ (snr >= sq_thresh->upper_threshold[1])) {
+ new_threshold = WMI_SNR_THRESHOLD2_ABOVE;
+ } else if ((snr < sq_thresh->upper_threshold[3]) &&
+ (snr >= sq_thresh->upper_threshold[2])) {
+ new_threshold = WMI_SNR_THRESHOLD3_ABOVE;
+ } else if (snr >= sq_thresh->upper_threshold[3]) {
+ new_threshold = WMI_SNR_THRESHOLD4_ABOVE;
+ }
+ } else {
+ /* Lower threshold breached */
+ if (snr > sq_thresh->lower_threshold[0]) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "spurious lower snr threshold event: %d\n",
+ sq_thresh->lower_threshold[0]);
+ } else if ((snr > sq_thresh->lower_threshold[1]) &&
+ (snr <= sq_thresh->lower_threshold[0])) {
+ new_threshold = WMI_SNR_THRESHOLD4_BELOW;
+ } else if ((snr > sq_thresh->lower_threshold[2]) &&
+ (snr <= sq_thresh->lower_threshold[1])) {
+ new_threshold = WMI_SNR_THRESHOLD3_BELOW;
+ } else if ((snr > sq_thresh->lower_threshold[3]) &&
+ (snr <= sq_thresh->lower_threshold[2])) {
+ new_threshold = WMI_SNR_THRESHOLD2_BELOW;
+ } else if (snr <= sq_thresh->lower_threshold[3]) {
+ new_threshold = WMI_SNR_THRESHOLD1_BELOW;
+ }
+ }
+
+ /* Calculate and install the next set of thresholds */
+ lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh,
+ sq_thresh->lower_threshold_valid_count);
+ upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh,
+ sq_thresh->upper_threshold_valid_count);
+
+ /* Issue a wmi command to install the thresholds */
+ cmd.thresh_above1_val = upper_snr_threshold;
+ cmd.thresh_below1_val = lower_snr_threshold;
+ cmd.weight = sq_thresh->weight;
+ cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "snr: %d, threshold: %d, lower: %d, upper: %d\n",
+ snr, new_threshold,
+ lower_snr_threshold, upper_snr_threshold);
+
+ ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd);
+ if (ret) {
+ ath6kl_err("unable to configure snr threshold\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ u16 ap_info_entry_size;
+ struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
+ struct wmi_ap_info_v1 *ap_info_v1;
+ u8 index;
+
+ if (len < sizeof(struct wmi_aplist_event) ||
+ ev->ap_list_ver != APLIST_VER1)
+ return -EINVAL;
+
+ ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
+ ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "number of APs in aplist event: %d\n", ev->num_ap);
+
+ if (len < (int) (sizeof(struct wmi_aplist_event) +
+ (ev->num_ap - 1) * ap_info_entry_size))
+ return -EINVAL;
+
+ /* AP list version 1 contents */
+ for (index = 0; index < ev->num_ap; index++) {
+ ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n",
+ index, ap_info_v1->bssid, ap_info_v1->channel);
+ ap_info_v1++;
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum htc_endpoint_id ep_id = wmi->ep_id;
+ int ret;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ if (sync_flag >= END_WMIFLAG) {
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ if ((sync_flag == SYNC_BEFORE_WMIFLAG) ||
+ (sync_flag == SYNC_BOTH_WMIFLAG)) {
+ /*
+ * Make sure all data currently queued is transmitted before
+ * the cmd execution. Establish a new sync point.
+ */
+ ath6kl_wmi_sync_point(wmi);
+ }
+
+ skb_push(skb, sizeof(struct wmi_cmd_hdr));
+
+ cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
+ cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
+ cmd_hdr->info1 = 0; /* added for virtual interface */
+
+ /* Only for OPT_TX_CMD, use BE endpoint. */
+ if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
+ ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
+ false, false, 0, NULL);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE);
+ }
+
+ ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
+
+ if ((sync_flag == SYNC_AFTER_WMIFLAG) ||
+ (sync_flag == SYNC_BOTH_WMIFLAG)) {
+ /*
+ * Make sure all new data queued waits for the command to
+ * execute. Establish a new sync point.
+ */
+ ath6kl_wmi_sync_point(wmi);
+ }
+
+ return 0;
+}
+
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+ enum dot11_auth_mode dot11_auth_mode,
+ enum auth_mode auth_mode,
+ enum crypto_type pairwise_crypto,
+ u8 pairwise_crypto_len,
+ enum crypto_type group_crypto,
+ u8 group_crypto_len, int ssid_len, u8 *ssid,
+ u8 *bssid, u16 channel, u32 ctrl_flags)
+{
+ struct sk_buff *skb;
+ struct wmi_connect_cmd *cc;
+ int ret;
+
+ wmi->traffic_class = 100;
+
+ if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT))
+ return -EINVAL;
+
+ if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cc = (struct wmi_connect_cmd *) skb->data;
+
+ if (ssid_len)
+ memcpy(cc->ssid, ssid, ssid_len);
+
+ cc->ssid_len = ssid_len;
+ cc->nw_type = nw_type;
+ cc->dot11_auth_mode = dot11_auth_mode;
+ cc->auth_mode = auth_mode;
+ cc->prwise_crypto_type = pairwise_crypto;
+ cc->prwise_crypto_len = pairwise_crypto_len;
+ cc->grp_crypto_type = group_crypto;
+ cc->grp_crypto_len = group_crypto_len;
+ cc->ch = cpu_to_le16(channel);
+ cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+
+ if (bssid != NULL)
+ memcpy(cc->bssid, bssid, ETH_ALEN);
+
+ wmi->pair_crypto_type = pairwise_crypto;
+ wmi->grp_crypto_type = group_crypto;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+{
+ struct sk_buff *skb;
+ struct wmi_reconnect_cmd *cc;
+ int ret;
+
+ wmi->traffic_class = 100;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cc = (struct wmi_reconnect_cmd *) skb->data;
+ cc->channel = cpu_to_le16(channel);
+
+ if (bssid != NULL)
+ memcpy(cc->bssid, bssid, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+{
+ int ret;
+
+ wmi->traffic_class = 100;
+
+ /* Disconnect command does not need to do a SYNC before. */
+ ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+
+ return ret;
+}
+
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list)
+{
+ struct sk_buff *skb;
+ struct wmi_start_scan_cmd *sc;
+ s8 size;
+ int ret;
+
+ size = sizeof(struct wmi_start_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_start_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->num_ch = num_chan;
+
+ if (num_chan)
+ memcpy(sc->ch_list, ch_list, num_chan * sizeof(u16));
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+ u16 fg_end_sec, u16 bg_sec,
+ u16 minact_chdw_msec, u16 maxact_chdw_msec,
+ u16 pas_chdw_msec, u8 short_scan_ratio,
+ u8 scan_ctrl_flag, u32 max_dfsch_act_time,
+ u16 maxact_scan_per_ssid)
+{
+ struct sk_buff *skb;
+ struct wmi_scan_params_cmd *sc;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_scan_params_cmd *) skb->data;
+ sc->fg_start_period = cpu_to_le16(fg_start_sec);
+ sc->fg_end_period = cpu_to_le16(fg_end_sec);
+ sc->bg_period = cpu_to_le16(bg_sec);
+ sc->minact_chdwell_time = cpu_to_le16(minact_chdw_msec);
+ sc->maxact_chdwell_time = cpu_to_le16(maxact_chdw_msec);
+ sc->pas_chdwell_time = cpu_to_le16(pas_chdw_msec);
+ sc->short_scan_ratio = short_scan_ratio;
+ sc->scan_ctrl_flags = scan_ctrl_flag;
+ sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
+ sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+{
+ struct sk_buff *skb;
+ struct wmi_bss_filter_cmd *cmd;
+ int ret;
+
+ if (filter >= LAST_BSS_FILTER)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_filter_cmd *) skb->data;
+ cmd->bss_filter = filter;
+ cmd->ie_mask = cpu_to_le32(ie_mask);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+ u8 ssid_len, u8 *ssid)
+{
+ struct sk_buff *skb;
+ struct wmi_probed_ssid_cmd *cmd;
+ int ret;
+
+ if (index > MAX_PROBED_SSID_INDEX)
+ return -EINVAL;
+
+ if (ssid_len > sizeof(cmd->ssid))
+ return -EINVAL;
+
+ if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssid_len > 0))
+ return -EINVAL;
+
+ if ((flag & SPECIFIC_SSID_FLAG) && !ssid_len)
+ return -EINVAL;
+
+ if (flag & SPECIFIC_SSID_FLAG)
+ wmi->is_probe_ssid = true;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probed_ssid_cmd *) skb->data;
+ cmd->entry_index = index;
+ cmd->flag = flag;
+ cmd->ssid_len = ssid_len;
+ memcpy(cmd->ssid, ssid, ssid_len);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+ u16 listen_beacons)
+{
+ struct sk_buff *skb;
+ struct wmi_listen_int_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_listen_int_cmd *) skb->data;
+ cmd->listen_intvl = cpu_to_le16(listen_interval);
+ cmd->num_beacons = cpu_to_le16(listen_beacons);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_power_mode_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_power_mode_cmd *) skb->data;
+ cmd->pwr_mode = pwr_mode;
+ wmi->pwr_mode = pwr_mode;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+ u16 ps_poll_num, u16 dtim_policy,
+ u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
+ u16 ps_fail_event_policy)
+{
+ struct sk_buff *skb;
+ struct wmi_power_params_cmd *pm;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*pm));
+ if (!skb)
+ return -ENOMEM;
+
+ pm = (struct wmi_power_params_cmd *)skb->data;
+ pm->idle_period = cpu_to_le16(idle_period);
+ pm->pspoll_number = cpu_to_le16(ps_poll_num);
+ pm->dtim_policy = cpu_to_le16(dtim_policy);
+ pm->tx_wakeup_policy = cpu_to_le16(tx_wakeup_policy);
+ pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
+ pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+{
+ struct sk_buff *skb;
+ struct wmi_disc_timeout_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_disc_timeout_cmd *) skb->data;
+ cmd->discon_timeout = timeout;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+ enum crypto_type key_type,
+ u8 key_usage, u8 key_len,
+ u8 *key_rsc, u8 *key_material,
+ u8 key_op_ctrl, u8 *mac_addr,
+ enum wmi_sync_flag sync_flag)
+{
+ struct sk_buff *skb;
+ struct wmi_add_cipher_key_cmd *cmd;
+ int ret;
+
+ if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
+ (key_material == NULL))
+ return -EINVAL;
+
+ if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_cipher_key_cmd *) skb->data;
+ cmd->key_index = key_index;
+ cmd->key_type = key_type;
+ cmd->key_usage = key_usage;
+ cmd->key_len = key_len;
+ memcpy(cmd->key, key_material, key_len);
+
+ if (key_rsc != NULL)
+ memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+
+ cmd->key_op_ctrl = key_op_ctrl;
+
+ if (mac_addr)
+ memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+ sync_flag);
+
+ return ret;
+}
+
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+{
+ struct sk_buff *skb;
+ struct wmi_add_krk_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_krk_cmd *) skb->data;
+ memcpy(cmd->krk, krk, WMI_KRK_LEN);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+{
+ struct sk_buff *skb;
+ struct wmi_delete_cipher_key_cmd *cmd;
+ int ret;
+
+ if (key_index > WMI_MAX_KEY_INDEX)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
+ cmd->key_index = key_index;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+ const u8 *pmkid, bool set)
+{
+ struct sk_buff *skb;
+ struct wmi_setpmkid_cmd *cmd;
+ int ret;
+
+ if (bssid == NULL)
+ return -EINVAL;
+
+ if (set && pmkid == NULL)
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_setpmkid_cmd *) skb->data;
+ memcpy(cmd->bssid, bssid, ETH_ALEN);
+ if (set) {
+ memcpy(cmd->pmkid, pmkid, sizeof(cmd->pmkid));
+ cmd->enable = PMKID_ENABLE;
+ } else {
+ memset(cmd->pmkid, 0, sizeof(cmd->pmkid));
+ cmd->enable = PMKID_DISABLE;
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
+ enum htc_endpoint_id ep_id)
+{
+ struct wmi_data_hdr *data_hdr;
+ int ret;
+
+ if (WARN_ON(skb == NULL || ep_id == wmi->ep_id))
+ return -EINVAL;
+
+ skb_push(skb, sizeof(struct wmi_data_hdr));
+
+ data_hdr = (struct wmi_data_hdr *) skb->data;
+ data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
+ data_hdr->info3 = 0;
+
+ ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
+
+ return ret;
+}
+
+static int ath6kl_wmi_sync_point(struct wmi *wmi)
+{
+ struct sk_buff *skb;
+ struct wmi_sync_cmd *cmd;
+ struct wmi_data_sync_bufs data_sync_bufs[WMM_NUM_AC];
+ enum htc_endpoint_id ep_id;
+ u8 index, num_pri_streams = 0;
+ int ret = 0;
+
+ memset(data_sync_bufs, 0, sizeof(data_sync_bufs));
+
+ spin_lock_bh(&wmi->lock);
+
+ for (index = 0; index < WMM_NUM_AC; index++) {
+ if (wmi->fat_pipe_exist & (1 << index)) {
+ num_pri_streams++;
+ data_sync_bufs[num_pri_streams - 1].traffic_class =
+ index;
+ }
+ }
+
+ spin_unlock_bh(&wmi->lock);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto free_skb;
+ }
+
+ cmd = (struct wmi_sync_cmd *) skb->data;
+
+ /*
+ * In the SYNC cmd sent on the control Ep, send a bitmap
+ * of the data eps on which the Data Sync will be sent
+ */
+ cmd->data_sync_map = wmi->fat_pipe_exist;
+
+ for (index = 0; index < num_pri_streams; index++) {
+ data_sync_bufs[index].skb = ath6kl_buf_alloc(0);
+ if (data_sync_bufs[index].skb == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ /*
+ * If buffer allocation for any of the dataSync fails,
+ * then do not send the Synchronize cmd on the control ep
+ */
+ if (ret)
+ goto free_skb;
+
+ /*
+ * Send sync cmd followed by sync data messages on all
+ * endpoints being used
+ */
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ if (ret)
+ goto free_skb;
+
+ /* cmd buffer sent, we no longer own it */
+ skb = NULL;
+
+ for (index = 0; index < num_pri_streams; index++) {
+
+ if (WARN_ON(!data_sync_bufs[index].skb))
+ break;
+
+ ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
+ data_sync_bufs[index].
+ traffic_class);
+ ret =
+ ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
+ ep_id);
+
+ if (ret)
+ break;
+
+ data_sync_bufs[index].skb = NULL;
+ }
+
+free_skb:
+ /* free up any resources left over (possibly due to an error) */
+ if (skb)
+ dev_kfree_skb(skb);
+
+ for (index = 0; index < num_pri_streams; index++) {
+ if (data_sync_bufs[index].skb != NULL)
+ dev_kfree_skb(data_sync_bufs[index].skb);
+ }
+
+ return ret;
+}
+
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+ struct wmi_create_pstream_cmd *params)
+{
+ struct sk_buff *skb;
+ struct wmi_create_pstream_cmd *cmd;
+ u8 fatpipe_exist_for_ac = 0;
+ s32 min_phy = 0;
+ s32 nominal_phy = 0;
+ int ret;
+
+ if (!((params->user_pri <= 0x7) &&
+ (up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
+ (params->traffic_direc == UPLINK_TRAFFIC ||
+ params->traffic_direc == DNLINK_TRAFFIC ||
+ params->traffic_direc == BIDIR_TRAFFIC) &&
+ (params->traffic_type == TRAFFIC_TYPE_APERIODIC ||
+ params->traffic_type == TRAFFIC_TYPE_PERIODIC) &&
+ (params->voice_psc_cap == DISABLE_FOR_THIS_AC ||
+ params->voice_psc_cap == ENABLE_FOR_THIS_AC ||
+ params->voice_psc_cap == ENABLE_FOR_ALL_AC) &&
+ (params->tsid == WMI_IMPLICIT_PSTREAM ||
+ params->tsid <= WMI_MAX_THINSTREAM))) {
+ return -EINVAL;
+ }
+
+ /*
+ * Check that the nominal PHY rate is >= the minimal PHY rate,
+ * so that the DUT can allow the TSRS IE.
+ */
+
+ /* Get the physical rate (units of bps) */
+ min_phy = ((le32_to_cpu(params->min_phy_rate) / 1000) / 1000);
+
+ /* Check that the nominal phy rate is not below the minimal phy rate */
+ if (params->nominal_phy >= min_phy) {
+ /* unit of 500 kbps */
+ nominal_phy = (params->nominal_phy * 1000) / 500;
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "TSRS IE enabled::MinPhy %x->NominalPhy ===> %x\n",
+ min_phy, nominal_phy);
+
+ params->nominal_phy = nominal_phy;
+ } else {
+ params->nominal_phy = 0;
+ }
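+
+ /*
+ * Worked example (illustrative only, not part of this patch): with
+ * min_phy_rate = 6000000 bps, min_phy above is 6 (Mbps). A requested
+ * nominal_phy of 6 Mbps then passes the check and is rescaled to
+ * (6 * 1000) / 500 = 12 units of 500 kbps before being sent to the
+ * firmware.
+ */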
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "sending create_pstream_cmd: ac=%d tsid:%d\n",
+ params->traffic_class, params->tsid);
+
+ cmd = (struct wmi_create_pstream_cmd *) skb->data;
+ memcpy(cmd, params, sizeof(*cmd));
+
+ /* This is an implicitly created Fat pipe */
+ if ((u32) params->tsid == (u32) WMI_IMPLICIT_PSTREAM) {
+ spin_lock_bh(&wmi->lock);
+ fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
+ (1 << params->traffic_class));
+ wmi->fat_pipe_exist |= (1 << params->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+ } else {
+ /* explicitly created thin stream within a fat pipe */
+ spin_lock_bh(&wmi->lock);
+ fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
+ (1 << params->traffic_class));
+ wmi->stream_exist_for_ac[params->traffic_class] |=
+ (1 << params->tsid);
+ /*
+ * If a thinstream becomes active, the fat pipe automatically
+ * becomes active
+ */
+ wmi->fat_pipe_exist |= (1 << params->traffic_class);
+ spin_unlock_bh(&wmi->lock);
+ }
+
+ /*
+ * Indicate activity change to the driver layer only if this is the
+ * first TSID to get created in this AC explicitly or an implicit
+ * fat pipe is getting created.
+ */
+ if (!fatpipe_exist_for_ac)
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ params->traffic_class, true);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+{
+ struct sk_buff *skb;
+ struct wmi_delete_pstream_cmd *cmd;
+ u16 active_tsids = 0;
+ int ret;
+
+ if (traffic_class > 3) {
+ ath6kl_err("invalid traffic class: %d\n", traffic_class);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delete_pstream_cmd *) skb->data;
+ cmd->traffic_class = traffic_class;
+ cmd->tsid = tsid;
+
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[traffic_class];
+ spin_unlock_bh(&wmi->lock);
+
+ if (!(active_tsids & (1 << tsid))) {
+ dev_kfree_skb(skb);
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "TSID %d doesn't exist for traffic class: %d\n",
+ tsid, traffic_class);
+ return -ENODATA;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
+ traffic_class, tsid);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+ SYNC_BEFORE_WMIFLAG);
+
+ spin_lock_bh(&wmi->lock);
+ wmi->stream_exist_for_ac[traffic_class] &= ~(1 << tsid);
+ active_tsids = wmi->stream_exist_for_ac[traffic_class];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * Indicate stream inactivity to driver layer only if all tsids
+ * within this AC are deleted.
+ */
+ if (!active_tsids) {
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ traffic_class, false);
+ wmi->fat_pipe_exist &= ~(1 << traffic_class);
+ }
+
+ return ret;
+}
+
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
+{
+ struct sk_buff *skb;
+ struct wmi_set_ip_cmd *cmd;
+ int ret;
+
+ /* Multicast addresses are not valid */
+ if ((*((u8 *) &ip_cmd->ips[0]) >= 0xE0) ||
+ (*((u8 *) &ip_cmd->ips[1]) >= 0xE0))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_ip_cmd *) skb->data;
+ memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
+ int len)
+{
+ if (len < sizeof(struct wmi_get_wow_list_reply))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
+ enum wmix_command_id cmd_id,
+ enum wmi_sync_flag sync_flag)
+{
+ struct wmix_cmd_hdr *cmd_hdr;
+ int ret;
+
+ skb_push(skb, sizeof(struct wmix_cmd_hdr));
+
+ cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
+ cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+
+ return ret;
+}
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source)
+{
+ struct sk_buff *skb;
+ struct wmix_hb_challenge_resp_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data;
+ cmd->cookie = cpu_to_le32(cookie);
+ cmd->source = cpu_to_le32(source);
+
+ ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+{
+ return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+}
+
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+{
+ struct sk_buff *skb;
+ struct wmi_set_tx_pwr_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
+ cmd->dbM = dbM;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+{
+ return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+}
+
+void ath6kl_wmi_get_current_bssid(struct wmi *wmi, u8 *bssid)
+{
+ if (bssid)
+ memcpy(bssid, wmi->bssid, ETH_ALEN);
+}
+
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+{
+ struct sk_buff *skb;
+ struct wmi_set_lpreamble_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_lpreamble_cmd *) skb->data;
+ cmd->status = status;
+ cmd->preamble_policy = preamble_policy;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
+{
+ struct sk_buff *skb;
+ struct wmi_set_rts_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_rts_cmd *) skb->data;
+ cmd->threshold = cpu_to_le16(threshold);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wmm_txop_cmd *cmd;
+ int ret;
+
+ if (!((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)))
+ return -EINVAL;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
+ cmd->txop_enable = cfg;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+{
+ struct sk_buff *skb;
+ struct wmi_set_keepalive_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_keepalive_cmd *) skb->data;
+ cmd->keep_alive_intvl = keep_alive_intvl;
+ wmi->keep_alive_intvl = keep_alive_intvl;
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+s32 ath6kl_wmi_get_rate(s8 rate_index)
+{
+ if (rate_index == RATE_AUTO)
+ return 0;
+
+ return wmi_rate_tbl[(u32) rate_index][0];
+}
+
+void ath6kl_wmi_node_return(struct wmi *wmi, struct bss *bss)
+{
+ if (bss)
+ wlan_node_return(&wmi->scan_table, bss);
+}
+
+struct bss *ath6kl_wmi_find_ssid_node(struct wmi *wmi, u8 * ssid,
+ u32 ssid_len, bool is_wpa2,
+ bool match_ssid)
+{
+ struct bss *node = NULL;
+
+ node = wlan_find_ssid_node(&wmi->scan_table, ssid,
+ ssid_len, is_wpa2, match_ssid);
+ return node;
+}
+
+struct bss *ath6kl_wmi_find_node(struct wmi *wmi, const u8 * mac_addr)
+{
+ struct bss *ni = NULL;
+
+ ni = wlan_find_node(&wmi->scan_table, mac_addr);
+
+ return ni;
+}
+
+void ath6kl_wmi_node_free(struct wmi *wmi, const u8 * mac_addr)
+{
+ struct bss *ni = NULL;
+
+ ni = wlan_find_node(&wmi->scan_table, mac_addr);
+ if (ni != NULL)
+ wlan_node_reclaim(&wmi->scan_table, ni);
+
+ return;
+}
+
+static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
+ u32 len)
+{
+ struct wmi_pmkid_list_reply *reply;
+ u32 expected_len;
+
+ if (len < sizeof(struct wmi_pmkid_list_reply))
+ return -EINVAL;
+
+ reply = (struct wmi_pmkid_list_reply *)datap;
+ expected_len = sizeof(reply->num_pmkid) +
+ le32_to_cpu(reply->num_pmkid) * WMI_PMKID_LEN;
+
+ if (len < expected_len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
+
+ aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+ le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
+
+ return 0;
+}
+
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
+
+ aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+
+ return 0;
+}
+
+/* AP mode functions */
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ struct wmi_pspoll_event *ev;
+
+ if (len < sizeof(struct wmi_pspoll_event))
+ return -EINVAL;
+
+ ev = (struct wmi_pspoll_event *) datap;
+
+ ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+
+ return 0;
+}
+
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ ath6kl_dtimexpiry_event(wmi->parent_dev);
+
+ return 0;
+}
+
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+{
+ struct sk_buff *skb;
+ struct wmi_ap_set_pvb_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_set_pvb_cmd *) skb->data;
+ cmd->aid = cpu_to_le16(aid);
+ cmd->flag = cpu_to_le32(flag);
+
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+ bool rx_dot11_hdr, bool defrag_on_host)
+{
+ struct sk_buff *skb;
+ struct wmi_rx_frame_format_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_rx_frame_format_cmd *) skb->data;
+ cmd->dot11_hdr = rx_dot11_hdr ? 1 : 0;
+ cmd->defrag_on_host = defrag_on_host ? 1 : 0;
+ cmd->meta_ver = rx_meta_ver;
+
+ /* Delete the local aggr state, on host */
+ ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmix_cmd_hdr *cmd;
+ u32 len;
+ u16 id;
+ u8 *datap;
+ int ret = 0;
+
+ if (skb->len < sizeof(struct wmix_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ wmi->stat.cmd_len_err++;
+ return -EINVAL;
+ }
+
+ cmd = (struct wmix_cmd_hdr *) skb->data;
+ id = le32_to_cpu(cmd->cmd_id);
+
+ skb_pull(skb, sizeof(struct wmix_cmd_hdr));
+
+ datap = skb->data;
+ len = skb->len;
+
+ switch (id) {
+ case WMIX_HB_CHALLENGE_RESP_EVENTID:
+ break;
+ case WMIX_DBGLOG_EVENTID:
+ break;
+ default:
+ ath6kl_err("unknown cmd id 0x%x\n", id);
+ wmi->stat.cmd_id_err++;
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Control Path */
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd;
+ u32 len;
+ u16 id;
+ u8 *datap;
+ int ret = 0;
+
+ if (WARN_ON(skb == NULL))
+ return -EINVAL;
+
+ if (skb->len < sizeof(struct wmi_cmd_hdr)) {
+ ath6kl_err("bad packet 1\n");
+ dev_kfree_skb(skb);
+ wmi->stat.cmd_len_err++;
+ return -EINVAL;
+ }
+
+ cmd = (struct wmi_cmd_hdr *) skb->data;
+ id = le16_to_cpu(cmd->cmd_id);
+
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+
+ datap = skb->data;
+ len = skb->len;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "%s: wmi id: %d\n", __func__, id);
+ ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "msg payload ", datap, len);
+
+ switch (id) {
+ case WMI_GET_BITRATE_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
+ ret = ath6kl_wmi_bitrate_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_CHANNEL_LIST_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_CHANNEL_LIST_CMDID\n");
+ ret = ath6kl_wmi_ch_list_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_TX_PWR_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_TX_PWR_CMDID\n");
+ ret = ath6kl_wmi_tx_pwr_reply_rx(wmi, datap, len);
+ break;
+ case WMI_READY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
+ ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
+ break;
+ case WMI_CONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
+ ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+ break;
+ case WMI_DISCONNECT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
+ ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+ break;
+ case WMI_PEER_NODE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
+ ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
+ break;
+ case WMI_TKIP_MICERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
+ ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+ break;
+ case WMI_BSSINFO_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
+ ath6kl_wmi_convert_bssinfo_hdr2_to_hdr(skb, datap);
+ ret = ath6kl_wmi_bssinfo_event_rx(wmi, skb->data, skb->len);
+ break;
+ case WMI_REGDOMAIN_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
+ break;
+ case WMI_PSTREAM_TIMEOUT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
+ ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
+ break;
+ case WMI_NEIGHBOR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
+ break;
+ case WMI_SCAN_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
+ ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+ break;
+ case WMI_CMDERROR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
+ ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
+ break;
+ case WMI_REPORT_STATISTICS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
+ ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+ break;
+ case WMI_RSSI_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
+ ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
+ break;
+ case WMI_ERROR_REPORT_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ERROR_REPORT_EVENTID\n");
+ break;
+ case WMI_OPT_RX_FRAME_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_OPT_RX_FRAME_EVENTID\n");
+ ret = ath6kl_wmi_opt_frame_event_rx(wmi, datap, len);
+ break;
+ case WMI_REPORT_ROAM_TBL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+ break;
+ case WMI_EXTENSION_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
+ ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
+ break;
+ case WMI_CAC_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
+ ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+ break;
+ case WMI_CHANNEL_CHANGE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
+ break;
+ case WMI_REPORT_ROAM_DATA_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_DATA_EVENTID\n");
+ break;
+ case WMI_GET_FIXRATES_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
+ ret = ath6kl_wmi_ratemask_reply_rx(wmi, datap, len);
+ break;
+ case WMI_TX_RETRY_ERR_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_RETRY_ERR_EVENTID\n");
+ break;
+ case WMI_SNR_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SNR_THRESHOLD_EVENTID\n");
+ ret = ath6kl_wmi_snr_threshold_event_rx(wmi, datap, len);
+ break;
+ case WMI_LQ_THRESHOLD_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_LQ_THRESHOLD_EVENTID\n");
+ break;
+ case WMI_APLIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_APLIST_EVENTID\n");
+ ret = ath6kl_wmi_aplist_event_rx(wmi, datap, len);
+ break;
+ case WMI_GET_KEEPALIVE_CMDID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_KEEPALIVE_CMDID\n");
+ ret = ath6kl_wmi_keepalive_reply_rx(wmi, datap, len);
+ break;
+ case WMI_GET_WOW_LIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
+ ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
+ break;
+ case WMI_GET_PMKID_LIST_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
+ ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
+ break;
+ case WMI_PSPOLL_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
+ ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+ break;
+ case WMI_DTIMEXPIRY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
+ ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+ break;
+ case WMI_SET_PARAMS_REPLY_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
+ break;
+ case WMI_ADDBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
+ ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+ break;
+ case WMI_ADDBA_RESP_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
+ break;
+ case WMI_DELBA_REQ_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
+ ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+ break;
+ case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
+ break;
+ case WMI_REPORT_BTCOEX_STATS_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "WMI_REPORT_BTCOEX_STATS_EVENTID\n");
+ break;
+ case WMI_TX_COMPLETE_EVENTID:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
+ ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
+ wmi->stat.cmd_id_err++;
+ ret = -EINVAL;
+ break;
+ }
+
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+{
+ if (!wmi)
+ return;
+
+ spin_lock_bh(&wmi->lock);
+
+ wmi->fat_pipe_exist = 0;
+ memset(wmi->stream_exist_for_ac, 0, sizeof(wmi->stream_exist_for_ac));
+
+ spin_unlock_bh(&wmi->lock);
+}
+
+void *ath6kl_wmi_init(void *dev)
+{
+ struct wmi *wmi;
+
+ wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
+ if (!wmi)
+ return NULL;
+
+ spin_lock_init(&wmi->lock);
+
+ wmi->parent_dev = dev;
+
+ wlan_node_table_init(wmi, &wmi->scan_table);
+ ath6kl_wmi_qos_state_init(wmi);
+
+ wmi->pwr_mode = REC_POWER;
+ wmi->phy_mode = WMI_11G_MODE;
+
+ wmi->pair_crypto_type = NONE_CRYPT;
+ wmi->grp_crypto_type = NONE_CRYPT;
+
+ wmi->ht_allowed[A_BAND_24GHZ] = 1;
+ wmi->ht_allowed[A_BAND_5GHZ] = 1;
+
+ return wmi;
+}
+
+void ath6kl_wmi_shutdown(struct wmi *wmi)
+{
+ if (!wmi)
+ return;
+
+ wlan_node_table_cleanup(&wmi->scan_table);
+ kfree(wmi);
+}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
new file mode 100644
index 000000000000..bbaa7049f4a8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -0,0 +1,2024 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the Wireless Module Interface (WMI)
+ * protocol. It includes definitions of all the commands and events.
+ * Commands are messages from the host to the wireless module (WM);
+ * events and replies are messages from the WM to the host.
+ */
+
+#ifndef WMI_H
+#define WMI_H
+
+#include <linux/ieee80211.h>
+
+#include "htc.h"
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+#define WMI_CONTROL_MSG_MAX_LEN 256
+#define is_ethertype(type_or_len) ((type_or_len) >= 0x0600)
+
+#define IP_ETHERTYPE 0x0800
+
+#define WMI_IMPLICIT_PSTREAM 0xFF
+#define WMI_MAX_THINSTREAM 15
+
+#define SSID_IE_LEN_INDEX 13
+
+/* Host side link management data structures */
+#define SIG_QUALITY_THRESH_LVLS 6
+#define SIG_QUALITY_UPPER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
+#define SIG_QUALITY_LOWER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
+
+#define A_BAND_24GHZ 0
+#define A_BAND_5GHZ 1
+#define A_NUM_BANDS 2
+
+/* in ms */
+#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
+
+/*
+ * There are no signed versions of __le16 and __le32, so for a temporary
+ * solution come up with our own version. The idea is from fs/ntfs/types.h.
+ *
+ * Use a_ prefix so that it doesn't conflict if we get proper support to
+ * linux/types.h.
+ */
+typedef __s16 __bitwise a_sle16;
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32) cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32) val);
+}
+
+static inline a_sle16 a_cpu_to_sle16(s16 val)
+{
+ return (__force a_sle16) cpu_to_le16(val);
+}
+
+static inline s16 a_sle16_to_cpu(a_sle16 val)
+{
+ return le16_to_cpu((__force __le16) val);
+}
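+
+/*
+ * Example (illustrative only, not part of this patch): a signed value such
+ * as an RSSI reading is placed on the wire with a_cpu_to_sle16() and
+ * recovered on the host with a_sle16_to_cpu(), e.g.
+ *
+ *	a_sle16 wire_rssi = a_cpu_to_sle16(-42);
+ *	s16 host_rssi = a_sle16_to_cpu(wire_rssi);	== -42
+ */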
+
+struct sq_threshold_params {
+ s16 upper_threshold[SIG_QUALITY_UPPER_THRESH_LVLS];
+ s16 lower_threshold[SIG_QUALITY_LOWER_THRESH_LVLS];
+ u32 upper_threshold_valid_count;
+ u32 lower_threshold_valid_count;
+ u32 polling_interval;
+ u8 weight;
+ u8 last_rssi;
+ u8 last_rssi_poll_event;
+};
+
+struct wmi_stats {
+ u32 cmd_len_err;
+ u32 cmd_id_err;
+};
+
+struct wmi_data_sync_bufs {
+ u8 traffic_class;
+ struct sk_buff *skb;
+};
+
+/* WMM stream classes */
+#define WMM_NUM_AC 4
+#define WMM_AC_BE 0 /* best effort */
+#define WMM_AC_BK 1 /* background */
+#define WMM_AC_VI 2 /* video */
+#define WMM_AC_VO 3 /* voice */
+
+struct wmi {
+ bool ready;
+ u16 stream_exist_for_ac[WMM_NUM_AC];
+ u8 fat_pipe_exist;
+ void *parent_dev;
+ struct wmi_stats stat;
+ struct ath6kl_node_table scan_table;
+ u8 bssid[ETH_ALEN];
+ u8 pwr_mode;
+ u8 phy_mode;
+ u8 keep_alive_intvl;
+ spinlock_t lock;
+ enum htc_endpoint_id ep_id;
+ struct sq_threshold_params
+ sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
+ enum crypto_type pair_crypto_type;
+ enum crypto_type grp_crypto_type;
+ bool is_wmm_enabled;
+ u8 ht_allowed[A_NUM_BANDS];
+ u8 traffic_class;
+ bool is_probe_ssid;
+};
+
+struct host_app_area {
+ u32 wmi_protocol_ver;
+};
+
+enum wmi_msg_type {
+ DATA_MSGTYPE = 0x0,
+ CNTL_MSGTYPE,
+ SYNC_MSGTYPE,
+ OPT_MSGTYPE,
+};
+
+/*
+ * Macros for operating on WMI_DATA_HDR (info) field
+ */
+
+#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
+#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
+#define WMI_DATA_HDR_UP_MASK 0x07
+#define WMI_DATA_HDR_UP_SHIFT 2
+
+/* In AP mode, the same bit (b5) indicates the power save state in the
+ * rx direction and the more-data bit in the tx direction.
+ */
+#define WMI_DATA_HDR_PS_MASK 0x1
+#define WMI_DATA_HDR_PS_SHIFT 5
+
+#define WMI_DATA_HDR_MORE_MASK 0x1
+#define WMI_DATA_HDR_MORE_SHIFT 5
+
+enum wmi_data_hdr_data_type {
+ WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
+ WMI_DATA_HDR_DATA_TYPE_802_11,
+
+ /* used to be used for the PAL */
+ WMI_DATA_HDR_DATA_TYPE_ACL,
+};
+
+#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
+#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
+
+/* Macros for operating on WMI_DATA_HDR (info2) field */
+#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
+#define WMI_DATA_HDR_SEQNO_SHIFT 0
+
+#define WMI_DATA_HDR_AMSDU_MASK 0x1
+#define WMI_DATA_HDR_AMSDU_SHIFT 12
+
+#define WMI_DATA_HDR_META_MASK 0x7
+#define WMI_DATA_HDR_META_SHIFT 13
+
+struct wmi_data_hdr {
+ s8 rssi;
+
+ /*
+ * usage of 'info' field(8-bit):
+ *
+ * b1:b0 - WMI_MSG_TYPE
+ * b4:b3:b2 - UP(tid)
+ * b5 - Used in AP mode.
+ * More-data in tx dir, PS in rx.
+ * b7:b6 - Dot3 header(0),
+ * Dot11 Header(1),
+ * ACL data(2)
+ */
+ u8 info;
+
+ /*
+ * usage of 'info2' field(16-bit):
+ *
+ * b11:b0 - seq_no
+ * b12 - A-MSDU?
+ * b15:b13 - META_DATA_VERSION 0 - 7
+ */
+ __le16 info2;
+ __le16 info3;
+} __packed;
+
+static inline u8 wmi_data_hdr_get_up(struct wmi_data_hdr *dhdr)
+{
+ return (dhdr->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK;
+}
+
+static inline void wmi_data_hdr_set_up(struct wmi_data_hdr *dhdr,
+ u8 usr_pri)
+{
+ dhdr->info &= ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT);
+ dhdr->info |= usr_pri << WMI_DATA_HDR_UP_SHIFT;
+}
+
+static inline u8 wmi_data_hdr_get_dot11(struct wmi_data_hdr *dhdr)
+{
+ u8 data_type;
+
+ data_type = (dhdr->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) &
+ WMI_DATA_HDR_DATA_TYPE_MASK;
+ return (data_type == WMI_DATA_HDR_DATA_TYPE_802_11);
+}
+
+static inline u16 wmi_data_hdr_get_seqno(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_SEQNO_SHIFT) &
+ WMI_DATA_HDR_SEQNO_MASK;
+}
+
+static inline u8 wmi_data_hdr_is_amsdu(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_AMSDU_SHIFT) &
+ WMI_DATA_HDR_AMSDU_MASK;
+}
+
+static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
+{
+ return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
+ WMI_DATA_HDR_META_MASK;
+}
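+
+/*
+ * Illustrative sketch (not part of this patch): the message type carried
+ * in 'info' can be read back with the masks above, mirroring how
+ * ath6kl_wmi_data_sync_send() builds the field:
+ *
+ *	msg_type = (dhdr->info >> WMI_DATA_HDR_MSG_TYPE_SHIFT) &
+ *		   WMI_DATA_HDR_MSG_TYPE_MASK;	e.g. SYNC_MSGTYPE
+ */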
+
+/* Tx meta version definitions */
+#define WMI_MAX_TX_META_SZ 12
+#define WMI_META_VERSION_1 0x01
+#define WMI_META_VERSION_2 0x02
+
+struct wmi_tx_meta_v1 {
+ /* packet ID to identify the tx request */
+ u8 pkt_id;
+
+ /* rate policy to be used for the tx of this frame */
+ u8 rate_plcy_id;
+} __packed;
+
+struct wmi_tx_meta_v2 {
+ /*
+ * Offset from start of the WMI header for csum calculation to
+ * begin.
+ */
+ u8 csum_start;
+
+ /* offset from start of WMI header where final csum goes */
+ u8 csum_dest;
+
+ /* no of bytes over which csum is calculated */
+ u8 csum_flags;
+} __packed;
+
+struct wmi_rx_meta_v1 {
+ u8 status;
+
+ /* rate index mapped to rate at which this packet was received. */
+ u8 rix;
+
+ /* rssi of packet */
+ u8 rssi;
+
+ /* rf channel during packet reception */
+ u8 channel;
+
+ __le16 flags;
+} __packed;
+
+struct wmi_rx_meta_v2 {
+ __le16 csum;
+
+ /* bit 0 set - partial csum valid, bit 1 set - test mode */
+ u8 csum_flags;
+} __packed;
+
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le16 cmd_id;
+
+ /* info1 - 16 bits
+ * b03:b00 - id
+ * b15:b04 - unused */
+ __le16 info1;
+
+ /* for alignment */
+ __le16 reserved;
+} __packed;
+
+/* List of WMI commands */
+enum wmi_cmd_id {
+ WMI_CONNECT_CMDID = 0x0001,
+ WMI_RECONNECT_CMDID,
+ WMI_DISCONNECT_CMDID,
+ WMI_SYNCHRONIZE_CMDID,
+ WMI_CREATE_PSTREAM_CMDID,
+ WMI_DELETE_PSTREAM_CMDID,
+ WMI_START_SCAN_CMDID,
+ WMI_SET_SCAN_PARAMS_CMDID,
+ WMI_SET_BSS_FILTER_CMDID,
+ WMI_SET_PROBED_SSID_CMDID, /* 10 */
+ WMI_SET_LISTEN_INT_CMDID,
+ WMI_SET_BMISS_TIME_CMDID,
+ WMI_SET_DISC_TIMEOUT_CMDID,
+ WMI_GET_CHANNEL_LIST_CMDID,
+ WMI_SET_BEACON_INT_CMDID,
+ WMI_GET_STATISTICS_CMDID,
+ WMI_SET_CHANNEL_PARAMS_CMDID,
+ WMI_SET_POWER_MODE_CMDID,
+ WMI_SET_IBSS_PM_CAPS_CMDID,
+ WMI_SET_POWER_PARAMS_CMDID, /* 20 */
+ WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
+ WMI_ADD_CIPHER_KEY_CMDID,
+ WMI_DELETE_CIPHER_KEY_CMDID,
+ WMI_ADD_KRK_CMDID,
+ WMI_DELETE_KRK_CMDID,
+ WMI_SET_PMKID_CMDID,
+ WMI_SET_TX_PWR_CMDID,
+ WMI_GET_TX_PWR_CMDID,
+ WMI_SET_ASSOC_INFO_CMDID,
+ WMI_ADD_BAD_AP_CMDID, /* 30 */
+ WMI_DELETE_BAD_AP_CMDID,
+ WMI_SET_TKIP_COUNTERMEASURES_CMDID,
+ WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
+ WMI_SET_ACCESS_PARAMS_CMDID,
+ WMI_SET_RETRY_LIMITS_CMDID,
+ WMI_SET_OPT_MODE_CMDID,
+ WMI_OPT_TX_FRAME_CMDID,
+ WMI_SET_VOICE_PKT_SIZE_CMDID,
+ WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
+ WMI_SET_ROAM_CTRL_CMDID,
+ WMI_GET_ROAM_TBL_CMDID,
+ WMI_GET_ROAM_DATA_CMDID,
+ WMI_ENABLE_RM_CMDID,
+ WMI_SET_MAX_OFFHOME_DURATION_CMDID,
+ WMI_EXTENSION_CMDID, /* Non-wireless extensions */
+ WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ WMI_LQ_THRESHOLD_PARAMS_CMDID,
+ WMI_SET_LPREAMBLE_CMDID,
+ WMI_SET_RTS_CMDID, /* 50 */
+ WMI_CLR_RSSI_SNR_CMDID,
+ WMI_SET_FIXRATES_CMDID,
+ WMI_GET_FIXRATES_CMDID,
+ WMI_SET_AUTH_MODE_CMDID,
+ WMI_SET_REASSOC_MODE_CMDID,
+ WMI_SET_WMM_CMDID,
+ WMI_SET_WMM_TXOP_CMDID,
+ WMI_TEST_CMDID,
+
+ /* COEX AR6002 only */
+ WMI_SET_BT_STATUS_CMDID,
+ WMI_SET_BT_PARAMS_CMDID, /* 60 */
+
+ WMI_SET_KEEPALIVE_CMDID,
+ WMI_GET_KEEPALIVE_CMDID,
+ WMI_SET_APPIE_CMDID,
+ WMI_GET_APPIE_CMDID,
+ WMI_SET_WSC_STATUS_CMDID,
+
+ /* Wake on Wireless */
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ WMI_SET_WOW_MODE_CMDID,
+ WMI_GET_WOW_LIST_CMDID,
+ WMI_ADD_WOW_PATTERN_CMDID,
+ WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
+
+ WMI_SET_FRAMERATES_CMDID,
+ WMI_SET_AP_PS_CMDID,
+ WMI_SET_QOS_SUPP_CMDID,
+
+ /* WMI_THIN_RESERVED_... mark the start and end
+ * values for WMI_THIN_RESERVED command IDs. These
+ * command IDs can be found in wmi_thin.h */
+ WMI_THIN_RESERVED_START = 0x8000,
+ WMI_THIN_RESERVED_END = 0x8fff,
+
+ /* Developer commands starts at 0xF000 */
+ WMI_SET_BITRATE_CMDID = 0xF000,
+ WMI_GET_BITRATE_CMDID,
+ WMI_SET_WHALPARAM_CMDID,
+ WMI_SET_MAC_ADDRESS_CMDID,
+ WMI_SET_AKMP_PARAMS_CMDID,
+ WMI_SET_PMKID_LIST_CMDID,
+ WMI_GET_PMKID_LIST_CMDID,
+ WMI_ABORT_SCAN_CMDID,
+ WMI_SET_TARGET_EVENT_REPORT_CMDID,
+
+ /* Unused */
+ WMI_UNUSED1,
+ WMI_UNUSED2,
+
+ /* AP mode commands */
+ WMI_AP_HIDDEN_SSID_CMDID,
+ WMI_AP_SET_NUM_STA_CMDID,
+ WMI_AP_ACL_POLICY_CMDID,
+ WMI_AP_ACL_MAC_LIST_CMDID,
+ WMI_AP_CONFIG_COMMIT_CMDID,
+ WMI_AP_SET_MLME_CMDID,
+ WMI_AP_SET_PVB_CMDID,
+ WMI_AP_CONN_INACT_CMDID,
+ WMI_AP_PROT_SCAN_TIME_CMDID,
+ WMI_AP_SET_COUNTRY_CMDID,
+ WMI_AP_SET_DTIM_CMDID,
+ WMI_AP_MODE_STAT_CMDID,
+
+ WMI_SET_IP_CMDID,
+ WMI_SET_PARAMS_CMDID,
+ WMI_SET_MCAST_FILTER_CMDID,
+ WMI_DEL_MCAST_FILTER_CMDID,
+
+ WMI_ALLOW_AGGR_CMDID,
+ WMI_ADDBA_REQ_CMDID,
+ WMI_DELBA_REQ_CMDID,
+ WMI_SET_HT_CAP_CMDID,
+ WMI_SET_HT_OP_CMDID,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ WMI_SET_TX_SGI_PARAM_CMDID,
+ WMI_SET_RATE_POLICY_CMDID,
+
+ WMI_HCI_CMD_CMDID,
+ WMI_RX_FRAME_FORMAT_CMDID,
+ WMI_SET_THIN_MODE_CMDID,
+ WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
+
+ WMI_AP_SET_11BG_RATESET_CMDID,
+ WMI_SET_PMK_CMDID,
+ WMI_MCAST_FILTER_CMDID,
+
+ /* COEX CMDID AR6003 */
+ WMI_SET_BTCOEX_FE_ANT_CMDID,
+ WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
+ WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
+ WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
+ WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
+ WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
+ WMI_SET_BTCOEX_DEBUG_CMDID,
+ WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
+ WMI_GET_BTCOEX_STATS_CMDID,
+ WMI_GET_BTCOEX_CONFIG_CMDID,
+
+ WMI_SET_DFS_ENABLE_CMDID, /* F034 */
+ WMI_SET_DFS_MINRSSITHRESH_CMDID,
+ WMI_SET_DFS_MAXPULSEDUR_CMDID,
+ WMI_DFS_RADAR_DETECTED_CMDID,
+
+ /* P2P commands */
+ WMI_P2P_SET_CONFIG_CMDID, /* F038 */
+ WMI_WPS_SET_CONFIG_CMDID,
+ WMI_SET_REQ_DEV_ATTR_CMDID,
+ WMI_P2P_FIND_CMDID,
+ WMI_P2P_STOP_FIND_CMDID,
+ WMI_P2P_GO_NEG_START_CMDID,
+ WMI_P2P_LISTEN_CMDID,
+
+ WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */
+ WMI_SET_PROMISCUOUS_MODE_CMDID,
+ WMI_RX_FRAME_FILTER_CMDID,
+ WMI_SET_CHANNEL_CMDID,
+
+ /* WAC commands */
+ WMI_ENABLE_WAC_CMDID,
+ WMI_WAC_SCAN_REPLY_CMDID,
+ WMI_WAC_CTRL_REQ_CMDID,
+ WMI_SET_DIV_PARAMS_CMDID,
+
+ WMI_GET_PMK_CMDID,
+ WMI_SET_PASSPHRASE_CMDID,
+ WMI_SEND_ASSOC_RES_CMDID,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID,
+ WMI_GET_RFKILL_MODE_CMDID,
+
+ /* ACS command, consists of sub-commands */
+ WMI_ACS_CTRL_CMDID,
+
+ /* Ultra low power store / recall commands */
+ WMI_STORERECALL_CONFIGURE_CMDID,
+ WMI_STORERECALL_RECALL_CMDID,
+ WMI_STORERECALL_HOST_READY_CMDID,
+ WMI_FORCE_TARGET_ASSERT_CMDID,
+ WMI_SET_EXCESS_TX_RETRY_THRES_CMDID,
+};
+
+/* WMI_CONNECT_CMDID */
+enum network_type {
+ INFRA_NETWORK = 0x01,
+ ADHOC_NETWORK = 0x02,
+ ADHOC_CREATOR = 0x04,
+ AP_NETWORK = 0x10,
+};
+
+enum dot11_auth_mode {
+ OPEN_AUTH = 0x01,
+ SHARED_AUTH = 0x02,
+
+ /* different from IEEE_AUTH_MODE definitions */
+ LEAP_AUTH = 0x04,
+};
+
+enum {
+ AUTH_IDLE,
+ AUTH_OPEN_IN_PROGRESS,
+};
+
+enum auth_mode {
+ NONE_AUTH = 0x01,
+ WPA_AUTH = 0x02,
+ WPA2_AUTH = 0x04,
+ WPA_PSK_AUTH = 0x08,
+ WPA2_PSK_AUTH = 0x10,
+ WPA_AUTH_CCKM = 0x20,
+ WPA2_AUTH_CCKM = 0x40,
+};
+
+#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
+#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
+
+#define WMI_MIN_KEY_INDEX 0
+#define WMI_MAX_KEY_INDEX 3
+
+#define WMI_MAX_KEY_LEN 32
+
+/*
+ * NB: these values are ordered carefully; there are lots of
+ * implications in any reordering. In particular beware
+ * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
+ */
+#define ATH6KL_CIPHER_WEP 0
+#define ATH6KL_CIPHER_TKIP 1
+#define ATH6KL_CIPHER_AES_OCB 2
+#define ATH6KL_CIPHER_AES_CCM 3
+#define ATH6KL_CIPHER_CKIP 5
+#define ATH6KL_CIPHER_CCKM_KRK 6
+#define ATH6KL_CIPHER_NONE 7 /* pseudo value */
+
+/*
+ * 802.11 rate set.
+ */
+#define ATH6KL_RATE_MAXSIZE 15 /* max rates we'll handle */
+
+#define ATH_OUI_TYPE 0x01
+#define WPA_OUI_TYPE 0x01
+#define WMM_PARAM_OUI_SUBTYPE 0x01
+#define WMM_OUI_TYPE 0x02
+#define WSC_OUT_TYPE 0x04
+
+enum wmi_connect_ctrl_flags_bits {
+ CONNECT_ASSOC_POLICY_USER = 0x0001,
+ CONNECT_SEND_REASSOC = 0x0002,
+ CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ CONNECT_PROFILE_MATCH_DONE = 0x0008,
+ CONNECT_IGNORE_AAC_BEACON = 0x0010,
+ CONNECT_CSA_FOLLOW_BSS = 0x0020,
+ CONNECT_DO_WPA_OFFLOAD = 0x0040,
+ CONNECT_DO_NOT_DEAUTH = 0x0080,
+};
+
+struct wmi_connect_cmd {
+ u8 nw_type;
+ u8 dot11_auth_mode;
+ u8 auth_mode;
+ u8 prwise_crypto_type;
+ u8 prwise_crypto_len;
+ u8 grp_crypto_type;
+ u8 grp_crypto_len;
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ __le32 ctrl_flags;
+} __packed;
+
+/* WMI_RECONNECT_CMDID */
+struct wmi_reconnect_cmd {
+ /* channel hint */
+ __le16 channel;
+
+ /* mandatory if set */
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* WMI_ADD_CIPHER_KEY_CMDID */
+enum key_usage {
+ PAIRWISE_USAGE = 0x00,
+ GROUP_USAGE = 0x01,
+
+ /* default Tx Key - static WEP only */
+ TX_USAGE = 0x02,
+};
+
+/*
+ * Bit Flag
+ * Bit 0 - Initialise TSC - default is Initialize
+ */
+#define KEY_OP_INIT_TSC 0x01
+#define KEY_OP_INIT_RSC 0x02
+
+/* by default, initialise both the TSC & RSC */
+#define KEY_OP_INIT_VAL 0x03
+#define KEY_OP_VALID_MASK 0x03
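+
+/*
+ * Note (illustrative, not part of this patch): KEY_OP_INIT_VAL is simply
+ * (KEY_OP_INIT_TSC | KEY_OP_INIT_RSC), i.e. both sequence counters are
+ * initialised by default.
+ */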
+
+struct wmi_add_cipher_key_cmd {
+ u8 key_index;
+ u8 key_type;
+
+ /* enum key_usage */
+ u8 key_usage;
+
+ u8 key_len;
+
+ /* key replay sequence counter */
+ u8 key_rsc[8];
+
+ u8 key[WLAN_MAX_KEY_LEN];
+
+ /* additional key control info */
+ u8 key_op_ctrl;
+
+ u8 key_mac_addr[ETH_ALEN];
+} __packed;
+
+/* WMI_DELETE_CIPHER_KEY_CMDID */
+struct wmi_delete_cipher_key_cmd {
+ u8 key_index;
+} __packed;
+
+#define WMI_KRK_LEN 16
+
+/* WMI_ADD_KRK_CMDID */
+struct wmi_add_krk_cmd {
+ u8 krk[WMI_KRK_LEN];
+} __packed;
+
+/* WMI_SETPMKID_CMDID */
+
+#define WMI_PMKID_LEN 16
+
+enum pmkid_enable_flg {
+ PMKID_DISABLE = 0,
+ PMKID_ENABLE = 1,
+};
+
+struct wmi_setpmkid_cmd {
+ u8 bssid[ETH_ALEN];
+
+ /* enum pmkid_enable_flg */
+ u8 enable;
+
+ u8 pmkid[WMI_PMKID_LEN];
+} __packed;
+
+/* WMI_START_SCAN_CMD */
+enum wmi_scan_type {
+ WMI_LONG_SCAN = 0,
+ WMI_SHORT_SCAN = 1,
+};
+
+struct wmi_start_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy Cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel (msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in MHz */
+ __le16 ch_list[1];
+} __packed;
+
+/* WMI_SET_SCAN_PARAMS_CMDID */
+#define WMI_SHORTSCANRATIO_DEFAULT 3
+
+/*
+ * Warning: scan control flag value of 0xFF is used to disable
+ * all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
+ * flags here
+ */
+enum wmi_scan_ctrl_flags_bits {
+
+ /* set if can scan in the connect cmd */
+ CONNECT_SCAN_CTRL_FLAGS = 0x01,
+
+ /* set if scan for the SSID it is already connected to */
+ SCAN_CONNECTED_CTRL_FLAGS = 0x02,
+
+ /* set if enable active scan */
+ ACTIVE_SCAN_CTRL_FLAGS = 0x04,
+
+ /* set if enable roam scan when bmiss and lowrssi */
+ ROAM_SCAN_CTRL_FLAGS = 0x08,
+
+ /* set if follows customer BSSINFO reporting rule */
+ REPORT_BSSINFO_CTRL_FLAGS = 0x10,
+
+ /* if disabled, target doesn't scan after a disconnect event */
+ ENABLE_AUTO_CTRL_FLAGS = 0x20,
+
+ /*
+ * A scan complete event with canceled status will be generated when
+ * a scan is preempted before it gets completed.
+ */
+ ENABLE_SCAN_ABORT_EVENT = 0x40
+};
+
+#define DEFAULT_SCAN_CTRL_FLAGS \
+ (CONNECT_SCAN_CTRL_FLAGS | \
+ SCAN_CONNECTED_CTRL_FLAGS | \
+ ACTIVE_SCAN_CTRL_FLAGS | \
+ ROAM_SCAN_CTRL_FLAGS | \
+ ENABLE_AUTO_CTRL_FLAGS)
+
+struct wmi_scan_params_cmd {
+ /* sec */
+ __le16 fg_start_period;
+
+ /* sec */
+ __le16 fg_end_period;
+
+ /* sec */
+ __le16 bg_period;
+
+ /* msec */
+ __le16 maxact_chdwell_time;
+
+ /* msec */
+ __le16 pas_chdwell_time;
+
+ /* how many short scans for one long */
+ u8 short_scan_ratio;
+
+ u8 scan_ctrl_flags;
+
+ /* msec */
+ __le16 minact_chdwell_time;
+
+ /* max active scans per ssid */
+ __le16 maxact_scan_per_ssid;
+
+ /* msecs */
+ __le32 max_dfsch_act_time;
+} __packed;
+
+/* WMI_SET_BSS_FILTER_CMDID */
+enum wmi_bss_filter {
+ /* no beacons forwarded */
+ NONE_BSS_FILTER = 0x0,
+
+ /* all beacons forwarded */
+ ALL_BSS_FILTER,
+
+ /* only beacons matching profile */
+ PROFILE_FILTER,
+
+ /* all but beacons matching profile */
+ ALL_BUT_PROFILE_FILTER,
+
+ /* only beacons matching current BSS */
+ CURRENT_BSS_FILTER,
+
+ /* all but beacons matching BSS */
+ ALL_BUT_BSS_FILTER,
+
+ /* beacons matching probed ssid */
+ PROBED_SSID_FILTER,
+
+ /* marker only */
+ LAST_BSS_FILTER,
+};
+
+struct wmi_bss_filter_cmd {
+ /* see, enum wmi_bss_filter */
+ u8 bss_filter;
+
+ /* for alignment */
+ u8 reserved1;
+
+ /* for alignment */
+ __le16 reserved2;
+
+ __le32 ie_mask;
+} __packed;
+
+/* WMI_SET_PROBED_SSID_CMDID */
+#define MAX_PROBED_SSID_INDEX 9
+
+enum wmi_ssid_flag {
+ /* disables entry */
+ DISABLE_SSID_FLAG = 0,
+
+ /* probes specified ssid */
+ SPECIFIC_SSID_FLAG = 0x01,
+
+ /* probes for any ssid */
+ ANY_SSID_FLAG = 0x02,
+};
+
+struct wmi_probed_ssid_cmd {
+ /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 entry_index;
+
+ /* see, enum wmi_ssid_flag */
+ u8 flag;
+
+ u8 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_LISTEN_INT_CMDID
+ * The Listen interval is between 15 and 3000 TUs
+ */
+struct wmi_listen_int_cmd {
+ __le16 listen_intvl;
+ __le16 num_beacons;
+} __packed;
+
+/* WMI_SET_POWER_MODE_CMDID */
+enum wmi_power_mode {
+ REC_POWER = 0x01,
+ MAX_PERF_POWER,
+};
+
+struct wmi_power_mode_cmd {
+ /* see, enum wmi_power_mode */
+ u8 pwr_mode;
+} __packed;
+
+/*
+ * Policy to determine whether a power save failure event should be sent
+ * to the host during scanning
+ */
+enum power_save_fail_event_policy {
+ SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
+ IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
+};
+
+struct wmi_power_params_cmd {
+ /* msec */
+ __le16 idle_period;
+
+ __le16 pspoll_number;
+ __le16 dtim_policy;
+ __le16 tx_wakeup_policy;
+ __le16 num_tx_to_wakeup;
+ __le16 ps_fail_event_policy;
+} __packed;
+
+/* WMI_SET_DISC_TIMEOUT_CMDID */
+struct wmi_disc_timeout_cmd {
+ /* seconds */
+ u8 discon_timeout;
+} __packed;
+
+enum dir_type {
+ UPLINK_TRAFFIC = 0,
+ DNLINK_TRAFFIC = 1,
+ BIDIR_TRAFFIC = 2,
+};
+
+enum voiceps_cap_type {
+ DISABLE_FOR_THIS_AC = 0,
+ ENABLE_FOR_THIS_AC = 1,
+ ENABLE_FOR_ALL_AC = 2,
+};
+
+enum traffic_type {
+ TRAFFIC_TYPE_APERIODIC = 0,
+ TRAFFIC_TYPE_PERIODIC = 1,
+};
+
+/* WMI_SYNCHRONIZE_CMDID */
+struct wmi_sync_cmd {
+ u8 data_sync_map;
+} __packed;
+
+/* WMI_CREATE_PSTREAM_CMDID */
+struct wmi_create_pstream_cmd {
+ /* msec */
+ __le32 min_service_int;
+
+ /* msec */
+ __le32 max_service_int;
+
+ /* msec */
+ __le32 inactivity_int;
+
+ /* msec */
+ __le32 suspension_int;
+
+ __le32 service_start_time;
+
+ /* in bps */
+ __le32 min_data_rate;
+
+ /* in bps */
+ __le32 mean_data_rate;
+
+ /* in bps */
+ __le32 peak_data_rate;
+
+ __le32 max_burst_size;
+ __le32 delay_bound;
+
+ /* in bps */
+ __le32 min_phy_rate;
+
+ __le32 sba;
+ __le32 medium_time;
+
+ /* in octets */
+ __le16 nominal_msdu;
+
+ /* in octets */
+ __le16 max_msdu;
+
+ u8 traffic_class;
+
+ /* see, enum dir_type */
+ u8 traffic_direc;
+
+ u8 rx_queue_num;
+
+ /* see, enum traffic_type */
+ u8 traffic_type;
+
+ /* see, enum voiceps_cap_type */
+ u8 voice_psc_cap;
+ u8 tsid;
+
+ /* 802.1D user priority */
+ u8 user_pri;
+
+ /* nominal phy rate */
+ u8 nominal_phy;
+} __packed;
+
+/* WMI_DELETE_PSTREAM_CMDID */
+struct wmi_delete_pstream_cmd {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+ u8 tsid;
+} __packed;
+
+/* WMI_SET_CHANNEL_PARAMS_CMDID */
+enum wmi_phy_mode {
+ WMI_11A_MODE = 0x1,
+ WMI_11G_MODE = 0x2,
+ WMI_11AG_MODE = 0x3,
+ WMI_11B_MODE = 0x4,
+ WMI_11GONLY_MODE = 0x5,
+};
+
+#define WMI_MAX_CHANNELS 32
+
+/*
+ * WMI_RSSI_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 disables polling. Threshold values are
+ * in ascending order and should satisfy:
+ * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
+ * < highThreshold_upperVal)
+ */
+
+struct wmi_rssi_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* lowest of upper */
+ a_sle16 thresh_above1_val;
+
+ a_sle16 thresh_above2_val;
+ a_sle16 thresh_above3_val;
+ a_sle16 thresh_above4_val;
+ a_sle16 thresh_above5_val;
+
+ /* highest of upper */
+ a_sle16 thresh_above6_val;
+
+ /* lowest of below */
+ a_sle16 thresh_below1_val;
+
+ a_sle16 thresh_below2_val;
+ a_sle16 thresh_below3_val;
+ a_sle16 thresh_below4_val;
+ a_sle16 thresh_below5_val;
+
+ /* highest of below */
+ a_sle16 thresh_below6_val;
+
+ /* "alpha" */
+ u8 weight;
+
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SNR_THRESHOLD_PARAMS_CMDID
+ * Setting the polltime to 0 would disable polling.
+ */
+
+struct wmi_snr_threshold_params_cmd {
+ /* polling time as a factor of LI */
+ __le32 poll_time;
+
+ /* "alpha" */
+ u8 weight;
+
+ /* lowest of upper */
+ u8 thresh_above1_val;
+
+ u8 thresh_above2_val;
+ u8 thresh_above3_val;
+
+ /* highest of upper */
+ u8 thresh_above4_val;
+
+ /* lowest of below */
+ u8 thresh_below1_val;
+
+ u8 thresh_below2_val;
+ u8 thresh_below3_val;
+
+ /* highest of below */
+ u8 thresh_below4_val;
+
+ u8 reserved[3];
+} __packed;
+
+enum wmi_preamble_policy {
+ WMI_IGNORE_BARKER_IN_ERP = 0,
+ WMI_DONOT_IGNORE_BARKER_IN_ERP
+};
+
+struct wmi_set_lpreamble_cmd {
+ u8 status;
+ u8 preamble_policy;
+} __packed;
+
+struct wmi_set_rts_cmd {
+ __le16 threshold;
+} __packed;
+
+/* WMI_SET_TX_PWR_CMDID */
+struct wmi_set_tx_pwr_cmd {
+ /* in dBm units */
+ u8 dbM;
+} __packed;
+
+struct wmi_tx_pwr_reply {
+ /* in dBm units */
+ u8 dbM;
+} __packed;
+
+struct wmi_report_sleep_state_event {
+ __le32 sleep_state;
+};
+
+enum wmi_report_sleep_status {
+ WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP = 0,
+ WMI_REPORT_SLEEP_STATUS_IS_AWAKE
+};
+
+enum target_event_report_config {
+ /* default */
+ DISCONN_EVT_IN_RECONN = 0,
+
+ NO_DISCONN_EVT_IN_RECONN
+};
+
+/* Command Replies */
+
+/* WMI_GET_CHANNEL_LIST_CMDID reply */
+struct wmi_channel_list_reply {
+ u8 reserved;
+
+ /* number of channels in reply */
+ u8 num_ch;
+
+ /* channels in MHz */
+ __le16 ch_list[1];
+} __packed;
+
+/* List of Events (target to host) */
+enum wmi_event_id {
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID,
+ WMI_DISCONNECT_EVENTID,
+ WMI_BSSINFO_EVENTID,
+ WMI_CMDERROR_EVENTID,
+ WMI_REGDOMAIN_EVENTID,
+ WMI_PSTREAM_TIMEOUT_EVENTID,
+ WMI_NEIGHBOR_REPORT_EVENTID,
+ WMI_TKIP_MICERR_EVENTID,
+ WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
+ WMI_REPORT_STATISTICS_EVENTID,
+ WMI_RSSI_THRESHOLD_EVENTID,
+ WMI_ERROR_REPORT_EVENTID,
+ WMI_OPT_RX_FRAME_EVENTID,
+ WMI_REPORT_ROAM_TBL_EVENTID,
+ WMI_EXTENSION_EVENTID,
+ WMI_CAC_EVENTID,
+ WMI_SNR_THRESHOLD_EVENTID,
+ WMI_LQ_THRESHOLD_EVENTID,
+ WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
+ WMI_REPORT_ROAM_DATA_EVENTID,
+ WMI_TEST_EVENTID,
+ WMI_APLIST_EVENTID,
+ WMI_GET_WOW_LIST_EVENTID,
+ WMI_GET_PMKID_LIST_EVENTID,
+ WMI_CHANNEL_CHANGE_EVENTID,
+ WMI_PEER_NODE_EVENTID,
+ WMI_PSPOLL_EVENTID,
+ WMI_DTIMEXPIRY_EVENTID,
+ WMI_WLAN_VERSION_EVENTID,
+ WMI_SET_PARAMS_REPLY_EVENTID,
+ WMI_ADDBA_REQ_EVENTID, /*0x1020 */
+ WMI_ADDBA_RESP_EVENTID,
+ WMI_DELBA_REQ_EVENTID,
+ WMI_TX_COMPLETE_EVENTID,
+ WMI_HCI_EVENT_EVENTID,
+ WMI_ACL_DATA_EVENTID,
+ WMI_REPORT_SLEEP_STATE_EVENTID,
+ WMI_REPORT_BTCOEX_STATS_EVENTID,
+ WMI_REPORT_BTCOEX_CONFIG_EVENTID,
+ WMI_GET_PMK_EVENTID,
+
+ /* DFS Events */
+ WMI_DFS_HOST_ATTACH_EVENTID,
+ WMI_DFS_HOST_INIT_EVENTID,
+ WMI_DFS_RESET_DELAYLINES_EVENTID,
+ WMI_DFS_RESET_RADARQ_EVENTID,
+ WMI_DFS_RESET_AR_EVENTID,
+ WMI_DFS_RESET_ARQ_EVENTID,
+ WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
+ WMI_DFS_SET_BANGRADAR_EVENTID,
+ WMI_DFS_SET_DEBUGLEVEL_EVENTID,
+ WMI_DFS_PHYERR_EVENTID,
+
+ /* CCX Events */
+ WMI_CCX_RM_STATUS_EVENTID,
+
+ /* P2P Events */
+ WMI_P2P_GO_NEG_RESULT_EVENTID,
+
+ WMI_WAC_SCAN_DONE_EVENTID,
+ WMI_WAC_REPORT_BSS_EVENTID,
+ WMI_WAC_START_WPS_EVENTID,
+ WMI_WAC_CTRL_REQ_REPLY_EVENTID,
+
+ /* RFKILL Events */
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_RFKILL_GET_MODE_CMD_EVENTID,
+ WMI_THIN_RESERVED_START_EVENTID = 0x8000,
+
+ /*
+ * Events in this range are reserved for thinmode
+ * See wmi_thin.h for actual definitions
+ */
+ WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
+
+ WMI_SET_CHANNEL_EVENTID,
+ WMI_ASSOC_REQ_EVENTID,
+
+ /* Generic ACS event */
+ WMI_ACS_EVENTID,
+ WMI_REPORT_WMM_PARAMS_EVENTID
+};
+
+struct wmi_ready_event_2 {
+ __le32 sw_version;
+ __le32 abi_version;
+ u8 mac_addr[ETH_ALEN];
+ u8 phy_cap;
+} __packed;
+
+/* Connect Event */
+struct wmi_connect_event {
+ __le16 ch;
+ u8 bssid[ETH_ALEN];
+ __le16 listen_intvl;
+ __le16 beacon_intvl;
+ __le32 nw_type;
+ u8 beacon_ie_len;
+ u8 assoc_req_len;
+ u8 assoc_resp_len;
+ u8 assoc_info[1];
+} __packed;
+
+/* Disconnect Event */
+enum wmi_disconnect_reason {
+ NO_NETWORK_AVAIL = 0x01,
+
+ /* bmiss */
+ LOST_LINK = 0x02,
+
+ DISCONNECT_CMD = 0x03,
+ BSS_DISCONNECTED = 0x04,
+ AUTH_FAILED = 0x05,
+ ASSOC_FAILED = 0x06,
+ NO_RESOURCES_AVAIL = 0x07,
+ CSERV_DISCONNECT = 0x08,
+ INVALID_PROFILE = 0x0a,
+ DOT11H_CHANNEL_SWITCH = 0x0b,
+ PROFILE_MISMATCH = 0x0c,
+ CONNECTION_EVICTED = 0x0d,
+ IBSS_MERGE = 0xe,
+};
+
+struct wmi_disconnect_event {
+ /* reason code, see 802.11 spec. */
+ __le16 proto_reason_status;
+
+ /* set if known */
+ u8 bssid[ETH_ALEN];
+
+ /* see WMI_DISCONNECT_REASON */
+ u8 disconn_reason;
+
+ u8 assoc_resp_len;
+ u8 assoc_info[1];
+} __packed;
+
+/*
+ * BSS Info Event.
+ * Mechanism used to inform the host of the presence and characteristics
+ * of wireless networks found. Consists of a bss info header followed by
+ * the beacon or probe-response frame body. The 802.11 header is not included.
+ */
+enum wmi_bi_ftype {
+ BEACON_FTYPE = 0x1,
+ PROBERESP_FTYPE,
+ ACTION_MGMT_FTYPE,
+ PROBEREQ_FTYPE,
+};
+
+struct wmi_bss_info_hdr {
+ __le16 ch;
+
+ /* see, enum wmi_bi_ftype */
+ u8 frame_type;
+
+ u8 snr;
+ a_sle16 rssi;
+ u8 bssid[ETH_ALEN];
+ __le32 ie_mask;
+} __packed;
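+
+/*
+ * Illustrative example (not a driver API; the helper name is hypothetical):
+ * per the comment above, a BSS info event is simply the header followed by
+ * the management frame body, so with the packed header the body starts
+ * immediately after it.
+ */
+static inline const u8 *example_bss_info_frame_body(const u8 *datap,
+ size_t len, size_t *body_len)
+{
+ if (len < sizeof(struct wmi_bss_info_hdr))
+ return NULL;
+
+ *body_len = len - sizeof(struct wmi_bss_info_hdr);
+ return datap + sizeof(struct wmi_bss_info_hdr);
+}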
+
+/*
+ * BSS INFO HDR version 2.0
+ * With the 6-byte HTC header and the 6-byte WMI header,
+ * WMI_BSS_INFO_HDR cannot be accommodated in the space freed by removing
+ * the 802.11 management header, so:
+ * - Reduce the ie_mask to 2 bytes, as only two bit flags are used
+ * - Remove rssi and compute it on the host: rssi = snr - 95
+ */
+struct wmi_bss_info_hdr2 {
+ __le16 ch;
+
+ /* see, enum wmi_bi_ftype */
+ u8 frame_type;
+
+ u8 snr;
+ u8 bssid[ETH_ALEN];
+ __le16 ie_mask;
+} __packed;
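+
+/*
+ * Illustrative example (not a driver API; the helper name is hypothetical):
+ * with hdr2 the target no longer reports rssi, so the host derives it from
+ * snr as noted above.
+ */
+static inline int example_bss_info_hdr2_rssi(const struct wmi_bss_info_hdr2 *hdr)
+{
+ return (int) hdr->snr - 95; /* rssi = snr - 95 */
+}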
+
+/* Command Error Event */
+enum wmi_error_code {
+ INVALID_PARAM = 0x01,
+ ILLEGAL_STATE = 0x02,
+ INTERNAL_ERROR = 0x03,
+};
+
+struct wmi_cmd_error_event {
+ __le16 cmd_id;
+ u8 err_code;
+} __packed;
+
+struct wmi_pstream_timeout_event {
+ u8 tx_queue_num;
+ u8 rx_queue_num;
+ u8 traffic_direc;
+ u8 traffic_class;
+} __packed;
+
+/*
+ * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
+ * the host of BSSs it has found that match the current profile.
+ * It can be used by the host to cache PMKs and/or to initiate
+ * pre-authentication if the BSS supports it. The first bssid is always
+ * the currently associated BSS.
+ * The bssid and bssFlags information repeats according to the number
+ * of APs reported.
+ */
+enum wmi_bss_flags {
+ WMI_DEFAULT_BSS_FLAGS = 0x00,
+ WMI_PREAUTH_CAPABLE_BSS = 0x01,
+ WMI_PMKID_VALID_BSS = 0x02,
+};
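+
+/*
+ * Illustrative example only: the per-AP record layout below is assumed from
+ * the comment above (a bssid followed by a bss flags byte, repeated once per
+ * reported AP); it is not a definition used by the driver.
+ */
+struct example_neighbor_info {
+ u8 bssid[ETH_ALEN];
+ u8 bss_flags; /* see enum wmi_bss_flags */
+} __packed;
+
+static inline bool example_neighbor_preauth_capable(const struct example_neighbor_info *ap)
+{
+ return (ap->bss_flags & WMI_PREAUTH_CAPABLE_BSS) != 0;
+}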
+
+/* TKIP MIC Error Event */
+struct wmi_tkip_micerr_event {
+ u8 key_id;
+ u8 is_mcast;
+} __packed;
+
+/* WMI_SCAN_COMPLETE_EVENTID */
+struct wmi_scan_complete_event {
+ a_sle32 status;
+} __packed;
+
+#define MAX_OPT_DATA_LEN 1400
+
+/*
+ * Special frame receive Event.
+ * Mechanism used to inform the host of the reception of special frames.
+ * Consists of special frame info header followed by special frame body.
+ * The 802.11 header is not included.
+ */
+struct wmi_opt_rx_info_hdr {
+ __le16 ch;
+ u8 frame_type;
+ s8 snr;
+ u8 src_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+} __packed;
+
+/* Reporting statistics */
+struct tx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 rts_success_cnt;
+ __le32 pkt_per_ac[4];
+ __le32 err_per_ac[4];
+
+ __le32 err;
+ __le32 fail_cnt;
+ __le32 retry_cnt;
+ __le32 mult_retry_cnt;
+ __le32 rts_fail_cnt;
+ a_sle32 ucast_rate;
+} __packed;
+
+struct rx_stats {
+ __le32 pkt;
+ __le32 byte;
+ __le32 ucast_pkt;
+ __le32 ucast_byte;
+ __le32 mcast_pkt;
+ __le32 mcast_byte;
+ __le32 bcast_pkt;
+ __le32 bcast_byte;
+ __le32 frgment_pkt;
+
+ __le32 err;
+ __le32 crc_err;
+ __le32 key_cache_miss;
+ __le32 decrypt_err;
+ __le32 dupl_frame;
+ a_sle32 ucast_rate;
+} __packed;
+
+struct tkip_ccmp_stats {
+ __le32 tkip_local_mic_fail;
+ __le32 tkip_cnter_measures_invoked;
+ __le32 tkip_replays;
+ __le32 tkip_fmt_err;
+ __le32 ccmp_fmt_err;
+ __le32 ccmp_replays;
+} __packed;
+
+struct pm_stats {
+ __le32 pwr_save_failure_cnt;
+ __le16 stop_tx_failure_cnt;
+ __le16 atim_tx_failure_cnt;
+ __le16 atim_rx_failure_cnt;
+ __le16 bcn_rx_failure_cnt;
+} __packed;
+
+struct cserv_stats {
+ __le32 cs_bmiss_cnt;
+ __le32 cs_low_rssi_cnt;
+ __le16 cs_connect_cnt;
+ __le16 cs_discon_cnt;
+ a_sle16 cs_ave_beacon_rssi;
+ __le16 cs_roam_count;
+ a_sle16 cs_rssi;
+ u8 cs_snr;
+ u8 cs_ave_beacon_snr;
+ u8 cs_last_roam_msec;
+} __packed;
+
+struct wlan_net_stats {
+ struct tx_stats tx;
+ struct rx_stats rx;
+ struct tkip_ccmp_stats tkip_ccmp_stats;
+} __packed;
+
+struct arp_stats {
+ __le32 arp_received;
+ __le32 arp_matched;
+ __le32 arp_replied;
+} __packed;
+
+struct wlan_wow_stats {
+ __le32 wow_pkt_dropped;
+ __le16 wow_evt_discarded;
+ u8 wow_host_pkt_wakeups;
+ u8 wow_host_evt_wakeups;
+} __packed;
+
+struct wmi_target_stats {
+ __le32 lq_val;
+ a_sle32 noise_floor_calib;
+ struct pm_stats pm_stats;
+ struct wlan_net_stats stats;
+ struct wlan_wow_stats wow_stats;
+ struct arp_stats arp_stats;
+ struct cserv_stats cserv_stats;
+} __packed;
+
+/*
+ * WMI_RSSI_THRESHOLD_EVENTID.
+ * Indicates RSSI events to the host. An event is generated whenever the
+ * RSSI crosses a configured threshold value.
+ */
+enum wmi_rssi_threshold_val {
+ WMI_RSSI_THRESHOLD1_ABOVE = 0,
+ WMI_RSSI_THRESHOLD2_ABOVE,
+ WMI_RSSI_THRESHOLD3_ABOVE,
+ WMI_RSSI_THRESHOLD4_ABOVE,
+ WMI_RSSI_THRESHOLD5_ABOVE,
+ WMI_RSSI_THRESHOLD6_ABOVE,
+ WMI_RSSI_THRESHOLD1_BELOW,
+ WMI_RSSI_THRESHOLD2_BELOW,
+ WMI_RSSI_THRESHOLD3_BELOW,
+ WMI_RSSI_THRESHOLD4_BELOW,
+ WMI_RSSI_THRESHOLD5_BELOW,
+ WMI_RSSI_THRESHOLD6_BELOW
+};
+
+struct wmi_rssi_threshold_event {
+ a_sle16 rssi;
+ u8 range;
+} __packed;
+
+enum wmi_snr_threshold_val {
+ WMI_SNR_THRESHOLD1_ABOVE = 1,
+ WMI_SNR_THRESHOLD1_BELOW,
+ WMI_SNR_THRESHOLD2_ABOVE,
+ WMI_SNR_THRESHOLD2_BELOW,
+ WMI_SNR_THRESHOLD3_ABOVE,
+ WMI_SNR_THRESHOLD3_BELOW,
+ WMI_SNR_THRESHOLD4_ABOVE,
+ WMI_SNR_THRESHOLD4_BELOW
+};
+
+struct wmi_snr_threshold_event {
+ /* see, enum wmi_snr_threshold_val */
+ u8 range;
+
+ u8 snr;
+} __packed;
+
+/* WMI_REPORT_ROAM_TBL_EVENTID */
+#define MAX_ROAM_TBL_CAND 5
+
+struct wmi_bss_roam_info {
+ a_sle32 roam_util;
+ u8 bssid[ETH_ALEN];
+ s8 rssi;
+ s8 rssidt;
+ s8 last_rssi;
+ s8 util;
+ s8 bias;
+
+ /* for alignment */
+ u8 reserved;
+} __packed;
+
+/* WMI_CAC_EVENTID */
+enum cac_indication {
+ CAC_INDICATION_ADMISSION = 0x00,
+ CAC_INDICATION_ADMISSION_RESP = 0x01,
+ CAC_INDICATION_DELETE = 0x02,
+ CAC_INDICATION_NO_RESP = 0x03,
+};
+
+#define WMM_TSPEC_IE_LEN 63
+
+struct wmi_cac_event {
+ u8 ac;
+ u8 cac_indication;
+ u8 status_code;
+ u8 tspec_suggestion[WMM_TSPEC_IE_LEN];
+} __packed;
+
+/* WMI_APLIST_EVENTID */
+
+enum aplist_ver {
+ APLIST_VER1 = 1,
+};
+
+struct wmi_ap_info_v1 {
+ u8 bssid[ETH_ALEN];
+ __le16 channel;
+} __packed;
+
+union wmi_ap_info {
+ struct wmi_ap_info_v1 ap_info_v1;
+} __packed;
+
+struct wmi_aplist_event {
+ u8 ap_list_ver;
+ u8 num_ap;
+ union wmi_ap_info ap_list[1];
+} __packed;
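+
+/*
+ * Illustrative example (not a driver API; the helper name and pr_debug use
+ * are for illustration only): walking the variable-length AP list that
+ * follows the two-byte wmi_aplist_event header, assuming APLIST_VER1.
+ */
+static inline void example_dump_aplist(const struct wmi_aplist_event *ev)
+{
+ u8 i;
+
+ if (ev->ap_list_ver != APLIST_VER1)
+ return;
+
+ for (i = 0; i < ev->num_ap; i++) {
+ const struct wmi_ap_info_v1 *ap = &ev->ap_list[i].ap_info_v1;
+
+ pr_debug("ap %pM channel %u\n", ap->bssid,
+ le16_to_cpu(ap->channel));
+ }
+}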
+
+/* Developer Commands */
+
+/*
+ * WMI_SET_BITRATE_CMDID
+ *
+ * Get bit rate cmd uses same definition as set bit rate cmd
+ */
+enum wmi_bit_rate {
+ RATE_AUTO = -1,
+ RATE_1Mb = 0,
+ RATE_2Mb = 1,
+ RATE_5_5Mb = 2,
+ RATE_11Mb = 3,
+ RATE_6Mb = 4,
+ RATE_9Mb = 5,
+ RATE_12Mb = 6,
+ RATE_18Mb = 7,
+ RATE_24Mb = 8,
+ RATE_36Mb = 9,
+ RATE_48Mb = 10,
+ RATE_54Mb = 11,
+ RATE_MCS_0_20 = 12,
+ RATE_MCS_1_20 = 13,
+ RATE_MCS_2_20 = 14,
+ RATE_MCS_3_20 = 15,
+ RATE_MCS_4_20 = 16,
+ RATE_MCS_5_20 = 17,
+ RATE_MCS_6_20 = 18,
+ RATE_MCS_7_20 = 19,
+ RATE_MCS_0_40 = 20,
+ RATE_MCS_1_40 = 21,
+ RATE_MCS_2_40 = 22,
+ RATE_MCS_3_40 = 23,
+ RATE_MCS_4_40 = 24,
+ RATE_MCS_5_40 = 25,
+ RATE_MCS_6_40 = 26,
+ RATE_MCS_7_40 = 27,
+};
+
+struct wmi_bit_rate_reply {
+ /* see, enum wmi_bit_rate */
+ s8 rate_index;
+} __packed;
+
+/*
+ * WMI_SET_FIXRATES_CMDID
+ *
+ * Get fix rates cmd uses same definition as set fix rates cmd
+ */
+struct wmi_fix_rates_reply {
+ /* see wmi_bit_rate */
+ __le32 fix_rate_mask;
+} __packed;
+
+enum roam_data_type {
+ /* get the roam time data */
+ ROAM_DATA_TIME = 1,
+};
+
+struct wmi_target_roam_time {
+ __le32 disassoc_time;
+ __le32 no_txrx_time;
+ __le32 assoc_time;
+ __le32 allow_txrx_time;
+ u8 disassoc_bssid[ETH_ALEN];
+ s8 disassoc_bss_rssi;
+ u8 assoc_bssid[ETH_ALEN];
+ s8 assoc_bss_rssi;
+} __packed;
+
+enum wmi_txop_cfg {
+ WMI_TXOP_DISABLED = 0,
+ WMI_TXOP_ENABLED
+};
+
+struct wmi_set_wmm_txop_cmd {
+ u8 txop_enable;
+} __packed;
+
+struct wmi_set_keepalive_cmd {
+ u8 keep_alive_intvl;
+} __packed;
+
+struct wmi_get_keepalive_cmd {
+ __le32 configured;
+ u8 keep_alive_intvl;
+} __packed;
+
+/* Notify the WSC registration status to the target */
+#define WSC_REG_ACTIVE 1
+#define WSC_REG_INACTIVE 0
+
+#define WOW_MAX_FILTER_LISTS 1
+#define WOW_MAX_FILTERS_PER_LIST 4
+#define WOW_PATTERN_SIZE 64
+#define WOW_MASK_SIZE 64
+
+#define MAC_MAX_FILTERS_PER_LIST 4
+
+struct wow_filter {
+ u8 wow_valid_filter;
+ u8 wow_filter_id;
+ u8 wow_filter_size;
+ u8 wow_filter_offset;
+ u8 wow_filter_mask[WOW_MASK_SIZE];
+ u8 wow_filter_pattern[WOW_PATTERN_SIZE];
+} __packed;
+
+#define MAX_IP_ADDRS 2
+
+struct wmi_set_ip_cmd {
+ /* IP in network byte order */
+ __le32 ips[MAX_IP_ADDRS];
+} __packed;
+
+/* WMI_GET_WOW_LIST_CMD reply */
+struct wmi_get_wow_list_reply {
+ /* number of patterns in reply */
+ u8 num_filters;
+
+ /* this is filter # x of total num_filters */
+ u8 this_filter_num;
+
+ u8 wow_mode;
+ u8 host_mode;
+ struct wow_filter wow_filters[1];
+} __packed;
+
+/* WMI_SET_AKMP_PARAMS_CMD */
+
+struct wmi_pmkid {
+ u8 pmkid[WMI_PMKID_LEN];
+} __packed;
+
+/* WMI_GET_PMKID_LIST_CMD Reply */
+struct wmi_pmkid_list_reply {
+ __le32 num_pmkid;
+ u8 bssid_list[ETH_ALEN][1];
+ struct wmi_pmkid pmkid_list[1];
+} __packed;
+
+/* WMI_ADDBA_REQ_EVENTID */
+struct wmi_addba_req_event {
+ u8 tid;
+ u8 win_sz;
+ __le16 st_seq_no;
+
+ /* f/w response for ADDBA Req; OK (0) or failure (!=0) */
+ u8 status;
+} __packed;
+
+/* WMI_ADDBA_RESP_EVENTID */
+struct wmi_addba_resp_event {
+ u8 tid;
+
+ /* OK (0), failure (!=0) */
+ u8 status;
+
+ /* three values: not supported(0), 3839, 8k */
+ __le16 amsdu_sz;
+} __packed;
+
+/* WMI_DELBA_EVENTID
+ * The f/w received a DELBA for a peer and processed it.
+ * The host is notified of this.
+ */
+struct wmi_delba_event {
+ u8 tid;
+ u8 is_peer_initiator;
+ __le16 reason_code;
+} __packed;
+
+#define PEER_NODE_JOIN_EVENT 0x00
+#define PEER_NODE_LEAVE_EVENT 0x01
+#define PEER_FIRST_NODE_JOIN_EVENT 0x10
+#define PEER_LAST_NODE_LEAVE_EVENT 0x11
+
+struct wmi_peer_node_event {
+ u8 event_code;
+ u8 peer_mac_addr[ETH_ALEN];
+} __packed;
+
+/* Transmit complete event data structure(s) */
+
+/* version 1 of tx complete msg */
+struct tx_complete_msg_v1 {
+#define TX_COMPLETE_STATUS_SUCCESS 0
+#define TX_COMPLETE_STATUS_RETRIES 1
+#define TX_COMPLETE_STATUS_NOLINK 2
+#define TX_COMPLETE_STATUS_TIMEOUT 3
+#define TX_COMPLETE_STATUS_OTHER 4
+
+ u8 status;
+
+ /* packet ID to identify parent packet */
+ u8 pkt_id;
+
+ /* rate index on successful transmission */
+ u8 rate_idx;
+
+ /* number of ACK failures in tx attempt */
+ u8 ack_failures;
+} __packed;
+
+struct wmi_tx_complete_event {
+ /* no of tx comp msgs following this struct */
+ u8 num_msg;
+
+ /* length in bytes for each individual msg following this struct */
+ u8 msg_len;
+
+ /* version of tx complete msg data following this struct */
+ u8 msg_type;
+
+ /* individual messages follow this header */
+ u8 reserved;
+} __packed;
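+
+/*
+ * Illustrative example (not a driver API; the helper name is hypothetical):
+ * num_msg completion messages, each msg_len bytes long and of version
+ * msg_type, follow the wmi_tx_complete_event header in the event buffer.
+ */
+static inline void example_walk_tx_complete(const u8 *datap)
+{
+ const struct wmi_tx_complete_event *ev =
+ (const struct wmi_tx_complete_event *) datap;
+ const u8 *msg = datap + sizeof(*ev);
+ u8 i;
+
+ if (ev->msg_type != 1) /* only v1 messages handled here */
+ return;
+
+ for (i = 0; i < ev->num_msg; i++, msg += ev->msg_len) {
+ const struct tx_complete_msg_v1 *v1 =
+ (const struct tx_complete_msg_v1 *) msg;
+
+ pr_debug("pkt_id %u status %u\n", v1->pkt_id, v1->status);
+ }
+}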
+
+/*
+ * ------- AP Mode definitions --------------
+ */
+
+/*
+ * !!! Warning !!!
+ * Changing the following values requires recompiling both the driver and
+ * the firmware.
+ */
+#define AP_MAX_NUM_STA 8
+
+/* Special AID used to set the DTIM flag in beacons */
+#define MCAST_AID 0xFF
+
+#define DEF_AP_COUNTRY_CODE "US "
+
+/* Used with WMI_AP_SET_NUM_STA_CMDID */
+
+struct wmi_ap_set_pvb_cmd {
+ __le32 flag;
+ __le16 aid;
+} __packed;
+
+struct wmi_rx_frame_format_cmd {
+ /* version of meta data for rx packets <0 = default> (0-7 = valid) */
+ u8 meta_ver;
+
+ /*
+ * 1 == leave .11 header intact,
+ * 0 == replace .11 header with .3 <default>
+ */
+ u8 dot11_hdr;
+
+ /*
+ * 1 == defragmentation is performed by host,
+ * 0 == performed by target <default>
+ */
+ u8 defrag_on_host;
+
+ /* for alignment */
+ u8 reserved[1];
+} __packed;
+
+/* AP mode events */
+
+/* WMI_PS_POLL_EVENT */
+struct wmi_pspoll_event {
+ __le16 aid;
+} __packed;
+
+struct wmi_per_sta_stat {
+ __le32 tx_bytes;
+ __le32 tx_pkts;
+ __le32 tx_error;
+ __le32 tx_discard;
+ __le32 rx_bytes;
+ __le32 rx_pkts;
+ __le32 rx_error;
+ __le32 rx_discard;
+ __le32 aid;
+} __packed;
+
+struct wmi_ap_mode_stat {
+ __le32 action;
+ struct wmi_per_sta_stat sta[AP_MAX_NUM_STA + 1];
+} __packed;
+
+/* End of AP mode definitions */
+
+/* Extended WMI (WMIX)
+ *
+ * Extended WMIX commands are encapsulated in a WMI message with
+ * cmd=WMI_EXTENSION_CMDID.
+ *
+ * Extended WMI commands are those that are needed during wireless
+ * operation, but which are not really wireless commands. This allows,
+ * for instance, platform-specific commands. Extended WMI commands are
+ * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
+ * Extended WMI events are similarly embedded in a WMI event message with
+ * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
+ */
+struct wmix_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+enum wmix_command_id {
+ WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
+ WMIX_DSETDATA_REPLY_CMDID,
+ WMIX_GPIO_OUTPUT_SET_CMDID,
+ WMIX_GPIO_INPUT_GET_CMDID,
+ WMIX_GPIO_REGISTER_SET_CMDID,
+ WMIX_GPIO_REGISTER_GET_CMDID,
+ WMIX_GPIO_INTR_ACK_CMDID,
+ WMIX_HB_CHALLENGE_RESP_CMDID,
+ WMIX_DBGLOG_CFG_MODULE_CMDID,
+ WMIX_PROF_CFG_CMDID, /* 0x200a */
+ WMIX_PROF_ADDR_SET_CMDID,
+ WMIX_PROF_START_CMDID,
+ WMIX_PROF_STOP_CMDID,
+ WMIX_PROF_COUNT_GET_CMDID,
+};
+
+enum wmix_event_id {
+ WMIX_DSETOPENREQ_EVENTID = 0x3001,
+ WMIX_DSETCLOSE_EVENTID,
+ WMIX_DSETDATAREQ_EVENTID,
+ WMIX_GPIO_INTR_EVENTID,
+ WMIX_GPIO_DATA_EVENTID,
+ WMIX_GPIO_ACK_EVENTID,
+ WMIX_HB_CHALLENGE_RESP_EVENTID,
+ WMIX_DBGLOG_EVENTID,
+ WMIX_PROF_COUNT_EVENTID,
+};
+
+/*
+ * ------Error Detection support-------
+ */
+
+/*
+ * WMIX_HB_CHALLENGE_RESP_CMDID
+ * Heartbeat Challenge Response command
+ */
+struct wmix_hb_challenge_resp_cmd {
+ __le32 cookie;
+ __le32 source;
+} __packed;
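+
+/*
+ * Illustrative example only (buffer handling is simplified; the driver uses
+ * its own WMI buffer helpers and headroom): a WMIX command is a wmix_cmd_hdr
+ * plus its payload, carried as the body of an ordinary WMI_EXTENSION_CMDID
+ * command. Here the heartbeat cookie is echoed back to the target.
+ */
+static inline struct sk_buff *example_build_hb_challenge_resp(u32 cookie,
+ u32 source)
+{
+ struct sk_buff *skb;
+ struct wmix_cmd_hdr *hdr;
+ struct wmix_hb_challenge_resp_cmd *cmd;
+
+ skb = dev_alloc_skb(64 + sizeof(*hdr) + sizeof(*cmd));
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, 64); /* room for the outer WMI/HTC headers */
+
+ hdr = (struct wmix_cmd_hdr *) skb_put(skb, sizeof(*hdr));
+ hdr->cmd_id = cpu_to_le32(WMIX_HB_CHALLENGE_RESP_CMDID);
+
+ cmd = (struct wmix_hb_challenge_resp_cmd *) skb_put(skb, sizeof(*cmd));
+ cmd->cookie = cpu_to_le32(cookie); /* echo the target's cookie */
+ cmd->source = cpu_to_le32(source);
+
+ /* this skb would then be sent as a WMI_EXTENSION_CMDID command */
+ return skb;
+}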
+
+/* End of Extended WMI (WMIX) */
+
+enum wmi_sync_flag {
+ NO_SYNC_WMIFLAG = 0,
+
+ /* transmit all queued data before cmd */
+ SYNC_BEFORE_WMIFLAG,
+
+ /* any new data waits until cmd execs */
+ SYNC_AFTER_WMIFLAG,
+
+ SYNC_BOTH_WMIFLAG,
+
+ /* end marker */
+ END_WMIFLAG
+};
+
+enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
+void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
+int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
+ u8 msg_type, bool more_data,
+ enum wmi_data_hdr_data_type data_type,
+ u8 meta_ver, void *tx_meta_info);
+
+int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
+int ath6kl_wmi_data_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+ u32 layer2_priority, bool wmm_enabled,
+ u8 *ac);
+
+int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
+void ath6kl_wmi_iterate_nodes(struct wmi *wmi,
+ void (*f) (void *arg, struct bss *),
+ void *arg);
+struct bss *ath6kl_wmi_find_node(struct wmi *wmi, const u8 *mac_addr);
+void ath6kl_wmi_node_free(struct wmi *wmi, const u8 *mac_addr);
+
+int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
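+
+/*
+ * Illustrative example (hypothetical helper, not a driver API): the sync
+ * flag passed to ath6kl_wmi_cmd_send() orders the command against in-flight
+ * data per enum wmi_sync_flag above; a command that must not race with
+ * queued traffic can use SYNC_BOTH_WMIFLAG.
+ */
+static inline int example_send_cmd_synced(struct wmi *wmi, struct sk_buff *skb,
+ enum wmi_cmd_id cmd_id)
+{
+ /* flush queued tx first, then hold new data until the cmd executes */
+ return ath6kl_wmi_cmd_send(wmi, skb, cmd_id, SYNC_BOTH_WMIFLAG);
+}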
+
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+ enum dot11_auth_mode dot11_auth_mode,
+ enum auth_mode auth_mode,
+ enum crypto_type pairwise_crypto,
+ u8 pairwise_crypto_len,
+ enum crypto_type group_crypto,
+ u8 group_crypto_len, int ssid_len, u8 *ssid,
+ u8 *bssid, u16 channel, u32 ctrl_flags);
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list);
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+ u16 fg_end_sec, u16 bg_sec,
+ u16 minact_chdw_msec, u16 maxact_chdw_msec,
+ u16 pas_chdw_msec, u8 short_scan_ratio,
+ u8 scan_ctrl_flag, u32 max_dfsch_act_time,
+ u16 maxact_scan_per_ssid);
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+ u8 ssid_len, u8 *ssid);
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+ u16 listen_beacons);
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+ u16 ps_poll_num, u16 dtim_policy,
+ u16 tx_wakup_policy, u16 num_tx_to_wakeup,
+ u16 ps_fail_event_policy);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+ struct wmi_create_pstream_cmd *pstream);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+
+int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+ u8 preamble_policy);
+
+int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
+
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+ enum crypto_type key_type,
+ u8 key_usage, u8 key_len,
+ u8 *key_rsc, u8 *key_material,
+ u8 key_op_ctrl, u8 *mac_addr,
+ enum wmi_sync_flag sync_flag);
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+ const u8 *pmkid, bool set);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+void ath6kl_wmi_get_current_bssid(struct wmi *wmi, u8 *bssid);
+
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+
+s32 ath6kl_wmi_get_rate(s8 rate_index);
+
+int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+
+struct bss *ath6kl_wmi_find_ssid_node(struct wmi *wmi, u8 *ssid,
+ u32 ssid_len, bool is_wpa2,
+ bool match_ssid);
+
+void ath6kl_wmi_node_return(struct wmi *wmi, struct bss *bss);
+
+/* AP mode */
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+ bool rx_dot11_hdr, bool defrag_on_host);
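+
+/*
+ * Illustrative example (hypothetical helper, not a driver API): requesting
+ * the defaults described by wmi_rx_frame_format_cmd above - no rx meta
+ * data, 802.3 headers and target-side defragmentation.
+ */
+static inline int example_set_default_rx_format(struct wmi *wmi)
+{
+ return ath6kl_wmi_set_rx_frame_format_cmd(wmi, 0, false, false);
+}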
+
+void *ath6kl_wmi_init(void *devt);
+void ath6kl_wmi_shutdown(struct wmi *wmi);
+
+#endif /* WMI_H */