Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c | 117
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c      |  56
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c            | 701
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h       |  85
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c             | 229
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c     |  20
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c          | 662
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c        | 332
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c             | 258
9 files changed, 1333 insertions(+), 1127 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 5e86783d616b..01f248ba8fec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -55,6 +55,66 @@
#include "internal.h"
#include "iwl-prph.h"
+static void
+iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
+ struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
+ u32 *control_flags)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
+ u32 dbg_flags = 0;
+
+ if (!iwl_trans_dbg_ini_valid(trans)) {
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+
+ if (fw_mon->size) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM buffer destination\n");
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
+ }
+
+ goto out;
+ }
+
+ fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying SMEM buffer destination\n");
+
+ goto out;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_DRAM_PATH &&
+ trans->dbg.fw_mon_ini[alloc_id].num_frags) {
+ struct iwl_dram_data *frag =
+ &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
+ dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ }
+
+out:
+ if (dbg_flags)
+ *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
+}
+
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
@@ -86,34 +146,21 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
- IWL_PRPH_SCRATCH_MTR_FORMAT) |
- IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
- IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
- prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ IWL_PRPH_SCRATCH_MTR_FORMAT);
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
- /* Configure debug, for integration */
- if (!trans->dbg.ini_valid)
- iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->dbg.num_blocks) {
- prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans->dbg.fw_mon[0].physical);
- prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans->dbg.fw_mon[0].size);
- }
+ iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
+ &control_flags);
+ prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
- if (ret) {
- dma_free_coherent(trans->dev,
- sizeof(*prph_scratch),
- prph_scratch,
- trans_pcie->prph_scratch_dma_addr);
- return ret;
- }
+ if (ret)
+ goto err_free_prph_scratch;
+
/* Allocate prph information
* currently we don't assign to the prph info anything, but it would get
@@ -121,16 +168,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
- if (!prph_info)
- return -ENOMEM;
+ if (!prph_info) {
+ ret = -ENOMEM;
+ goto err_free_prph_scratch;
+ }
/* Allocate context info */
ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
sizeof(*ctxt_info_gen3),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL);
- if (!ctxt_info_gen3)
- return -ENOMEM;
+ if (!ctxt_info_gen3) {
+ ret = -ENOMEM;
+ goto err_free_prph_info;
+ }
ctxt_info_gen3->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
@@ -155,7 +206,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size));
ctxt_info_gen3->mcr_size =
- cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+ cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info;
@@ -180,12 +231,26 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
CSR_AUTO_FUNC_BOOT_ENA);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
else
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
return 0;
+
+err_free_prph_info:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_info),
+ prph_info,
+ trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+ dma_free_coherent(trans->dev,
+ sizeof(*prph_scratch),
+ prph_scratch,
+ trans_pcie->prph_scratch_dma_addr);
+ return ret;
+
}
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
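The error unwinding introduced above follows the usual kernel goto-cleanup idiom: each successful allocation gains a label, and a later failure jumps to the label that frees everything allocated so far, in reverse order. A minimal stand-alone sketch of that pattern, with hypothetical names (not the driver's actual code; needs <linux/dma-mapping.h> and <linux/sizes.h>):

static int example_init(struct device *dev)
{
	dma_addr_t a_phys, b_phys;
	void *a, *b;
	int ret;

	a = dma_alloc_coherent(dev, SZ_4K, &a_phys, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = dma_alloc_coherent(dev, SZ_4K, &b_phys, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;	/* undo only what already succeeded */
	}

	return 0;

err_free_a:
	dma_free_coherent(dev, SZ_4K, a, a_phys);
	return ret;
}
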
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index d38cefbb779e..acd01d86f101 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -57,6 +57,42 @@
#include "internal.h"
#include "iwl-prph.h"
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys,
+ int depth)
+{
+ void *result;
+
+ if (WARN(depth > 2,
+ "failed to allocate DMA memory not crossing 2^32 boundary"))
+ return NULL;
+
+ result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+ if (!result)
+ return NULL;
+
+ if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ void *old = result;
+ dma_addr_t oldphys = *phys;
+
+ result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+ phys,
+ depth + 1);
+ dma_free_coherent(trans->dev, size, old, oldphys);
+ }
+
+ return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+ size_t size,
+ dma_addr_t *phys)
+{
+ return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
u32 control_flags = 0, rb_size;
+ dma_addr_t phys;
int ret;
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
- &trans_pcie->ctxt_info_dma_addr,
- GFP_KERNEL);
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+ sizeof(*ctxt_info),
+ &phys);
if (!ctxt_info)
return -ENOMEM;
+ trans_pcie->ctxt_info_dma_addr = phys;
+
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -193,11 +232,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
- control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
- (RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
- IWL_CTXT_INFO_RB_CB_SIZE_POS) |
- (rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
+ WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
+ control_flags |=
+ u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+ IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
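The open-coded shift-and-or pairs removed above are replaced with u32_encode_bits() from <linux/bitfield.h>, which shifts a value into the field described by a mask, making the separate *_POS shift constants unnecessary. A quick illustration with a hypothetical field mask (the IWL_CTXT_INFO_* masks above play the same role):

#include <linux/bitfield.h>

#define EXAMPLE_FIELD	GENMASK(7, 4)	/* hypothetical 4-bit field */

u32 flags = u32_encode_bits(0x9, EXAMPLE_FIELD);	/* flags == 0x90 */
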
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index de711c1160d3..97f227f3cbc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -65,7 +65,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/acpi.h>
@@ -73,6 +72,7 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
+#include "iwl-prph.h"
#include "internal.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
@@ -563,28 +565,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)},
+
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
@@ -596,53 +592,44 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
+
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
- {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
@@ -671,6 +658,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
@@ -726,62 +714,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
@@ -821,34 +807,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
@@ -909,7 +895,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
@@ -986,23 +971,76 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .name = _name }
+
+static const struct iwl_dev_info iwl_dev_info_table[] = {
+#if IS_ENABLED(CONFIG_IWLMVM)
+ IWL_DEV_INFO(0x2526, 0x0010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0x8010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xA014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+ IWL_DEV_INFO(0x2526, 0xE014, iwl9260_2ac_160_cfg, iwl9260_160_name),
+
+ IWL_DEV_INFO(0x2526, 0x0030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x0038, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x003C, iwl9560_2ac_160_cfg, iwl9560_160_name),
+ IWL_DEV_INFO(0x2526, 0x4030, iwl9560_2ac_160_cfg, iwl9560_160_name),
+#endif /* CONFIG_IWLMVM */
+};
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ const struct iwl_cfg_trans_params *trans =
+ (struct iwl_cfg_trans_params *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
- int ret;
+ struct iwl_trans_pcie *trans_pcie;
+ unsigned long flags;
+ int i, ret;
+ /*
+ * This is needed for backwards compatibility with the old
+ * tables, so we don't need to change all the config structs
+ * at the same time. The cfg is used to compare with the old
+ * full cfg structs.
+ */
+ const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n"))
- return -EINVAL;
+ /* make sure trans is the first element in iwl_cfg */
+ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
- iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
+ iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans);
if (IS_ERR(iwl_trans))
return PTR_ERR(iwl_trans);
+ trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+
+ /* the trans_cfg should never change, so set it now */
+ iwl_trans->trans_cfg = trans;
+
+ for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
+ const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+ if ((dev_info->device == IWL_CFG_ANY ||
+ dev_info->device == pdev->device) &&
+ (dev_info->subdevice == IWL_CFG_ANY ||
+ dev_info->subdevice == pdev->subsystem_device)) {
+ iwl_trans->cfg = dev_info->cfg;
+ iwl_trans->name = dev_info->name;
+ goto found;
+ }
+ }
+
#if IS_ENABLED(CONFIG_IWLMVM)
/*
* special-case 7265D, it has the same PCI IDs.
@@ -1018,29 +1056,62 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
- cfg = cfg_7265d;
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
iwl_trans->cfg = cfg_7265d;
- }
- if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
- iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
- u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
- u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
- u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR);
-
- if (rf_id_chp == jf_chp_id) {
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwl9560_2ac_cfg_qnj_jf_b0;
- else
- cfg = &iwl22000_2ac_cfg_jf;
- } else if (rf_id_chp == hr_chp_id) {
- if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
- cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
- else
- cfg = &iwl22000_2ac_cfg_hr;
+ iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
+
+ if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
+ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
+ iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+ iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
+ iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+ iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
- iwl_trans->cfg = cfg;
+ } else if (cfg == &iwl_ax101_cfg_qu_hr) {
+ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
+ (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+ iwl_trans->cfg = &iwl_ax101_cfg_qu_hr;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+ iwl_trans->cfg = &iwl22000_2ax_cfg_jf;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
+ IWL_ERR(iwl_trans, "RF ID HRCDB is not supported\n");
+ return -EINVAL;
+ } else {
+ IWL_ERR(iwl_trans, "Unrecognized RF ID 0x%08x\n",
+ CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id));
+ return -EINVAL;
+ }
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ u32 hw_status;
+
+ hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
+ if (CSR_HW_RF_STEP(iwl_trans->hw_rf_id) == SILICON_B_STEP)
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+ CSR_HW_RF_STEP(iwl_trans->hw_rf_id) ==
+ SILICON_A_STEP)
+ iwl_trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
}
/*
@@ -1050,20 +1121,77 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* thing to do to support Qu C-step.
*/
if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
- if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+ if (cfg == &iwl_ax101_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+ else if (cfg == &iwl_ax201_cfg_qu_hr)
iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
- else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+ else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
- else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+ else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
- else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+ else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
- else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+ else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ else if (cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+ else if (cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
+ }
+
+ /* same thing for QuZ... */
+ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+ if (cfg == &iwl_ax101_cfg_qu_hr)
+ cfg = &iwl_ax101_cfg_quz_hr;
+ else if (cfg == &iwl_ax201_cfg_qu_hr)
+ cfg = &iwl_ax201_cfg_quz_hr;
+ else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+ cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+ cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+ cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+ else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+ cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
}
+
#endif
+ /*
+ * If we didn't set the cfg yet, assume the trans is actually
+ * a full cfg from the old tables.
+ */
+ if (!iwl_trans->cfg)
+ iwl_trans->cfg = cfg;
+
+found:
+ /* if we don't have a name yet, copy name from the old cfg */
+ if (!iwl_trans->name)
+ iwl_trans->name = iwl_trans->cfg->name;
+
+ if (iwl_trans->trans_cfg->mq_rx_supported) {
+ if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
+ ret = -EINVAL;
+ goto out_free_trans;
+ }
+ trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds;
+ } else {
+ trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
+ }
+
+ if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 &&
+ iwl_trans_grab_nic_access(iwl_trans, &flags)) {
+ u32 hw_step;
+
+ hw_step = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
+ hw_step |= ENABLE_WFPM;
+ iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, hw_step);
+ hw_step = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+ if (hw_step == 0x3)
+ iwl_trans->hw_rev = (iwl_trans->hw_rev & 0xFFFFFFF3) |
+ (SILICON_C_STEP << 2);
+ iwl_trans_release_nic_access(iwl_trans, &flags);
+ }
pci_set_drvdata(pdev, iwl_trans);
iwl_trans->drv = iwl_drv_start(iwl_trans);
@@ -1076,25 +1204,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* register transport layer debugfs here */
iwl_trans_pcie_dbgfs_register(iwl_trans);
- /* if RTPM is in use, enable it in our device */
- if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
- /* We explicitly set the device to active here to
- * clear contingent errors.
- */
- pm_runtime_set_active(&pdev->dev);
-
- pm_runtime_set_autosuspend_delay(&pdev->dev,
- iwlwifi_mod_params.d0i3_timeout);
- pm_runtime_use_autosuspend(&pdev->dev);
-
- /* We are not supposed to call pm_runtime_allow() by
- * ourselves, but let userspace enable runtime PM via
- * sysfs. However, since we don't enable this from
- * userspace yet, we need to allow/forbid() ourselves.
- */
- pm_runtime_allow(&pdev->dev);
- }
-
/* The PCI device starts with a reference taken and we are
* supposed to release it here. But to simplify the
* interaction with the opmode, we don't do it now, but let
@@ -1112,15 +1221,6 @@ static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
- /* if RTPM was in use, restore it to the state before probe */
- if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
- /* We should not call forbid here, but we do for now.
- * Check the comment to pm_runtime_allow() in
- * iwl_pci_probe().
- */
- pm_runtime_forbid(trans->dev);
- }
-
iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans);
@@ -1178,164 +1278,9 @@ static int iwl_pci_resume(struct device *device)
return 0;
}
-int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- if (test_bit(STATUS_FW_ERROR, &trans->status))
- return 0;
-
- set_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
-
- /* config the fw */
- ret = iwl_op_mode_enter_d0i3(trans->op_mode);
- if (ret == 1) {
- IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n");
- clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
- return -EBUSY;
- }
- if (ret)
- goto err;
-
- ret = wait_event_timeout(trans_pcie->d0i3_waitq,
- test_bit(STATUS_TRANS_IDLE, &trans->status),
- msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
- if (!ret) {
- IWL_ERR(trans, "Timeout entering D0i3\n");
- ret = -ETIMEDOUT;
- goto err;
- }
-
- clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
-
- return 0;
-err:
- clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
- iwl_trans_fw_error(trans);
- return ret;
-}
-
-int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int ret;
-
- /* sometimes a D0i3 entry is not followed through */
- if (!test_bit(STATUS_TRANS_IDLE, &trans->status))
- return 0;
-
- /* config the fw */
- ret = iwl_op_mode_exit_d0i3(trans->op_mode);
- if (ret)
- goto err;
-
- /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */
-
- ret = wait_event_timeout(trans_pcie->d0i3_waitq,
- !test_bit(STATUS_TRANS_IDLE, &trans->status),
- msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
- if (!ret) {
- IWL_ERR(trans, "Timeout exiting D0i3\n");
- ret = -ETIMEDOUT;
- goto err;
- }
-
- return 0;
-err:
- clear_bit(STATUS_TRANS_IDLE, &trans->status);
- iwl_trans_fw_error(trans);
- return ret;
-}
-
-#ifdef CONFIG_IWLWIFI_PCIE_RTPM
-static int iwl_pci_runtime_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *trans = pci_get_drvdata(pdev);
- int ret;
-
- IWL_DEBUG_RPM(trans, "entering runtime suspend\n");
-
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- ret = iwl_pci_fw_enter_d0i3(trans);
- if (ret < 0)
- return ret;
- }
-
- trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
-
- iwl_trans_d3_suspend(trans, false, false);
-
- return 0;
-}
-
-static int iwl_pci_runtime_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *trans = pci_get_drvdata(pdev);
- enum iwl_d3_status d3_status;
-
- IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n");
-
- iwl_trans_d3_resume(trans, &d3_status, false, false);
-
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
- return iwl_pci_fw_exit_d0i3(trans);
-
- return 0;
-}
-
-static int iwl_pci_system_prepare(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *trans = pci_get_drvdata(pdev);
-
- IWL_DEBUG_RPM(trans, "preparing for system suspend\n");
-
- /* This is called before entering system suspend and before
- * the runtime resume is called. Set the suspending flag to
- * prevent the wakelock from being taken.
- */
- trans->suspending = true;
-
- /* Wake the device up from runtime suspend before going to
- * platform suspend. This is needed because we don't know
- * whether wowlan any is set and, if it's not, mac80211 will
- * disconnect (in which case, we can't be in D0i3).
- */
- pm_runtime_resume(device);
-
- return 0;
-}
-
-static void iwl_pci_system_complete(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *trans = pci_get_drvdata(pdev);
-
- IWL_DEBUG_RPM(trans, "completing system suspend\n");
-
- /* This is called as a counterpart to the prepare op. It is
- * called either when suspending fails or when suspend
- * completed successfully. Now there's no risk of grabbing
- * the wakelock anymore, so we can release the suspending
- * flag.
- */
- trans->suspending = false;
-}
-#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
-
static const struct dev_pm_ops iwl_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
iwl_pci_resume)
-#ifdef CONFIG_IWLWIFI_PCIE_RTPM
- SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
- iwl_pci_runtime_resume,
- NULL)
- .prepare = iwl_pci_system_prepare,
- .complete = iwl_pci_system_complete,
-#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
};
#define IWL_PM_OPS (&iwl_dev_pm_ops)
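The new iwl_dev_info_table lookup in iwl_pci_probe() above treats IWL_CFG_ANY as a wildcard and takes the first row whose device and subdevice fields both match; only if no row matches does the probe fall back to the legacy full-cfg entries in iwl_hw_card_ids. A condensed sketch of the per-row test (the helper name is illustrative, not from the driver):

static bool dev_info_matches(const struct iwl_dev_info *di,
			     const struct pci_dev *pdev)
{
	return (di->device == IWL_CFG_ANY || di->device == pdev->device) &&
	       (di->subdevice == IWL_CFG_ANY ||
		di->subdevice == pdev->subsystem_device);
}
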
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 9f5d0fc839fe..72f144c3a46e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,6 +106,8 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
+ * @offset: indicates which offset of the page (in bytes)
+ * this buffer uses (if multiple RBs fit into one page)
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -113,6 +115,7 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
+ u32 offset;
};
/**
@@ -166,7 +169,7 @@ struct iwl_rx_completion_desc {
* @id: queue index
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
* Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
- * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
@@ -253,7 +256,8 @@ struct iwl_dma_ptr {
*/
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
- return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+ return ++index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
/**
@@ -263,7 +267,7 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
return READ_ONCE(*rb_stts);
@@ -280,7 +284,8 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
*/
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
- return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+ return --index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
struct iwl_cmd_meta {
@@ -303,7 +308,7 @@ struct iwl_cmd_meta {
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
+ void *cmd;
struct sk_buff *skb;
/* buffer to free after command completes */
const void *free_buf;
@@ -489,6 +494,7 @@ struct cont_rec {
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
@@ -508,11 +514,16 @@ struct cont_rec {
* @in_rescan: true if we have triggered a device rescan
* @base_rb_stts: base virtual address of receive buffer status for all queues
* @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: DMA mask to validate the actual address against,
+ * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: allocated page to still use parts of
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
@@ -556,9 +567,10 @@ struct iwl_trans_pcie {
void __iomem *hw_base;
bool ucode_write_complete;
+ bool sx_complete;
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t wait_command_queue;
- wait_queue_head_t d0i3_waitq;
+ wait_queue_head_t sx_waitq;
u8 page_offs, dev_cmd_offs;
@@ -570,6 +582,7 @@ struct iwl_trans_pcie {
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
u16 tfd_size;
+ u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
@@ -577,11 +590,17 @@ struct iwl_trans_pcie {
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
/*protect hw register */
spinlock_t reg_lock;
bool cmd_hold_nic_awake;
- bool ref_cmd_in_flight;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec fw_mon_data;
@@ -635,15 +654,15 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
* Convention: trans API functions: iwl_trans_pcie_XXX
* Other functions: iwl_pcie_XXX
*/
-struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg *cfg);
+struct iwl_trans
+*iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
/*****************************************************
* RX
******************************************************/
-int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
@@ -657,7 +676,6 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct iwl_rxq *rxq);
-int iwl_pcie_rx_alloc(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
@@ -671,6 +689,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
int queue_size);
@@ -687,7 +715,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -697,15 +725,13 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
+void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
{
- if (trans->cfg->use_tfh) {
+ if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
@@ -911,7 +937,7 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans->cfg->use_tfh)
+ if (trans->trans_cfg->use_tfh)
idx = iwl_pcie_get_cmd_index(txq, idx);
return txq->tfds + trans_pcie->tfd_size * idx;
@@ -955,7 +981,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
/*
* On 9000-series devices this bit isn't enabled by default, so
* when we power down the device we need set the bit to allow it
@@ -1045,7 +1071,7 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
- return (trans->dbg.dest_tlv || trans->dbg.ini_valid);
+ return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
@@ -1058,9 +1084,6 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
-int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
-int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
-
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */
@@ -1086,7 +1109,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb);
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
#endif
/* common functions that are used by gen3 transport */
@@ -1110,13 +1134,14 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
-void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
- bool low_power);
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
+ bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a2d709642b2a..427fcea5cb2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -200,12 +200,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- /* TODO: remove this for 22560 once fw does it */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* TODO: remove this once fw does it */
iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
- } else if (trans->cfg->mq_rx_supported) {
+ } else if (trans->trans_cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -232,7 +232,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
* 1. shadow registers aren't enabled
* 2. there is a chance that the NIC is asleep
*/
- if (!trans->cfg->base_params->shadow_reg_enable &&
+ if (!trans->trans_cfg->base_params->shadow_reg_enable &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
@@ -240,18 +240,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
rxq->need_update = true;
return;
}
}
rxq->write_actual = round_down(rxq->write, 8);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
- iwl_write32(trans, HBUS_TARG_WRPTR,
- (rxq->write_actual |
- ((FIRST_RX_QUEUE + rxq->id) << 16)));
- else if (trans->cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
@@ -279,7 +275,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -302,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_mem_buffer *rxb;
/*
@@ -322,11 +319,11 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
list);
list_del(&rxb->list);
rxb->invalid = false;
- /* 12 first bits are expected to be empty */
- WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* some low bits are expected to be unset (depending on hw) */
+ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
/* Point to Rx buffer via next RBD in circular buffer */
iwl_pcie_restock_bd(trans, rxq, rxb);
- rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+ rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
rxq->free_count--;
}
spin_unlock(&rxq->lock);
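The write pointer now wraps with (rxq->queue_size - 1) instead of the fixed MQ_RX_TABLE_MASK, which presumably relies on num_rbds being a power of two. A quick sketch of why the mask is then equivalent to a modulo (the queue size here is hypothetical):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int queue_size = 512;  /* assumed power of two */
        unsigned int write = 0;

        for (unsigned int i = 0; i < 1030; i++)
                write = (write + 1) & (queue_size - 1);

        assert(write == 1030 % queue_size);     /* mask == modulo for 2^n sizes */
        printf("write=%u\n", write);
        return 0;
}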
@@ -405,7 +402,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
- if (trans->cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
iwl_pcie_rxmq_restock(trans, rxq);
else
iwl_pcie_rxsq_restock(trans, rxq);
@@ -416,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
*
*/
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
- gfp_t priority)
+ u32 *offset, gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
struct page *page;
gfp_t gfp_mask = priority;
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
+ if (trans_pcie->alloc_page) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ /* recheck */
+ if (trans_pcie->alloc_page) {
+ *offset = trans_pcie->alloc_page_used;
+ page = trans_pcie->alloc_page;
+ trans_pcie->alloc_page_used += rbsize;
+ if (trans_pcie->alloc_page_used >= allocsize)
+ trans_pcie->alloc_page = NULL;
+ else
+ get_page(page);
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ return page;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
/* Alloc a new receive buffer */
page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
@@ -440,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
"Failed to alloc_pages\n");
return NULL;
}
+
+ if (2 * rbsize <= allocsize) {
+ spin_lock_bh(&trans_pcie->alloc_page_lock);
+ if (!trans_pcie->alloc_page) {
+ get_page(page);
+ trans_pcie->alloc_page = page;
+ trans_pcie->alloc_page_used = rbsize;
+ }
+ spin_unlock_bh(&trans_pcie->alloc_page_lock);
+ }
+
+ *offset = 0;
return page;
}
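To make the partial-page path easier to follow, here is a self-contained userspace model of the carve-up: each buffer handed out holds one page reference, and the alloc_page cache keeps its own reference until the page is fully consumed. The struct and the 2 KiB/8 KiB sizes are stand-ins, not the driver's types:

#include <stdio.h>

struct page { int refcount; };

static struct page *alloc_page_cache;   /* models trans_pcie->alloc_page */
static unsigned int alloc_page_used;

static void get_page(struct page *p) { p->refcount++; }

static struct page *rx_alloc(unsigned int *offset,
                             unsigned int rbsize, unsigned int allocsize)
{
        static struct page backing = { 1 };     /* stands in for alloc_pages() */
        struct page *page;

        if (alloc_page_cache) {
                *offset = alloc_page_used;
                page = alloc_page_cache;
                alloc_page_used += rbsize;
                if (alloc_page_used >= allocsize)
                        alloc_page_cache = NULL;        /* page fully carved up */
                else
                        get_page(page); /* one reference per outstanding buffer */
                return page;
        }

        page = &backing;
        *offset = 0;
        if (2 * rbsize <= allocsize) {  /* sharing only pays off for >1 buffer */
                get_page(page);         /* the cache holds its own reference */
                alloc_page_cache = page;
                alloc_page_used = rbsize;
        }
        return page;
}

int main(void)
{
        unsigned int off, rbsize = 2048, allocsize = 8192;

        for (int i = 0; i < 4; i++) {
                struct page *p = rx_alloc(&off, rbsize, allocsize);
                printf("buf %d: offset %u, refcount %d\n", i, off, p->refcount);
        }
        return 0;
}

With these numbers one 8 KiB page yields four 2 KiB buffers; the cache reference is effectively handed to the last buffer, so the final refcount equals the number of outstanding buffers.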
@@ -460,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
struct page *page;
while (1) {
+ unsigned int offset;
+
spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
spin_unlock(&rxq->lock);
@@ -467,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
}
spin_unlock(&rxq->lock);
- /* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, priority);
+ page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
if (!page)
return;
@@ -486,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
BUG_ON(rxb->page);
rxb->page = page;
+ rxb->offset = offset;
/* Get physical address of the RB */
rxb->page_dma =
- dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
@@ -514,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < RX_POOL_SIZE; i++) {
+ for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page)
continue;
dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
__free_pages(trans_pcie->rx_pool[i].page,
trans_pcie->rx_page_order);
trans_pcie->rx_pool[i].page = NULL;
@@ -572,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+ gfp_mask);
if (!page)
continue;
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, page,
+ rxb->offset,
+ trans_pcie->rx_buf_bytes,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
__free_pages(page, trans_pcie->rx_page_order);
@@ -682,7 +713,7 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
if (use_rx_td)
return sizeof(*rx_td);
else
- return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
sizeof(__le32);
}
@@ -690,8 +721,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct device *dev = trans->dev;
- bool use_rx_td = (trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ bool use_rx_td = (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210);
int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
if (rxq->bd)
@@ -712,7 +743,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
if (rxq->tr_tail)
@@ -735,14 +766,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
struct device *dev = trans->dev;
int i;
int free_size;
- bool use_rx_td = (trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560);
+ bool use_rx_td = (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210);
size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
sizeof(struct iwl_rb_status);
spin_lock_init(&rxq->lock);
- if (trans->cfg->mq_rx_supported)
- rxq->queue_size = MQ_RX_TABLE_SIZE;
+ if (trans->trans_cfg->mq_rx_supported)
+ rxq->queue_size = trans->cfg->num_rbds;
else
rxq->queue_size = RX_QUEUE_SIZE;
@@ -757,7 +788,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
if (!rxq->bd)
goto err;
- if (trans->cfg->mq_rx_supported) {
+ if (trans->trans_cfg->mq_rx_supported) {
rxq->used_bd = dma_alloc_coherent(dev,
(use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
&rxq->used_bd_dma,
@@ -784,11 +815,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
&rxq->cr_tail_dma, GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
- /*
- * W/A 22560 device step Z0 must be non zero bug
- * TODO: remove this when stop supporting Z0
- */
- *rxq->cr_tail = cpu_to_le16(500);
return 0;
@@ -802,13 +828,13 @@ err:
return -ENOMEM;
}
-int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
- size_t rb_stts_size = trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ size_t rb_stts_size = trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
if (WARN_ON(trans_pcie->rxq))
@@ -816,8 +842,18 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans)
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
- if (!trans_pcie->rxq)
- return -ENOMEM;
+ trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->rx_pool[0]),
+ GFP_KERNEL);
+ trans_pcie->global_table =
+ kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+ sizeof(trans_pcie->global_table[0]),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+ !trans_pcie->global_table) {
+ ret = -ENOMEM;
+ goto err;
+ }
spin_lock_init(&rba->lock);
@@ -854,6 +890,8 @@ err:
trans_pcie->base_rb_stts = NULL;
trans_pcie->base_rb_stts_dma = 0;
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
return ret;
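Note the error path frees rx_pool, global_table and rxq unconditionally; kfree(NULL) is a no-op, so whichever kcalloc() failed, one common cleanup works. A minimal userspace equivalent of the pattern (the counts are made up):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* kcalloc-style helper: zeroed array with multiply-overflow check */
static void *xcalloc(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;
        return calloc(n, size);
}

int main(void)
{
        size_t pool_size = 512 + 4;     /* hypothetical RX_POOL_SIZE() result */
        void *rxq = xcalloc(2, 256);
        void *rx_pool = xcalloc(pool_size, 32);
        void *global_table = xcalloc(pool_size, sizeof(void *));

        if (!rxq || !rx_pool || !global_table) {
                /* free(NULL) is fine, mirroring the kfree() cleanup above */
                free(rx_pool);
                free(global_table);
                free(rxq);
                return 1;
        }
        puts("pools allocated");
        free(rx_pool);
        free(global_table);
        free(rxq);
        return 0;
}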
@@ -1033,7 +1071,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
-int _iwl_pcie_rx_init(struct iwl_trans *trans)
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *def_rxq;
@@ -1075,7 +1113,8 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0,
- (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(__le16) : sizeof(struct iwl_rb_status));
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1088,13 +1127,12 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
}
/* move the pool to the default queue and allocator ownerships */
- queue_size = trans->cfg->mq_rx_supported ?
- MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ queue_size = trans->trans_cfg->mq_rx_supported ?
+ trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
- BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
- ARRAY_SIZE(trans_pcie->rx_pool));
+
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1120,7 +1158,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (ret)
return ret;
- if (trans->cfg->mq_rx_supported)
+ if (trans->trans_cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
else
iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
@@ -1151,8 +1189,8 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
- size_t rb_stts_size = trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560 ?
+ size_t rb_stts_size = trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210 ?
sizeof(__le16) : sizeof(struct iwl_rb_status);
/*
@@ -1185,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
}
+ kfree(trans_pcie->rx_pool);
+ kfree(trans_pcie->global_table);
kfree(trans_pcie->rxq);
+
+ if (trans_pcie->alloc_page)
+ __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1243,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
if (WARN_ON(!rxb))
@@ -1257,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
bool reclaim;
int index, cmd_index, len;
struct iwl_rx_cmd_buffer rxcb = {
- ._offset = offset,
+ ._offset = rxb->offset + offset,
._rx_page_order = trans_pcie->rx_page_order,
._page = rxb->page,
._page_stolen = false,
@@ -1347,7 +1390,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
page_stolen |= rxcb._page_stolen;
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
break;
offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
}
@@ -1363,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* rx_free list for reuse later. */
if (rxb->page != NULL) {
rxb->page_dma =
- dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
+ dma_map_page(trans->dev, rxb->page, rxb->offset,
+ trans_pcie->rx_buf_bytes,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
@@ -1392,19 +1435,18 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
- if (!trans->cfg->mq_rx_supported) {
+ if (!trans->trans_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
return rxb;
}
- /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ vid = le16_to_cpu(rxq->cd[i].rbid);
else
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+ vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
- if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+ if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
goto out_err;
rxb = trans_pcie->global_table[vid - 1];
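Pre-AX210 hardware packs the virtual buffer ID into the low 12 bits of the 32-bit BD word, while AX210 reports a full 16-bit rbid; either way the lookup into global_table[] is 1-based. A tiny illustration with invented register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t bd_32 = 0xABCD1234;    /* hypothetical pre-AX210 BD word */
        uint16_t rbid = 0x0234;         /* hypothetical AX210 rbid */

        uint16_t vid_old = bd_32 & 0x0FFF;      /* 12-bit VID: 0x234 */
        uint16_t vid_new = rbid;                /* full 16 bits */

        /* both then index global_table[vid - 1] after the range check */
        printf("pre-AX210 vid=%u, AX210 vid=%u\n", vid_old, vid_new);
        return 0;
}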
@@ -1429,6 +1471,7 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct napi_struct *napi;
struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
@@ -1515,7 +1558,7 @@ out:
/* Backtrack one entry */
rxq->read = i;
/* update cr tail with the rxq read pointer */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
*rxq->cr_tail = cpu_to_le16(r);
spin_unlock(&rxq->lock);
@@ -1534,8 +1577,16 @@ out:
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- if (rxq->napi.poll)
- napi_gro_flush(&rxq->napi, false);
+ napi = &rxq->napi;
+ if (napi->poll) {
+ napi_gro_flush(napi, false);
+
+ if (napi->rx_count) {
+ netif_receive_skb_list(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+ }
+ }
iwl_pcie_rxq_restock(trans, rxq);
}
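After the GRO flush the handler now also hands any packets batched on napi->rx_list up the stack itself, then reinitializes the list and zeroes rx_count. A simplified userspace model of that drain step, using plain pointers instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
        int id;
        struct pkt *next;
};

static struct pkt *rx_list;     /* models napi->rx_list */
static int rx_count;            /* models napi->rx_count */

static void rx_list_add(int id)
{
        struct pkt *p = malloc(sizeof(*p));

        p->id = id;
        p->next = rx_list;
        rx_list = p;
        rx_count++;
}

/* models netif_receive_skb_list() + INIT_LIST_HEAD() + rx_count = 0 */
static void rx_list_flush(void)
{
        while (rx_list) {
                struct pkt *p = rx_list;

                rx_list = p->next;
                printf("deliver pkt %d\n", p->id);
                free(p);
        }
        rx_count = 0;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                rx_list_add(i);
        if (rx_count)
                rx_list_flush();
        return 0;
}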
@@ -1597,7 +1648,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
return;
}
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
if (!trans_pcie->txq[i])
continue;
del_timer(&trans_pcie->txq[i]->stuck_timer);
@@ -1838,7 +1889,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
if (inta & CSR_INT_BIT_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->cfg->gen2) {
+ if (trans->trans_cfg->gen2) {
/*
* We can restock, since firmware configured
* the RFH
@@ -2152,8 +2203,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
@@ -2179,29 +2229,30 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
IWL_DEBUG_ISR(trans, "Alive interrupt\n");
isr_stats->alive++;
- if (trans->cfg->gen2) {
+ if (trans->trans_cfg->gen2) {
/* We can restock, since firmware configured the RFH */
iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
}
}
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
- inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
- /* Reflect IML transfer status */
- int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ u32 sleep_notif =
+ le32_to_cpu(trans_pcie->prph_info->sleep_notif);
+ if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
+ sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
+ IWL_DEBUG_ISR(trans,
+ "Sx interrupt: sleep notification = 0x%x\n",
+ sleep_notif);
+ trans_pcie->sx_complete = true;
+ wake_up(&trans_pcie->sx_waitq);
+ } else {
+ /* uCode wakes up after power-down sleep */
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
- IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
- if (res == IWL_IMAGE_RESP_FAIL) {
- isr_stats->sw++;
- iwl_pcie_irq_handle_error(trans);
+ isr_stats->wakeup++;
}
- } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
- /* uCode wakes up after power-down sleep */
- IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
- iwl_pcie_rxq_check_wrptr(trans);
- iwl_pcie_txq_check_wrptrs(trans);
-
- isr_stats->wakeup++;
}
if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 8d17e68577fd..19a2c72081ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -92,7 +92,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
return ret;
@@ -132,11 +132,10 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -147,9 +146,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
- /* Stop dbgc before stopping device */
- iwl_fw_dbg_stop_recording(trans, NULL);
-
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -171,14 +167,14 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
@@ -218,7 +214,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_prepare_card_hw(trans);
}
-void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
@@ -226,7 +222,7 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_gen2_stop_device(trans, low_power);
+ _iwl_trans_pcie_gen2_stop_device(trans);
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
@@ -343,7 +339,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..38d8fe21690a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -62,14 +62,12 @@
*
*****************************************************************************/
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
-#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/wait.h>
@@ -81,6 +79,7 @@
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
+#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
@@ -185,40 +184,42 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
- BIT(trans->cfg->csr->flag_sw_reset));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
- int i;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- for (i = 0; i < trans->dbg.num_blocks; i++) {
- dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
- trans->dbg.fw_mon[i].block,
- trans->dbg.fw_mon[i].physical);
- trans->dbg.fw_mon[i].block = NULL;
- trans->dbg.fw_mon[i].physical = 0;
- trans->dbg.fw_mon[i].size = 0;
- trans->dbg.num_blocks--;
- }
+ if (!fw_mon->size)
+ return;
+
+ dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
+ fw_mon->physical);
+
+ fw_mon->block = NULL;
+ fw_mon->physical = 0;
+ fw_mon->size = 0;
}
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
u8 max_power, u8 min_power)
{
- void *cpu_addr = NULL;
- dma_addr_t phys = 0;
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
+ void *block = NULL;
+ dma_addr_t physical = 0;
u32 size = 0;
u8 power;
+ if (fw_mon->size)
+ return;
+
for (power = max_power; power >= min_power; power--) {
size = BIT(power);
- cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
- GFP_KERNEL | __GFP_NOWARN |
- __GFP_ZERO | __GFP_COMP);
- if (!cpu_addr)
+ block = dma_alloc_coherent(trans->dev, size, &physical,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!block)
continue;
IWL_INFO(trans,
@@ -227,7 +228,7 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
break;
}
- if (WARN_ON_ONCE(!cpu_addr))
+ if (WARN_ON_ONCE(!block))
return;
if (power != max_power)
@@ -236,10 +237,9 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
- trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
- trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
- trans->dbg.num_blocks++;
+ fw_mon->block = block;
+ fw_mon->physical = physical;
+ fw_mon->size = size;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@@ -256,11 +256,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
max_power))
return;
- /*
- * This function allocats the default fw monitor.
- * The optional additional ones will be allocated in runtime
- */
- if (trans->dbg.num_blocks)
+ if (trans->dbg.fw_mon.size)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
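The block allocator walks power-of-two sizes from max_power down to min_power (11, i.e. 2 KiB, per the call above) and keeps the first block it is granted. A userspace sketch of that fallback loop, with malloc() standing in for dma_alloc_coherent() and an assumed max_power:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        uint8_t max_power = 14, min_power = 11; /* 16 KiB down to 2 KiB; 14 is assumed */
        uint8_t power;
        void *block = NULL;
        uint32_t size = 0;

        /* try the largest size first, halving until an allocation succeeds */
        for (power = max_power; power >= min_power; power--) {
                size = 1U << power;
                block = malloc(size);
                if (block)
                        break;
        }
        if (!block)
                return 1;       /* mirrors the WARN_ON_ONCE(!block) bail-out */

        printf("monitor buffer: %u bytes (2^%u)\n", size, power);
        free(block);
        return 0;
}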
@@ -305,18 +301,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
u16 cap;
/*
- * HW bug W/A for instability in PCIe bus L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
*/
+ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
- iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
- else
- iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -343,7 +334,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
@@ -367,10 +358,10 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
- if (trans->cfg->base_params->pll_cfg)
+ if (trans->trans_cfg->base_params->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
return ret;
@@ -442,7 +433,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
@@ -491,8 +482,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
@@ -514,12 +504,11 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;
/* stop device's busmaster DMA activity */
- iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
- BIT(trans->cfg->csr->flag_stop_master));
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
- ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset,
- BIT(trans->cfg->csr->flag_master_dis),
- BIT(trans->cfg->csr->flag_master_dis), 100);
+ ret = iwl_poll_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -535,10 +524,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
iwl_pcie_apm_init(trans);
/* inform ME that we are leaving */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+ else if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -567,8 +557,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
@@ -595,7 +584,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
if (iwl_pcie_tx_init(trans))
return -ENOMEM;
- if (trans->cfg->base_params->shadow_reg_enable) {
+ if (trans->trans_cfg->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
@@ -833,7 +822,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (trans->cfg->use_tfh) {
+ if (trans->trans_cfg->use_tfh) {
if (cpu == 1)
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFF);
@@ -893,24 +882,51 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
+static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
+{
+ enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
+ &trans->dbg.fw_mon_cfg[alloc_id];
+ struct iwl_dram_data *frag;
+
+ if (!iwl_trans_dbg_ini_valid(trans))
+ return;
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) ==
+ IWL_FW_INI_LOCATION_SRAM_PATH) {
+ IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
+ /* set sram monitor by enabling bit 7 */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+
+ return;
+ }
+
+ if (le32_to_cpu(fw_mon_cfg->buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH ||
+ !trans->dbg.fw_mon_ini[alloc_id].num_frags)
+ return;
+
+ frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
+
+ IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
+ alloc_id);
+
+ iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ frag->physical >> MON_BUFF_SHIFT_VER2);
+ iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (frag->physical + frag->size - 256) >>
+ MON_BUFF_SHIFT_VER2);
+}
+
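The DRAM fragment is programmed into the monitor base/end registers in shifted units; the end register points at the buffer's final 256-byte block, hence the '- 256'. A quick check with hypothetical values, assuming MON_BUFF_SHIFT_VER2 is 8 (256-byte units):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int shift = 8;                 /* assumed MON_BUFF_SHIFT_VER2 */
        uint64_t physical = 0x80000000ULL;      /* hypothetical fragment base */
        uint32_t size = 0x400000;               /* hypothetical 4 MiB fragment */

        uint32_t base_reg = (uint32_t)(physical >> shift);
        uint32_t end_reg = (uint32_t)((physical + size - 256) >> shift);

        printf("base=0x%x end=0x%x\n", base_reg, end_reg);
        /* (end - base + 1) units of 256 bytes span the whole buffer */
        printf("span=%u bytes\n", (end_reg - base_reg + 1) << shift);
        return 0;
}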
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
+ const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
int i;
- if (trans->dbg.ini_valid) {
- if (!trans->dbg.num_blocks)
- return;
-
- IWL_DEBUG_FW(trans,
- "WRT: applying DRAM buffer[0] destination\n");
- iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->dbg.fw_mon[0].physical >>
- MON_BUFF_SHIFT_VER2);
- iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- MON_BUFF_SHIFT_VER2);
+ if (iwl_trans_dbg_ini_valid(trans)) {
+ iwl_pcie_apply_destination_ini(trans);
return;
}
@@ -961,20 +977,17 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans->dbg.fw_mon[0].physical >>
- dest->base_shift);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ fw_mon->physical >> dest->base_shift);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size - 256) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size -
+ 256) >> dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >>
- dest->end_shift);
+ (fw_mon->physical + fw_mon->size) >>
+ dest->end_shift);
}
}
@@ -1007,15 +1020,15 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_pcie_alloc_fw_monitor(trans, 0);
+ trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
- if (trans->dbg.fw_mon[0].size) {
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ if (fw_mon->size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans->dbg.fw_mon[0].physical >> 4);
+ fw_mon->physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans->dbg.fw_mon[0].physical +
- trans->dbg.fw_mon[0].size) >> 4);
+ (fw_mon->physical + fw_mon->size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@@ -1114,30 +1127,12 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
-static struct iwl_causes_list causes_list_v2[] = {
- {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
- {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
- {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
- {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
- {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
- {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
- {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
- {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
- {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
- {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
- {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
- {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
- {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i, arr_size =
- (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
- ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
+ int i, arr_size = ARRAY_SIZE(causes_list);
+ struct iwl_causes_list *causes = causes_list;
/*
* Access all non RX causes and map them to the default irq.
@@ -1145,10 +1140,6 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < arr_size; i++) {
- struct iwl_causes_list *causes =
- (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ?
- causes_list : causes_list_v2;
-
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
causes[i].cause_num);
@@ -1190,7 +1181,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
struct iwl_trans *trans = trans_pcie->trans;
if (!trans_pcie->msix_enabled) {
- if (trans->cfg->mq_rx_supported &&
+ if (trans->trans_cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE);
@@ -1231,7 +1222,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1242,9 +1233,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
trans_pcie->is_down = true;
- /* Stop dbgc before stopping device */
- iwl_fw_dbg_stop_recording(trans, NULL);
-
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@ -1274,7 +1262,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1401,7 +1389,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
ret = iwl_pcie_load_given_ucode_8000(trans, fw);
else
ret = iwl_pcie_load_given_ucode(trans, fw);
@@ -1451,7 +1439,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
@@ -1459,7 +1447,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_stop_device(trans, low_power);
+ _iwl_trans_pcie_stop_device(trans);
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
@@ -1474,22 +1462,16 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->cfg->gen2)
- _iwl_trans_pcie_gen2_stop_device(trans, true);
+ if (trans->trans_cfg->gen2)
+ _iwl_trans_pcie_gen2_stop_device(trans);
else
- _iwl_trans_pcie_stop_device(trans, true);
+ _iwl_trans_pcie_stop_device(trans);
}
}
-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
+void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
+ bool test, bool reset)
{
- if (!reset) {
- /* Enable persistence mode to avoid reset */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
- }
-
iwl_disable_interrupts(trans);
/*
@@ -1504,9 +1486,8 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_init_done));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (reset) {
/*
@@ -1520,6 +1501,42 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_pcie_set_pwr(trans, true);
}
+static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
+ bool reset)
+{
+ int ret;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ /*
+ * On family IWL_DEVICE_FAMILY_AX210 and above, persist mode is set by the FW.
+ */
+ if (!reset && trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ /* Enable persistence mode to avoid reset */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_SUSPEND);
+
+ ret = wait_event_timeout(trans_pcie->sx_waitq,
+ trans_pcie->sx_complete, 2 * HZ);
+ /*
+ * Clear the flag so a stale value cannot satisfy the resume wait.
+ */
+ trans_pcie->sx_complete = false;
+
+ if (!ret) {
+ IWL_ERR(trans, "Timeout entering D3\n");
+ return -ETIMEDOUT;
+ }
+ }
+ iwl_pcie_d3_complete_suspend(trans, test, reset);
+
+ return 0;
+}
+
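The new suspend path is a flag-plus-waitqueue handshake: ring the ISR6 doorbell, sleep on sx_waitq for up to 2 s, and let the MSI-X handler (the sleep-notification branch in rx.c above) set sx_complete and wake the waiter. A self-contained pthread model of the same shape, with invented timing (build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool sx_complete;        /* models trans_pcie->sx_complete */

/* models the MSI-X ISR seeing IWL_D3_SLEEP_STATUS_SUSPEND */
static void *isr_thread(void *arg)
{
        (void)arg;
        usleep(100 * 1000);             /* pretend firmware latency */
        pthread_mutex_lock(&lock);
        sx_complete = true;
        pthread_cond_signal(&cond);     /* wake_up(&trans_pcie->sx_waitq) */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t isr;
        struct timespec deadline;
        int err = 0;

        pthread_create(&isr, NULL, isr_thread, NULL);

        /* "ring the doorbell", then wait up to 2 s for the completion */
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 2;

        pthread_mutex_lock(&lock);
        while (!sx_complete && !err)
                err = pthread_cond_timedwait(&cond, &lock, &deadline);
        sx_complete = false;    /* clear for the next handshake */
        pthread_mutex_unlock(&lock);

        pthread_join(isr, NULL);
        puts(err ? "Timeout entering D3" : "D3 handshake complete");
        return err ? 1 : 0;
}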
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
bool test, bool reset)
@@ -1531,13 +1548,13 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (test) {
iwl_enable_interrupts(trans);
*status = IWL_D3_STATUS_ALIVE;
- return 0;
+ goto out;
}
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
if (ret)
return ret;
@@ -1557,7 +1574,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
iwl_trans_pcie_tx_reset(trans);
@@ -1578,17 +1595,38 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
else
*status = IWL_D3_STATUS_ALIVE;
+out:
+ if (*status == IWL_D3_STATUS_ALIVE &&
+ trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ trans_pcie->sx_complete = false;
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_RESUME);
+
+ ret = wait_event_timeout(trans_pcie->sx_waitq,
+ trans_pcie->sx_complete, 2 * HZ);
+ /*
+ * Clear the flag so a stale value cannot satisfy the next suspend wait.
+ */
+ trans_pcie->sx_complete = false;
+
+ if (!ret) {
+ IWL_ERR(trans, "Timeout exiting D3\n");
+ return -ETIMEDOUT;
+ }
+ }
return 0;
}
-static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
- struct iwl_trans *trans)
+static void
+iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+ struct iwl_trans *trans,
+ const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
- if (!trans->cfg->mq_rx_supported)
+ if (!cfg_trans->mq_rx_supported)
goto enable_msi;
max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
@@ -1709,7 +1747,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
u32 hpm, wprot;
- switch (trans->cfg->device_family) {
+ switch (trans->trans_cfg->device_family) {
case IWL_DEVICE_FAMILY_9000:
wprot = PREG_PRPH_WPROT_9000;
break;
@@ -1736,7 +1774,30 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
return 0;
}
-static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+ int ret;
+
+ ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ if (ret < 0)
+ return ret;
+
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ udelay(20);
+ iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_PG_EN |
+ HPM_HIPM_GEN_CFG_CR_SLP_EN);
+ udelay(20);
+ iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+ HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+ iwl_trans_pcie_sw_reset(trans);
+
+ return 0;
+}
+
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
@@ -1755,6 +1816,13 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
iwl_trans_pcie_sw_reset(trans);
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ trans->cfg->integrated) {
+ err = iwl_pcie_gen2_force_power_gating(trans);
+ if (err)
+ return err;
+ }
+
err = iwl_pcie_apm_init(trans);
if (err)
return err;
@@ -1772,20 +1840,16 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* ...rfkill can call stop_device and set it false if needed */
iwl_pcie_check_hw_rf_kill(trans);
- /* Make sure we sync here, because we'll need full access later */
- if (low_power)
- pm_runtime_resume(trans->dev);
-
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
mutex_lock(&trans_pcie->mutex);
- ret = _iwl_trans_pcie_start_hw(trans, low_power);
+ ret = _iwl_trans_pcie_start_hw(trans);
mutex_unlock(&trans_pcie->mutex);
return ret;
@@ -1828,7 +1892,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
return 0x00FFFFFF;
else
return 0x000FFFFF;
@@ -1872,6 +1936,11 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
trans_pcie->rx_page_order =
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+ trans_pcie->rx_buf_bytes =
+ iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
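supported_dma_mask feeds the WARN_ON() in the rxmq restock path above: pre-AX210 hardware needs the low 12 bits of an RB address clear (4 KiB units), AX210 only the low 11 (2 KiB). A short check using the kernel's DMA_BIT_MASK() definition and made-up addresses:

#include <stdint.h>
#include <stdio.h>

/* same definition as include/linux/dma-mapping.h */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        uint64_t pre_ax210 = DMA_BIT_MASK(12);
        uint64_t ax210 = DMA_BIT_MASK(11);
        uint64_t dma = 0x12345800ULL;   /* 2 KiB aligned, not 4 KiB */

        printf("pre-AX210 ok: %d\n", !(dma & pre_ax210));       /* 0: would WARN */
        printf("AX210 ok:     %d\n", !(dma & ax210));           /* 1 */
        return 0;
}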
@@ -1899,7 +1968,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
- if (trans->cfg->gen2)
+ if (trans->trans_cfg->gen2)
iwl_pcie_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
@@ -1981,8 +2050,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
/*
@@ -2006,8 +2075,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_val_mac_access_en),
- (BIT(trans->cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
@@ -2089,7 +2158,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2196,7 +2265,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
if (i == trans_pcie->cmd_queue)
@@ -2227,7 +2296,7 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
bool active;
u8 fifo;
- if (trans->cfg->use_tfh) {
+ if (trans->trans_cfg->use_tfh) {
IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
txq->read_ptr, txq->write_ptr);
/* TODO: access new SCD registers and dump them */
@@ -2244,10 +2313,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
@@ -2335,7 +2404,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
int ret = 0;
/* waiting for all the tx frames complete might take a while */
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ for (cnt = 0;
+ cnt < trans->trans_cfg->base_params->num_of_queues;
+ cnt++) {
if (cnt == trans_pcie->cmd_queue)
continue;
@@ -2363,37 +2434,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
-static void iwl_trans_pcie_ref(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (iwlwifi_mod_params.d0i3_disable)
- return;
-
- pm_runtime_get(&trans_pcie->pci_dev->dev);
-
-#ifdef CONFIG_PM
- IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
- atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
-#endif /* CONFIG_PM */
-}
-
-static void iwl_trans_pcie_unref(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (iwlwifi_mod_params.d0i3_disable)
- return;
-
- pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
- pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
-
-#ifdef CONFIG_PM
- IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
- atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
-#endif /* CONFIG_PM */
-}
-
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -2510,7 +2550,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
int ret;
size_t bufsz;
- bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
+ bufsz = sizeof(char) * 75 *
+ trans->trans_cfg->base_params->num_of_queues;
if (!trans_pcie->txq_memory)
return -EAGAIN;
@@ -2519,7 +2560,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
if (!buf)
return -ENOMEM;
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ for (cnt = 0;
+ cnt < trans->trans_cfg->base_params->num_of_queues;
+ cnt++) {
txq = trans_pcie->txq[cnt];
pos += scnprintf(buf + pos, bufsz - pos,
"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
@@ -2542,7 +2585,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
char *buf;
int pos = 0, i, ret;
- size_t bufsz = sizeof(buf);
+ size_t bufsz;
bufsz = sizeof(char) * 121 * trans->num_rx_queues;
@@ -2784,7 +2827,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
+ void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@@ -2823,7 +2866,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
- size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
+ size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@@ -2916,7 +2959,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
int allocated_rb_nums)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+ int max_len = trans_pcie->rx_buf_bytes;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
@@ -2942,9 +2985,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */
- rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
- max_len,
- DMA_FROM_DEVICE);
+ rxb->page_dma = dma_map_page(trans->dev, rxb->page,
+ rxb->offset, max_len,
+ DMA_FROM_DEVICE);
*data = iwl_fw_error_next_data(*data);
}
@@ -2989,7 +3032,7 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
(*data)->len = cpu_to_le32(fh_regs_len);
val = (void *)(*data)->data;
- if (!trans->cfg->gen2)
+ if (!trans->trans_cfg->gen2)
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
i += sizeof(u32))
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
@@ -3037,7 +3080,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
{
u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
write_ptr = DBGC_CUR_DBGBUF_STATUS;
@@ -3057,7 +3100,7 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, base));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
fw_mon_data->fw_mon_base_high_ptr =
cpu_to_le32(iwl_read_prph(trans, base_high));
write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
@@ -3070,12 +3113,13 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
u32 monitor_len)
{
+ struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
u32 len = 0;
if (trans->dbg.dest_tlv ||
- (trans->dbg.num_blocks &&
- (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
+ (fw_mon->size &&
+ (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3084,12 +3128,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans->dbg.num_blocks) {
- memcpy(fw_mon_data->data,
- trans->dbg.fw_mon[0].block,
- trans->dbg.fw_mon[0].size);
-
- monitor_len = trans->dbg.fw_mon[0].size;
+ if (fw_mon->size) {
+ memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
+ monitor_len = fw_mon->size;
} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
@@ -3128,11 +3169,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
- if (trans->dbg.num_blocks) {
+ if (trans->dbg.fw_mon.size) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->dbg.fw_mon[0].size;
- return trans->dbg.fw_mon[0].size;
+ trans->dbg.fw_mon.size;
+ return trans->dbg.fw_mon.size;
} else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
@@ -3158,7 +3199,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
trans->dbg.dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->cfg->device_family >=
+ if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_8000 ||
trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg.dest_tlv->end_shift);
@@ -3184,7 +3225,7 @@ static struct iwl_trans_dump_data
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
- !trans->cfg->mq_rx_supported &&
+ !trans->trans_cfg->mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
if (!dump_mask)
@@ -3209,7 +3250,7 @@ static struct iwl_trans_dump_data
/* FH registers */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
- if (trans->cfg->gen2)
+ if (trans->trans_cfg->gen2)
len += sizeof(*data) +
(iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
@@ -3233,7 +3274,7 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+ if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3255,11 +3296,17 @@ static struct iwl_trans_dump_data
ptr = cmdq->write_ptr;
for (i = 0; i < cmdq->n_window; i++) {
u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u8 tfdidx;
u32 caplen, cmdlen;
+ if (trans->trans_cfg->use_tfh)
+ tfdidx = idx;
+ else
+ tfdidx = ptr;
+
cmdlen = iwl_trans_pcie_get_cmdlen(trans,
- cmdq->tfds +
- tfd_size * ptr);
+ (u8 *)cmdq->tfds +
+ tfd_size * tfdidx);
caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
if (cmdlen) {
@@ -3288,7 +3335,8 @@ static struct iwl_trans_dump_data
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
+ if (trans->trans_cfg->gen2 &&
+ dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
u32 page_len = trans->init_dram.paging[i].size;
@@ -3315,18 +3363,11 @@ static struct iwl_trans_dump_data
#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
- if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
- (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
- return iwl_pci_fw_enter_d0i3(trans);
-
return 0;
}
static void iwl_trans_pcie_resume(struct iwl_trans *trans)
{
- if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
- (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
- iwl_pci_fw_exit_d0i3(trans);
}
#endif /* CONFIG_PM_SLEEP */
@@ -3345,8 +3386,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.grab_nic_access = iwl_trans_pcie_grab_nic_access, \
.release_nic_access = iwl_trans_pcie_release_nic_access, \
.set_bits_mask = iwl_trans_pcie_set_bits_mask, \
- .ref = iwl_trans_pcie_ref, \
- .unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume, \
@@ -3400,6 +3439,8 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.tx = iwl_trans_pcie_gen2_tx,
.reclaim = iwl_trans_pcie_reclaim,
+ .set_q_ptrs = iwl_trans_pcie_set_q_ptrs,
+
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
.txq_free = iwl_trans_pcie_dyn_txq_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
@@ -3410,23 +3451,39 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg *cfg)
+ const struct pci_device_id *ent,
+ const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size;
+ int ret, addr_size, txcmd_size, txcmd_align;
+ const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+ if (!cfg_trans->gen2) {
+ ops = &trans_ops_pcie;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- if (cfg->gen2)
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, cfg, &trans_ops_pcie_gen2);
- else
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
- &pdev->dev, cfg, &trans_ops_pcie);
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ txcmd_size, txcmd_align);
if (!trans)
return ERR_PTR(-ENOMEM);
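
The WARN_ON above relies on a simple invariant: when txcmd_align divides PAGE_SIZE and txcmd_size stays strictly below txcmd_align, an allocation aligned to txcmd_align can never straddle a page boundary, because the alignment slots tile each page exactly. A minimal standalone sketch of that invariant (illustration only, with made-up sizes rather than the real structure sizes):

#include <assert.h>
#include <stddef.h>

#define PG_SIZE 4096u   /* stand-in for PAGE_SIZE */

static int crosses_page(size_t addr, size_t size)
{
    return (addr / PG_SIZE) != ((addr + size - 1) / PG_SIZE);
}

int main(void)
{
    const size_t align = 128;   /* e.g. the gen3 txcmd_align */
    const size_t size = 120;    /* anything strictly below align */
    size_t addr;

    /* every align-aligned start keeps the object inside one page */
    for (addr = 0; addr < 64 * PG_SIZE; addr += align)
        assert(!crosses_page(addr, size));
    return 0;
}
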
@@ -3436,8 +3493,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
+ spin_lock_init(&trans_pcie->alloc_page_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!trans_pcie->rba.alloc_wq) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->tso_hdr_page) {
ret = -ENOMEM;
@@ -3445,7 +3512,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
trans_pcie->debug_rfkill = -1;
- if (!cfg->base_params->pcie_l1_allowed) {
+ if (!cfg_trans->base_params->pcie_l1_allowed) {
/*
* W/A - seems to solve weird behavior. We need to remove this
* if we don't want to stay in L1 all the time. This wastes a
@@ -3458,7 +3525,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->def_rx_queue = 0;
- if (cfg->use_tfh) {
+ if (cfg_trans->use_tfh) {
addr_size = 64;
trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
@@ -3520,9 +3587,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* "dash" value). To keep hw_rev backwards compatible - we'll store it
* in the old format.
*/
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
- unsigned long flags;
-
+ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) {
trans->hw_rev = (trans->hw_rev & 0xfff0) |
(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
@@ -3536,98 +3601,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* in-order to recognize C step driver should read chip version
* id located at the AUX bus MISC address space.
*/
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_finish_nic_init(trans, cfg_trans);
if (ret)
goto out_no_pci;
- if (iwl_trans_grab_nic_access(trans, &flags)) {
- u32 hw_step;
-
- hw_step = iwl_read_umac_prph_no_grab(trans,
- WFPM_CTRL_REG);
- hw_step |= ENABLE_WFPM;
- iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG,
- hw_step);
- hw_step = iwl_read_prph_no_grab(trans,
- CNVI_AUX_MISC_CHIP);
- hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
- if (hw_step == 0x3)
- trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
- (SILICON_C_STEP << 2);
- iwl_trans_release_nic_access(trans, &flags);
- }
}
IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
-#if IS_ENABLED(CONFIG_IWLMVM)
- trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
-
- if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
- if (trans->hw_rev == CSR_HW_REV_TYPE_TY) {
- trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
- }
- } else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
- (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
- trans->cfg = &iwl_ax101_cfg_qu_hr;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
- trans->cfg = &iwl22000_2ax_cfg_jf;
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
- IWL_ERR(trans, "RF ID HRCDB is not supported\n");
- ret = -EINVAL;
- goto out_no_pci;
- } else {
- IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n",
- CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
- ret = -EINVAL;
- goto out_no_pci;
- }
- } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- ((trans->cfg != &iwl_ax200_cfg_cc &&
- trans->cfg != &killer1650x_2ax_cfg &&
- trans->cfg != &killer1650w_2ax_cfg &&
- trans->cfg != &iwl_ax201_cfg_quz_hr) ||
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
- u32 hw_status;
-
- hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
- if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
- /*
- * b step fw is the same for physical card and fpga
- */
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
- else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
- CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
- trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
- } else {
- /*
- * a step no FPGA
- */
- trans->cfg = &iwl22000_2ac_cfg_hr;
- }
- }
-#endif
-
- iwl_pcie_set_interrupt_capa(pdev, trans);
+ iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
@@ -3635,7 +3617,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- init_waitqueue_head(&trans_pcie->d0i3_waitq);
+ init_waitqueue_head(&trans_pcie->sx_waitq);
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
@@ -3657,27 +3639,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->inta_mask = CSR_INI_SET_MASK;
}
- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
-#ifdef CONFIG_IWLWIFI_PCIE_RTPM
- trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
-#else
- trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif
+ iwl_dbg_tlv_init(trans);
+
return trans;
out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
free_percpu(trans_pcie->tso_hdr_page);
+ destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
}
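
The reworked error path above (destroy_workqueue() ahead of the new out_free_trans label) follows the usual goto-unwind idiom: release resources in reverse order of acquisition, and jump to the label that frees only what has already been set up. A generic illustration of the pattern (not driver code; the allocations are stand-ins):

#include <stdlib.h>

struct ctx {
    void *wq;       /* stands in for rba.alloc_wq */
    void *pages;    /* stands in for tso_hdr_page */
};

static struct ctx *ctx_alloc(void)
{
    struct ctx *c = calloc(1, sizeof(*c));

    if (!c)
        return NULL;

    c->wq = malloc(64);         /* alloc_workqueue() analogue */
    if (!c->wq)
        goto out_free_ctx;

    c->pages = malloc(64);      /* alloc_percpu() analogue */
    if (!c->pages)
        goto out_free_wq;

    return c;

out_free_wq:
    free(c->wq);                /* destroy_workqueue() analogue */
out_free_ctx:
    free(c);
    return NULL;
}

int main(void)
{
    struct ctx *c = ctx_alloc();

    if (c) {
        free(c->pages);
        free(c->wq);
        free(c);
    }
    return 0;
}
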
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..86fc00167817 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -50,7 +50,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#include <linux/pm_runtime.h>
#include <net/tso.h>
#include <linux/tcp.h>
@@ -87,9 +86,9 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
-void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -99,10 +98,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
u16 len = byte_cnt;
__le16 bc_ent;
- if (trans_pcie->bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +113,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ /* Starting from AX210, the HW expects bytes */
+ WARN_ON(trans_pcie->bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
- else
+ } else {
+ /* Before AX210, the HW expects DW */
+ WARN_ON(!trans_pcie->bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
}
/*
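
The byte-count table update above packs two fields into each 16-bit entry, and the split moved with the AX210 generation: pre-AX210 hardware reads the length in dwords from bits 0-11 with the fetch-chunk count in bits 12-15, while AX210+ reads the length in bytes from bits 0-13 with the chunk count in bits 14-15. A hedged sketch of just the packing (the real driver derives len and num_fetch_chunks as shown above):

#include <stdint.h>
#include <stdio.h>

static uint16_t bc_ent_pre_ax210(uint16_t bytes, unsigned int chunks)
{
    uint16_t dw = (bytes + 3) / 4;           /* DIV_ROUND_UP(len, 4) */

    return dw | (uint16_t)(chunks << 12);    /* len in DWs, bits 0-11 */
}

static uint16_t bc_ent_ax210(uint16_t bytes, unsigned int chunks)
{
    return bytes | (uint16_t)(chunks << 14); /* len in bytes, bits 0-13 */
}

int main(void)
{
    printf("pre-AX210: 0x%04x\n", bc_ent_pre_ax210(1000, 2)); /* 0x20fa */
    printf("AX210+:    0x%04x\n", bc_ent_ax210(1000, 2));     /* 0x83e8 */
    return 0;
}
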
@@ -216,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb;
+ /*
+ * Only WARN here so we know about the issue, but we mess up our
+ * unmap path because not every place currently checks for errors
+ * returned from this function - it can only return an error if
+ * there's no more space, and so when we know there is enough we
+ * don't always check ...
+ */
+ WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];
@@ -235,56 +251,147 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
return idx;
}
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page if there */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on a 32-bit boundary,
+ * then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, iv_len, amsdu_pad;
+ u16 length, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
- /* if the packet is protected, then it must be CCMP or GCMP */
- iv_len = ieee80211_has_protected(hdr->frame_control) ?
- IEEE80211_CCMP_HDR_LEN : 0;
-
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
/* total amount of header we may need for this A-MSDU */
hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
/*
- * Pull the ieee80211 header + IV to be able to use TSO core,
+ * Pull the ieee80211 header to be able to use TSO core,
* we will restore it for the tx_status flow.
*/
- skb_pull(skb, hdr_len + iv_len);
+ skb_pull(skb, hdr_len);
/*
* Remove the length of all the headers that we don't actually
@@ -333,8 +440,14 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
dev_kfree_skb(csum_skb);
goto out_err;
}
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -343,24 +456,26 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
/* put the payload */
while (data_left) {
+ int ret;
+
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = dma_map_single(trans->dev, tso.data,
tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
dev_kfree_skb(csum_skb);
goto out_err;
}
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
}
}
- /* re -add the WiFi header and IV */
- skb_push(skb, hdr_len + iv_len);
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
return 0;
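
All of the _with_wa call sites above funnel through the same test. The helper iwl_pcie_crosses_4g_boundary() itself is defined elsewhere in the driver; as an illustration of the check it performs (an assumption reconstructed from the comments here, not the verbatim kernel helper), a buffer is problematic when its end lands in a different 4 GiB window than its start:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

/*
 * Using addr + len (rather than addr + len - 1) also flags a TB that
 * ends exactly on the 32-bit boundary, which the comment above names
 * as the failing case.
 */
static bool crosses_4g_boundary(dma_addr_t addr, uint16_t len)
{
    return (addr >> 32) != ((addr + len) >> 32);
}

int main(void)
{
    /* 16 bytes ending exactly at 4 GiB: triggers the workaround */
    return crosses_4g_boundary(0xfffffff0ull, 16) ? 0 : 1;
}
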
@@ -372,7 +487,7 @@ out_err:
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,25 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t tb_phys;
- int tb_idx;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
- if (!skb_frag_size(frag))
+ if (!fragsz)
continue;
tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- return -ENOMEM;
- tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
- skb_frag_size(frag));
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb_frag_address(frag),
- skb_frag_size(frag));
- if (tb_idx < 0)
- return tb_idx;
-
- out_meta->tbs |= BIT(tb_idx);
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
}
return 0;
@@ -457,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
@@ -469,12 +587,18 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
dma_addr_t tb_phys;
int len, tb1_len, tb2_len;
void *tb1_addr;
+ struct sk_buff *frag;
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,19 +632,34 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb2_len = skb_headlen(skb) - hdr_len;
if (tb2_len > 0) {
+ int ret;
+
tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
tb2_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
goto out_err;
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb->data + hdr_len,
- tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
+ goto out_err;
+ if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
return tfd;
out_err:
@@ -527,7 +670,7 @@ out_err:
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct iwl_txq *txq,
- struct iwl_device_cmd *dev_cmd,
+ struct iwl_device_tx_cmd *dev_cmd,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
@@ -542,7 +685,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
memset(tfd, 0, sizeof(*tfd));
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = sizeof(struct iwl_tx_cmd_gen2);
else
len = sizeof(struct iwl_tx_cmd_gen3);
@@ -567,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
}
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_cmd_meta *out_meta;
@@ -576,6 +719,10 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
int idx;
void *tfd;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
@@ -592,7 +739,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -624,7 +771,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
(void *)dev_cmd->payload;
@@ -641,12 +788,8 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
/* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr) {
- if (txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
- iwl_trans_ref(trans);
- }
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
@@ -891,12 +1034,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
- !trans_pcie->ref_cmd_in_flight) {
- trans_pcie->ref_cmd_in_flight = true;
- IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
- iwl_trans_ref(trans);
- }
/* Increment and update queue's write index */
txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
@@ -930,16 +1067,6 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
- if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
- ret = wait_event_timeout(trans_pcie->d0i3_waitq,
- pm_runtime_active(&trans_pcie->pci_dev->dev),
- msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
- if (!ret) {
- IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
- return -ETIMEDOUT;
- }
- }
-
cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
@@ -1064,23 +1191,6 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
}
iwl_pcie_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
-
- if (txq->read_ptr == txq->write_ptr) {
- unsigned long flags;
-
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- if (txq_id != trans_pcie->cmd_queue) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- txq->id);
- iwl_trans_unref(trans);
- } else if (trans_pcie->ref_cmd_in_flight) {
- trans_pcie->ref_cmd_in_flight = false;
- IWL_DEBUG_RPM(trans,
- "clear ref_cmd_in_flight\n");
- iwl_trans_unref(trans);
- }
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
- }
}
while (!skb_queue_empty(&txq->overflow_q)) {
@@ -1127,9 +1237,15 @@ void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
+ struct iwl_txq *txq;
int i;
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txq[txq_id];
+
if (WARN_ON(!txq))
return;
@@ -1161,8 +1277,8 @@ int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
- (trans->cfg->device_family >=
- IWL_DEVICE_FAMILY_22560) ?
+ (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
@@ -1225,7 +1341,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
- wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
+ wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
@@ -1284,6 +1400,10 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
* in the op_mode. Just for the sake of the simplicity of the op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 2f0ba7ef53b8..2f69a6469fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -65,7 +65,6 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
@@ -114,17 +113,17 @@ int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
* If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
- if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
+ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
- max = trans->cfg->base_params->max_tfd_queue_size - 1;
+ max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
/*
* max_tfd_queue_size is a power of 2, so the following is equivalent to
* modulo by max_tfd_queue_size and is well defined.
*/
used = (q->write_ptr - q->read_ptr) &
- (trans->cfg->base_params->max_tfd_queue_size - 1);
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
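
The masking in iwl_queue_space() works because max_tfd_queue_size is a power of two: the unsigned pointer difference, masked by size - 1, yields the number of used entries even after the write pointer has wrapped past the read pointer. A small worked example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t size = 256;      /* max_tfd_queue_size, power of 2 */
    uint32_t write = 5, read = 250; /* write has wrapped past read */
    uint32_t used = (write - read) & (size - 1);

    assert(used == 11);             /* entries 250..255 and 0..4 */
    return 0;
}
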
@@ -214,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
u8 sec_ctl = 0;
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[txq->write_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
u8 sta_id = tx_cmd->sta_id;
scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -258,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr = txq->read_ptr;
u8 sta_id = 0;
__le16 bc_ent;
- struct iwl_tx_cmd *tx_cmd =
- (void *)txq->entries[read_ptr].cmd->payload;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
@@ -293,7 +292,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 2. NIC is woken up for CMD regardless of shadow outside this function
* 3. there is a chance that the NIC is asleep
*/
- if (!trans->cfg->base_params->shadow_reg_enable &&
+ if (!trans->trans_cfg->base_params->shadow_reg_enable &&
txq_id != trans_pcie->cmd_queue &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
@@ -307,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
txq_id, reg);
iwl_set_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
txq->need_update = true;
return;
}
@@ -328,7 +327,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
struct iwl_txq *txq = trans_pcie->txq[i];
if (!test_bit(i, trans_pcie->queue_used))
@@ -347,7 +346,7 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
void *_tfd, u8 idx)
{
- if (trans->cfg->use_tfh) {
+ if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
@@ -390,7 +389,7 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
- if (trans->cfg->use_tfh) {
+ if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd = _tfd;
return le16_to_cpu(tfd->num_tbs) & 0x1f;
@@ -437,7 +436,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
meta->tbs = 0;
- if (trans->cfg->use_tfh) {
+ if (trans->trans_cfg->use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
tfd_fh->num_tbs = 0;
@@ -525,14 +524,14 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = trans_pcie->tfd_size *
- trans->cfg->base_params->max_tfd_queue_size;
+ trans->trans_cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- if (trans->cfg->use_tfh)
+ if (trans->trans_cfg->use_tfh)
tfd_sz = trans_pcie->tfd_size * slots_num;
timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
@@ -591,7 +590,8 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
- u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
+ u32 tfd_queue_max_size =
+ trans->trans_cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct sk_buff *skb)
{
struct page **page_ptr;
+ struct page *next;
page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct page *tmp = next;
- if (*page_ptr) {
- __free_page(*page_ptr);
- *page_ptr = NULL;
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
}
}
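
The rewritten iwl_pcie_free_tso_page() walks a singly linked list of pages whose link lives in the last sizeof(void *) bytes of each page (set up by get_workaround_page() in tx-gen2.c and by get_page_hdr() below). A standalone sketch of the scheme, using plain malloc/free in place of the kernel page allocator:

#include <stdlib.h>

#define PG_SIZE 4096

static void *chain_next(void *page)
{
    return *(void **)((char *)page + PG_SIZE - sizeof(void *));
}

static void *chain_alloc(void *prev)
{
    void *page = malloc(PG_SIZE);

    if (page)   /* link back to the page attached before this one */
        *(void **)((char *)page + PG_SIZE - sizeof(void *)) = prev;
    return page;
}

static void chain_free(void *head)
{
    while (head) {
        void *tmp = head;

        head = chain_next(head);
        free(tmp);
    }
}

int main(void)
{
    void *head = chain_alloc(chain_alloc(chain_alloc(NULL)));

    chain_free(head);   /* frees all three pages */
    return 0;
}
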
@@ -639,20 +645,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
lockdep_assert_held(&trans_pcie->reg_lock);
- if (trans_pcie->ref_cmd_in_flight) {
- trans_pcie->ref_cmd_in_flight = false;
- IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
- iwl_trans_unref(trans);
- }
-
- if (!trans->cfg->base_params->apmg_wake_up_wa)
+ if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
return;
if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
return;
trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(trans->cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/*
@@ -683,13 +683,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
unsigned long flags;
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
- if (txq_id != trans_pcie->cmd_queue) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
- txq->id);
- iwl_trans_unref(trans);
- } else {
+ if (txq_id == trans_pcie->cmd_queue)
iwl_pcie_clear_cmd_in_flight(trans);
- }
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
}
@@ -737,7 +732,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->tfds) {
dma_free_coherent(dev,
trans_pcie->tfd_size *
- trans->cfg->base_params->max_tfd_queue_size,
+ trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
@@ -759,7 +754,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int nq = trans->cfg->base_params->num_of_queues;
+ int nq = trans->trans_cfg->base_params->num_of_queues;
int chan;
u32 reg_val;
int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
@@ -786,7 +781,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
*/
- if (trans->cfg->base_params->scd_chain_ext_wa)
+ if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
@@ -808,7 +803,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
@@ -822,13 +817,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
* we should never get here in gen2 trans mode return early to avoid
* having invalid accesses
*/
- if (WARN_ON_ONCE(trans->cfg->gen2))
+ if (WARN_ON_ONCE(trans->trans_cfg->gen2))
return;
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
struct iwl_txq *txq = trans_pcie->txq[txq_id];
- if (trans->cfg->use_tfh)
+ if (trans->trans_cfg->use_tfh)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
txq->dma_addr);
@@ -911,7 +906,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
return 0;
/* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++)
iwl_pcie_txq_unmap(trans, txq_id);
@@ -933,7 +928,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
/* Tx queues */
if (trans_pcie->txq_memory) {
for (txq_id = 0;
- txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
trans_pcie->txq[txq_id] = NULL;
@@ -957,9 +952,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
+ u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
- bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+ bc_tbls_size *= (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_AX210) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
@@ -984,8 +980,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
- sizeof(struct iwl_txq), GFP_KERNEL);
+ trans_pcie->txq_memory =
+ kcalloc(trans->trans_cfg->base_params->num_of_queues,
+ sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq_memory) {
IWL_ERR(trans, "Not enough memory for txq\n");
ret = -ENOMEM;
@@ -993,7 +990,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
@@ -1047,7 +1044,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
@@ -1075,7 +1072,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
- if (trans->cfg->base_params->num_of_queues > 20)
+ if (trans->trans_cfg->base_params->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1147,7 +1144,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
IWL_ERR(trans,
"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
- trans->cfg->base_params->max_tfd_queue_size,
+ trans->trans_cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
@@ -1170,7 +1167,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
txq->entries[read_ptr].skb = NULL;
- if (!trans->cfg->use_tfh)
+ if (!trans->trans_cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
@@ -1205,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_cmd *dev_cmd_ptr;
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -1225,20 +1222,28 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
txq->overflow_tx = false;
}
- if (txq->read_ptr == txq->write_ptr) {
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
- iwl_trans_unref(trans);
- }
-
out:
spin_unlock_bh(&txq->lock);
}
+/* Set wr_ptr of specific device and txq */
+void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+
+ txq->write_ptr = ptr;
+ txq->read_ptr = txq->write_ptr;
+
+ spin_unlock_bh(&txq->lock);
+}
+
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
const struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- const struct iwl_cfg *cfg = trans->cfg;
int ret;
lockdep_assert_held(&trans_pcie->reg_lock);
@@ -1247,32 +1252,25 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
- !trans_pcie->ref_cmd_in_flight) {
- trans_pcie->ref_cmd_in_flight = true;
- IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
- iwl_trans_ref(trans);
- }
-
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
- if (cfg->base_params->apmg_wake_up_wa &&
+ if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- BIT(cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- BIT(cfg->csr->flag_val_mac_access_en),
- (BIT(cfg->csr->flag_mac_clock_ready) |
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
15000);
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- BIT(cfg->csr->flag_mac_access_req));
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
@@ -1302,12 +1300,12 @@ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
idx = iwl_pcie_get_cmd_index(txq, idx);
r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+ if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
- trans->cfg->base_params->max_tfd_queue_size,
+ trans->trans_cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
@@ -1421,7 +1419,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
* this sad hardware issue.
* This bug has been fixed on devices 9000 and up.
*/
- scd_bug = !trans->cfg->mq_rx_supported &&
+ scd_bug = !trans->trans_cfg->mq_rx_supported &&
!((ssn - txq->write_ptr) & 0x3f) &&
(ssn != txq->write_ptr);
if (scd_bug)
@@ -1867,20 +1865,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
wake_up(&trans_pcie->wait_command_queue);
}
- if (meta->flags & CMD_MAKE_TRANS_IDLE) {
- IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
- iwl_get_cmd_string(trans, cmd->hdr.cmd));
- set_bit(STATUS_TRANS_IDLE, &trans->status);
- wake_up(&trans_pcie->d0i3_waitq);
- }
-
- if (meta->flags & CMD_WAKE_UP_TRANS) {
- IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
- iwl_get_cmd_string(trans, cmd->hdr.cmd));
- clear_bit(STATUS_TRANS_IDLE, &trans->status);
- wake_up(&trans_pcie->d0i3_waitq);
- }
-
meta->flags = 0;
spin_unlock_bh(&txq->lock);
@@ -1927,16 +1911,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
iwl_get_cmd_string(trans, cmd->id));
- if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
- ret = wait_event_timeout(trans_pcie->d0i3_waitq,
- pm_runtime_active(&trans_pcie->pci_dev->dev),
- msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
- if (!ret) {
- IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
- return -ETIMEDOUT;
- }
- }
-
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
@@ -2051,9 +2025,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb->data + hdr_len,
- head_tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+ tb_phys, head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
@@ -2071,9 +2044,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
- trace_iwlwifi_dev_tx_tb(trans->dev, skb,
- skb_frag_address(frag),
- skb_frag_size(frag));
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+ tb_phys, skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
@@ -2086,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
if (!p->page)
goto alloc;
- /* enough room on this page */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
- return p;
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
/* We don't have enough room on this page, get a new one. */
__free_page(p->page);
@@ -2106,6 +2095,11 @@ alloc:
if (!p->page)
return NULL;
p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
return p;
}
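
The room check in get_page_hdr() now subtracts sizeof(void *) from the usable page size: the tail of every TSO header page is reserved for the chaining pointer, which both gives the free path its link and keeps payload (and thus DMA mappings) away from the very end of the page. A minimal sketch of the test, assuming the layout [data ... | void *chain] within one page; note the strict comparison, so a request ending exactly at the reserved tail is also rejected:

#include <stdbool.h>
#include <stddef.h>

#define PG_SIZE 4096

static bool hdr_page_fits(const char *page, const char *pos, size_t len)
{
    /* the last sizeof(void *) bytes hold the chaining pointer */
    return pos + len < page + PG_SIZE - sizeof(void *);
}

int main(void)
{
    char page[PG_SIZE];

    /* a request that would touch the reserved tail must not fit */
    return hdr_page_fits(page, page, PG_SIZE - sizeof(void *)) ? 1 : 0;
}
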
@@ -2131,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2141,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
- struct page **page_ptr;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2164,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room);
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
if (!hdr_page)
return -ENOMEM;
- get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@@ -2254,7 +2245,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- hdr_tb_len);
+ hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@@ -2280,7 +2271,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
- size);
+ tb_phys, size);
data_left -= size;
tso_build_data(skb, &tso, size);
@@ -2313,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+ struct iwl_device_tx_cmd *dev_cmd,
+ u16 tb1_len)
{
/* No A-MSDU without CONFIG_INET */
WARN_ON(1);
@@ -2323,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
#endif /* CONFIG_INET */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id)
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
@@ -2380,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_cmd **dev_cmd_ptr;
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
trans_pcie->dev_cmd_offs);
@@ -2504,22 +2496,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
wait_write_ptr = ieee80211_has_morefrags(fc);
/* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr) {
- if (txq->wd_timeout) {
- /*
- * If the TXQ is active, then set the timer, if not,
- * set the timer in remainder so that the timer will
- * be armed with the right value when the station will
- * wake up.
- */
- if (!txq->frozen)
- mod_timer(&txq->stuck_timer,
- jiffies + txq->wd_timeout);
- else
- txq->frozen_expiry_remainder = txq->wd_timeout;
- }
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
- iwl_trans_ref(trans);
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
+ /*
+ * If the TXQ is active, then set the timer, if not,
+ * set the timer in remainder so that the timer will
+ * be armed with the right value when the station will
+ * wake up.
+ */
+ if (!txq->frozen)
+ mod_timer(&txq->stuck_timer,
+ jiffies + txq->wd_timeout);
+ else
+ txq->frozen_expiry_remainder = txq->wd_timeout;
}
/* Tell device the write index *just past* this latest filled TFD */