Diffstat (limited to 'drivers/net/wireless/intel')
48 files changed, 2234 insertions, 1040 deletions
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 209dc9988455..4db327a95414 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -2671,7 +2671,7 @@ const struct il_ops il3945_ops = {
 	.send_led_cmd = il3945_send_led_cmd,
 };
 
-static struct il_cfg il3945_bg_cfg = {
+static const struct il_cfg il3945_bg_cfg = {
 	.name = "3945BG",
 	.fw_name_pre = IL3945_FW_PRE,
 	.ucode_api_max = IL3945_UCODE_API_MAX,
@@ -2700,7 +2700,7 @@ static struct il_cfg il3945_bg_cfg = {
 	},
 };
 
-static struct il_cfg il3945_abg_cfg = {
+static const struct il_cfg il3945_abg_cfg = {
 	.name = "3945ABG",
 	.fw_name_pre = IL3945_FW_PRE,
 	.ucode_api_max = IL3945_UCODE_API_MAX,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 726ede391cb9..3bba521d2cd9 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1320,7 +1320,7 @@ struct il_priv {
 	u64 timestamp;
 
 	union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+#if IS_ENABLED(CONFIG_IWL3945)
 		struct {
 			void *shared_virt;
 			dma_addr_t shared_phys;
@@ -1351,7 +1351,7 @@ struct il_priv {
 		} _3945;
 #endif
 
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+#if IS_ENABLED(CONFIG_IWL4965)
 		struct {
 			struct il_rx_phy_res last_phy_res;
 			bool last_phy_res_valid;
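
The common.h hunks above replace the open-coded defined() pair with IS_ENABLED(), which covers both built-in and modular configs in one test. A minimal sketch of the equivalence (CONFIG_FOO is a stand-in option, not one from this driver):

/* From <linux/kconfig.h>: IS_ENABLED(CONFIG_FOO) evaluates to 1 when
 * CONFIG_FOO is set to 'y' (built-in) or 'm' (module), else 0, and is
 * usable in both preprocessor and plain C conditionals.
 */
#if IS_ENABLED(CONFIG_FOO)				/* new style */
/* ... */
#endif

#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)	/* old style */
/* ... */
#endif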
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index b662cf35b033..c7509c51e9d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -46,15 +46,6 @@
  *
  ******************************************************************************/
 
-static inline const struct fw_img *
-iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
-{
-	if (ucode_type >= IWL_UCODE_TYPE_MAX)
-		return NULL;
-
-	return &priv->fw->img[ucode_type];
-}
-
 /*
  * Calibration
  */
@@ -330,7 +321,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 	enum iwl_ucode_type old_type;
 	static const u16 alive_cmd[] = { REPLY_ALIVE };
 
-	fw = iwl_get_ucode_image(priv, ucode_type);
+	fw = iwl_get_ucode_image(priv->fw, ucode_type);
 	if (WARN_ON(!fw))
 		return -EINVAL;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 64690c14ff4d..d4b73dedf89b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,13 +73,13 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX	17
 #define IWL7265_UCODE_API_MAX	17
-#define IWL7265D_UCODE_API_MAX	24
-#define IWL3168_UCODE_API_MAX	24
+#define IWL7265D_UCODE_API_MAX	26
+#define IWL3168_UCODE_API_MAX	26
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	16
-#define IWL7265_UCODE_API_MIN	16
-#define IWL7265D_UCODE_API_MIN	16
+#define IWL7260_UCODE_API_MIN	17
+#define IWL7265_UCODE_API_MIN	17
+#define IWL7265D_UCODE_API_MIN	17
 #define IWL3168_UCODE_API_MIN	20
 
 /* NVM versions */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 6c6725e808d4..d02ca1491d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,11 +70,11 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	24
-#define IWL8265_UCODE_API_MAX	24
+#define IWL8000_UCODE_API_MAX	26
+#define IWL8265_UCODE_API_MAX	26
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	16
+#define IWL8000_UCODE_API_MIN	17
 #define IWL8265_UCODE_API_MIN	20
 
 /* NVM versions */
@@ -212,6 +212,17 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
 	.vht_mu_mimo_supported = true,
 };
 
+const struct iwl_cfg iwl8275_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 8275",
+	.fw_name_pre = IWL8265_FW_PRE,
+	IWL_DEVICE_8265,
+	.ht_params = &iwl8000_ht_params,
+	.nvm_ver = IWL8000_NVM_VERSION,
+	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.vht_mu_mimo_supported = true,
+};
+
 const struct iwl_cfg iwl4165_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 4165",
 	.fw_name_pre = IWL8000_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index fbaf705f3fa7..ff850410d897 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,10 +55,10 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	24
+#define IWL9000_UCODE_API_MAX	26
 
 /* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN	16
+#define IWL9000_UCODE_API_MIN	17
 
 /* NVM versions */
 #define IWL9000_NVM_VERSION	0x0a1d
@@ -72,15 +72,15 @@
 #define IWL9000_SMEM_OFFSET	0x400000
 #define IWL9000_SMEM_LEN	0x68000
 
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
 #define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
+#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
 #define IWL9000_MODULE_FIRMWARE(api) \
 	IWL9000_FW_PRE "-" __stringify(api) ".ucode"
 #define IWL9260_MODULE_FIRMWARE(api) \
 	IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260LC_MODULE_FIRMWARE(api) \
-	IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000LC_MODULE_FIRMWARE(api) \
+	IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_9000		10
 
@@ -146,41 +146,73 @@ static const struct iwl_tt_params iwl9000_tt_params = {
 	.mac_addr_from_csr = true,					\
 	.rf_id = true
 
+const struct iwl_cfg iwl9160_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9160",
+	.fw_name_pre = IWL9260_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
 const struct iwl_cfg iwl9260_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 9260",
-	.fw_name_pre = IWL9260_FW_PRE,
-	IWL_DEVICE_9000,
-	.ht_params = &iwl9000_ht_params,
-	.nvm_ver = IWL9000_NVM_VERSION,
-	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.name = "Intel(R) Dual Band Wireless AC 9260",
+	.fw_name_pre = IWL9260_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9270_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9270",
+	.fw_name_pre = IWL9260_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9460",
+	.fw_name_pre = IWL9000_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9560",
+	.fw_name_pre = IWL9000_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
 };
 
 /*
  * TODO the struct below is for internal testing only this should be
  * removed by EO 2016~
  */
-const struct iwl_cfg iwl9260lc_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 9260",
-	.fw_name_pre = IWL9260LC_FW_PRE,
-	IWL_DEVICE_9000,
-	.ht_params = &iwl9000_ht_params,
-	.nvm_ver = IWL9000_NVM_VERSION,
-	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwl5165_2ac_cfg = {
-	.name = "Intel(R) Dual Band Wireless AC 5165",
-	.fw_name_pre = IWL9000_FW_PRE,
-	IWL_DEVICE_9000,
-	.ht_params = &iwl9000_ht_params,
-	.nvm_ver = IWL9000_NVM_VERSION,
-	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-	.integrated = true,
+const struct iwl_cfg iwl9000lc_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9000",
+	.fw_name_pre = IWL9000LC_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
 };
 
 MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
index 4d78232c8afe..ea1618525878 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX	24
+#define IWL_A000_UCODE_API_MAX	26
 
 /* Lowest firmware API version supported */
 #define IWL_A000_UCODE_API_MIN	24
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 423b23320d4f..2660cc4b9f8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -359,7 +359,6 @@ struct iwl_cfg {
 	    high_temp:1,
 	    mac_addr_from_csr:1,
 	    lp_xtal_workaround:1,
-	    no_power_up_nic_in_init:1,
 	    disable_dummy_notification:1,
 	    apmg_not_supported:1,
 	    mq_rx_supported:1,
@@ -445,13 +444,17 @@ extern const struct iwl_cfg iwl7265d_n_cfg;
 extern const struct iwl_cfg iwl8260_2n_cfg;
 extern const struct iwl_cfg iwl8260_2ac_cfg;
 extern const struct iwl_cfg iwl8265_2ac_cfg;
+extern const struct iwl_cfg iwl8275_2ac_cfg;
 extern const struct iwl_cfg iwl4165_2ac_cfg;
 extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
 extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl9000lc_2ac_cfg;
+extern const struct iwl_cfg iwl9160_2ac_cfg;
 extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260lc_2ac_cfg;
-extern const struct iwl_cfg iwl5165_2ac_cfg;
+extern const struct iwl_cfg iwl9270_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
 extern const struct iwl_cfg iwla000_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 871ad02fdb17..d73e9d436027 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -589,6 +589,8 @@ enum dtd_diode_reg {
  * Causes for the FH register interrupts
  */
 enum msix_fh_int_causes {
+	MSIX_FH_INT_CAUSES_Q0			= BIT(0),
+	MSIX_FH_INT_CAUSES_Q1			= BIT(1),
 	MSIX_FH_INT_CAUSES_D2S_CH0_NUM		= BIT(16),
 	MSIX_FH_INT_CAUSES_D2S_CH1_NUM		= BIT(17),
 	MSIX_FH_INT_CAUSES_S2D			= BIT(19),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
index 1d9dd153ef1c..50510fb6ab8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -33,9 +33,6 @@
 #define CREATE_TRACE_POINTS
 #include "iwl-devtrace.h"
 
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
 EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
 EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
 EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 1d6f5d21a663..33ef5372d195 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -77,7 +77,6 @@
  */
 #define FH_MEM_LOWER_BOUND                   (0x1000)
 #define FH_MEM_UPPER_BOUND                   (0x2000)
-#define TFH_MEM_LOWER_BOUND                  (0xA06000)
 
 /**
  * Keep-Warm (KW) buffer base address.
@@ -120,7 +119,7 @@
 #define FH_MEM_CBBC_20_31_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xB20)
 #define FH_MEM_CBBC_20_31_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xB80)
 /* a000 TFD table address, 64 bit */
-#define TFH_TFDQ_CBB_TABLE			(TFH_MEM_LOWER_BOUND + 0x1C00)
+#define TFH_TFDQ_CBB_TABLE			(0x1C00)
 
 /* Find TFD CB base pointer for given queue */
 static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
@@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * In case of DRAM read address which is not aligned to 128B, the TFH will
  * enable transfer size which doesn't cross 64B DRAM address boundary.
  */
-#define TFH_TRANSFER_MODE		(TFH_MEM_LOWER_BOUND + 0x1F40)
+#define TFH_TRANSFER_MODE		(0x1F40)
 #define TFH_TRANSFER_MAX_PENDING_REQ	0xc
 #define TFH_CHUNK_SIZE_128		BIT(8)
 #define TFH_CHUNK_SPLIT_MODE		BIT(10)
@@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
  * the start of the TFD first TB.
 * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
 */
-#define TFH_TXCMD_UPDATE_CFG		(TFH_MEM_LOWER_BOUND + 0x1F48)
+#define TFH_TXCMD_UPDATE_CFG		(0x1F48)
 /*
 * Controls TX DMA operation
 *
@@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
 * set to 1 - interrupt is sent to the driver
 * Bit 0: Indicates the snoop configuration
 */
-#define TFH_SRV_DMA_CHNL0_CTRL	(TFH_MEM_LOWER_BOUND + 0x1F60)
+#define TFH_SRV_DMA_CHNL0_CTRL	(0x1F60)
 #define TFH_SRV_DMA_SNOOP	BIT(0)
 #define TFH_SRV_DMA_TO_DRIVER	BIT(24)
 #define TFH_SRV_DMA_START	BIT(31)
 
 /* Defines the DMA SRAM write start address to transfer a data block */
-#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(TFH_MEM_LOWER_BOUND + 0x1F64)
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(0x1F64)
 
 /* Defines the 64bits DRAM start address to read the DMA data block from */
-#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(TFH_MEM_LOWER_BOUND + 0x1F68)
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(0x1F68)
 
 /*
 * Defines the number of bytes to transfer from DRAM to SRAM.
 * Note that this register may be configured with non-dword aligned size.
 */
-#define TFH_SRV_DMA_CHNL0_BC	(TFH_MEM_LOWER_BOUND + 0x1F70)
+#define TFH_SRV_DMA_CHNL0_BC	(0x1F70)
 
 /**
  * Rx SRAM Control and Status Registers (RSCSR)
@@ -644,6 +643,7 @@ struct iwl_rb_status {
 #define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
 #define IWL_TX_DMA_MASK		DMA_BIT_MASK(36)
 #define IWL_NUM_OF_TBS		20
+#define IWL_TFH_NUM_TBS		25
 
 static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
 {
@@ -665,25 +665,29 @@ struct iwl_tfd_tb {
 } __packed;
 
 /**
- * struct iwl_tfd
+ * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor
  *
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- *	 5 reserved
- *	 6-7 padding (not used)
- * @ tbs[20]	transmit frame buffer descriptors
- * @ __pad	padding
+ * This structure contains dma address and length of transmission address
  *
+ * @tb_len length of the tx buffer
+ * @addr 64 bits dma address
+ */
+struct iwl_tfh_tb {
+	__le16 tb_len;
+	__le64 addr;
+} __packed;
+
+/**
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ * contiguous 256 TFDs.
+ * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
 *
 * Driver must indicate the physical address of the base of each
 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
 *
- * Each TFD contains pointer/size information for up to 20 data buffers
+ * Each TFD contains pointer/size information for up to 20 / 25 data buffers
 * in host DRAM.  These buffers collectively contain the (one) frame described
 * by the TFD.  Each buffer must be a single contiguous block of memory within
 * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
@@ -692,6 +696,16 @@ struct iwl_tfd_tb {
 *
 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
 */
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ *	 5 reserved
+ *	 6-7 padding (not used)
+ * @ tbs[20]	transmit frame buffer descriptors
+ * @ __pad	padding
+ */
 struct iwl_tfd {
 	u8 __reserved1[3];
 	u8 num_tbs;
@@ -699,6 +713,19 @@ struct iwl_tfd {
 	__le32 __pad;
 } __packed;
 
+/**
+ * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @ num_tbs 0-4 number of active tbs
+ *	 5 -15 reserved
+ * @ tbs[25]	transmit frame buffer descriptors
+ * @ __pad	padding
+ */
+struct iwl_tfh_tfd {
+	__le16 num_tbs;
+	struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
+	__le32 __pad;
+} __packed;
+
 /* Keep Warm Size */
 #define IWL_KW_SIZE 0x1000	/* 4k */
 
@@ -707,8 +734,13 @@ struct iwl_tfd {
 /**
  * struct iwlagn_schedq_bc_tbl scheduler byte count table
  *	base physical address provided by SCD_DRAM_BASE_ADDR
+ * For devices up to a000:
+ * @tfd_offset  0-12 - tx command byte count
+ *	       12-16 - station index
+ * For a000 and on:
  * @tfd_offset  0-12 - tx command byte count
- *	       12-16 - station index
+ *	       12-13 - number of 64 byte chunks
+ *	       14-16 - reserved
  */
 struct iwlagn_scd_bc_tbl {
 	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
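
The new a000-style descriptors above pair a 16-bit length with a 64-bit DMA address per buffer. An illustrative sketch of appending one buffer to such a TFD (iwl_tfh_set_tb is a hypothetical helper name, not a function from this commit):

/* Illustrative sketch: append one transmit buffer to an a000-style TFD.
 * Assumes the caller owns the TFD and has already DMA-mapped the buffer.
 */
static int iwl_tfh_set_tb(struct iwl_tfh_tfd *tfd, dma_addr_t addr, u16 len)
{
	/* per the doc comment above, bits 0-4 of num_tbs hold the count */
	int idx = le16_to_cpu(tfd->num_tbs) & 0x1f;

	if (idx >= IWL_TFH_NUM_TBS)
		return -EINVAL;

	tfd->tbs[idx].addr = cpu_to_le64(addr);
	tfd->tbs[idx].tb_len = cpu_to_le16(len);
	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}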
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 1b1e045f8907..ceec5ca2b1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -199,8 +199,6 @@ struct iwl_ucode_capa {
 * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
 *	treats good CRC threshold as a boolean
 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
- * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
- * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
 * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD
 * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
 *	offload profile config command.
@@ -210,36 +208,24 @@ struct iwl_ucode_capa {
 *	from the probe request template.
 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
- *	P2P client interfaces simultaneously if they are in different bindings.
- * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
- *	P2P client interfaces simultaneously if they are in same bindings.
 * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
- * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
 * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
 */
 enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_PAN			= BIT(0),
 	IWL_UCODE_TLV_FLAGS_NEWSCAN		= BIT(1),
 	IWL_UCODE_TLV_FLAGS_MFP			= BIT(2),
-	IWL_UCODE_TLV_FLAGS_P2P			= BIT(3),
-	IWL_UCODE_TLV_FLAGS_DW_BC_TABLE		= BIT(4),
 	IWL_UCODE_TLV_FLAGS_SHORT_BL		= BIT(7),
 	IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS	= BIT(10),
 	IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID	= BIT(12),
 	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL	= BIT(15),
 	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE	= BIT(16),
-	IWL_UCODE_TLV_FLAGS_P2P_PM		= BIT(21),
-	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM	= BIT(22),
-	IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM	= BIT(23),
 	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
 	IWL_UCODE_TLV_FLAGS_EBS_SUPPORT		= BIT(25),
 	IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD	= BIT(26),
 	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
-	IWL_UCODE_TLV_FLAGS_GO_UAPSD		= BIT(30),
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
 
@@ -249,24 +235,21 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
 *	longer than the passive one, which is essential for fragmented scan.
 * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
- * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
 * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
 * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
- * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
- *	instead of 3.
- * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
- *	(command version 3) that supports per-chain limits
+ * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ *	iteration complete notification, and the timestamp reported for RX
+ *	received during scan, are reported in TSF of the mac specified in the
+ *	scan request.
 *
 * @NUM_IWL_UCODE_TLV_API: number of bits used
 */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
 	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
-	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
 	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
-	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
-	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
-	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
+	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
+	IWL_UCODE_TLV_API_SCAN_TSF_REPORT	= (__force iwl_ucode_tlv_api_t)28,
 
 	NUM_IWL_UCODE_TLV_API
 
 #ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 74ea68d1063c..5f229556339a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -329,4 +329,13 @@ iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
 	return conf_tlv->usniffer;
 }
 
+static inline const struct fw_img *
+iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
+{
+	if (ucode_type >= IWL_UCODE_TYPE_MAX)
+		return NULL;
+
+	return &fw->img[ucode_type];
+}
+
 #endif  /* __iwl_fw_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 92c8b5f9a9cb..a9f69fdd170b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -267,7 +267,7 @@ static const char *get_rfh_string(int cmd)
 		IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
 		IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
 		IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
-	};
+	}
 
 	switch (cmd) {
 	IWL_CMD(RFH_RXF_DMA_CFG);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
index 8aa1f2b7fdfc..88f260db3744 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-notif-wait.c
@@ -99,8 +99,12 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
 			continue;
 
 		for (i = 0; i < w->n_cmds; i++) {
-			if (w->cmds[i] ==
-			    WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+			u16 rec_id = WIDE_ID(pkt->hdr.group_id,
+					     pkt->hdr.cmd);
+
+			if (w->cmds[i] == rec_id ||
+			    (!iwl_cmd_groupid(w->cmds[i]) &&
+			     DEF_ID(w->cmds[i]) == rec_id)) {
 				found = true;
 				break;
 			}
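
The matching logic above lets a waiter that registered a legacy, group-less opcode still match a packet that arrives with a wide id. A worked example using the macros from this commit (WIDE_ID is existing, DEF_ID is added in the iwl-trans.h hunk further down; group 1 is the always-long group):

/* WIDE_ID(grp, opcode) = (grp << 8) | opcode; DEF_ID(opcode) = (1 << 8) | opcode */
u16 waited = 0x6c;			/* legacy id: iwl_cmd_groupid(0x6c) == 0 */
u16 rec_id = WIDE_ID(1, 0x6c);		/* 0x016c, as sent with a wide header */

bool match = (waited == rec_id) ||
	     (!iwl_cmd_groupid(waited) && DEF_ID(waited) == rec_id); /* true */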
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 43f8f7d45ddb..3bd6fc1b76d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -67,6 +67,7 @@
 #include <linux/export.h>
 #include <linux/etherdevice.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 #include "iwl-nvm-parse.h"
@@ -564,11 +565,16 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
 	__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
 	__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
 
-	/* If OEM did not fuse address - get it from OTP */
-	if (!mac_addr0 && !mac_addr1) {
-		mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
-		mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
-	}
+	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+	/*
+	 * If the OEM fused a valid address, use it instead of the one in the
+	 * OTP
+	 */
+	if (is_valid_ether_addr(data->hw_addr))
+		return;
+
+	mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+	mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
 
 	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
 }
@@ -899,3 +905,91 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 	return regd;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+
+#ifdef CONFIG_ACPI
+#define WRDD_METHOD		"WRDD"
+#define WRDD_WIFI		(0x07)
+#define WRDD_WIGIG		(0x10)
+
+static u32 iwl_wrdd_get_mcc(struct device *dev, union acpi_object *wrdd)
+{
+	union acpi_object *mcc_pkg, *domain_type, *mcc_value;
+	u32 i;
+
+	if (wrdd->type != ACPI_TYPE_PACKAGE ||
+	    wrdd->package.count < 2 ||
+	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    wrdd->package.elements[0].integer.value != 0) {
+		IWL_DEBUG_EEPROM(dev, "Unsupported wrdd structure\n");
+		return 0;
+	}
+
+	for (i = 1 ; i < wrdd->package.count ; ++i) {
+		mcc_pkg = &wrdd->package.elements[i];
+
+		if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
+		    mcc_pkg->package.count < 2 ||
+		    mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+			mcc_pkg = NULL;
+			continue;
+		}
+
+		domain_type = &mcc_pkg->package.elements[0];
+		if (domain_type->integer.value == WRDD_WIFI)
+			break;
+
+		mcc_pkg = NULL;
+	}
+
+	if (mcc_pkg) {
+		mcc_value = &mcc_pkg->package.elements[1];
+		return mcc_value->integer.value;
+	}
+
+	return 0;
+}
+
+int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+	acpi_handle root_handle;
+	acpi_handle handle;
+	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+	acpi_status status;
+	u32 mcc_val;
+
+	root_handle = ACPI_HANDLE(dev);
+	if (!root_handle) {
+		IWL_DEBUG_EEPROM(dev,
+				 "Could not retrieve root port ACPI handle\n");
+		return -ENOENT;
+	}
+
+	/* Get the method's handle */
+	status = acpi_get_handle(root_handle, (acpi_string)WRDD_METHOD,
+				 &handle);
+	if (ACPI_FAILURE(status)) {
+		IWL_DEBUG_EEPROM(dev, "WRD method not found\n");
+		return -ENOENT;
+	}
+
+	/* Call WRDD with no arguments */
+	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+	if (ACPI_FAILURE(status)) {
+		IWL_DEBUG_EEPROM(dev, "WRDC invocation failed (0x%x)\n",
+				 status);
+		return -ENOENT;
+	}
+
+	mcc_val = iwl_wrdd_get_mcc(dev, wrdd.pointer);
+	kfree(wrdd.pointer);
+	if (!mcc_val)
+		return -ENOENT;
+
+	mcc[0] = (mcc_val >> 8) & 0xff;
+	mcc[1] = mcc_val & 0xff;
+	mcc[2] = '\0';
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_get_bios_mcc);
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index d704d52aa7ec..7249e5b403f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -5,7 +5,8 @@
 *
 * GPL LICENSE SUMMARY
 *
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -93,4 +94,21 @@ struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		       int num_of_ch, __le32 *channels, u16 fw_mcc);
 
+#ifdef CONFIG_ACPI
+/**
+ * iwl_get_bios_mcc - read MCC from BIOS, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_get_bios_mcc(struct device *dev, char *mcc);
+#else
+static inline int iwl_get_bios_mcc(struct device *dev, char *mcc)
+{
+	return -ENOENT;
+}
+#endif
+
 #endif /* __iwl_nvm_parse_h__ */
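
A hedged usage sketch for the new helper: given a struct device *dev, the caller supplies a 3-byte buffer and keeps its own default when ACPI has nothing (the "ZZ" fallback below is illustrative, not from this commit):

/* Illustrative caller: prefer the BIOS/ACPI country code, else keep "ZZ" */
char mcc[3] = "ZZ";

if (!iwl_get_bios_mcc(dev, mcc))
	pr_debug("BIOS regulatory MCC: %s\n", mcc);
else
	pr_debug("no WRDD MCC in ACPI, keeping %s\n", mcc);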
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 7beba9ae5617..2893826d7d2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -110,7 +110,7 @@ enum iwl_phy_db_section_type {
 	IWL_PHY_DB_MAX
 };
 
-#define PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
+#define PHY_DB_CMD 0x6c
 
 /*
 * phy db - configure operational ucode
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 459bf736fd5b..406ef301b8ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -302,22 +302,17 @@
 #define OSC_CLK_FORCE_CONTROL		(0x8)
 
 #define FH_UCODE_LOAD_STATUS		(0x1AF0)
-#define CSR_UCODE_LOAD_STATUS_ADDR	(0x1E70)
-enum secure_load_status_reg {
-	LMPM_CPU_UCODE_LOADING_STARTED			= 0x00000001,
-	LMPM_CPU_HDRS_LOADING_COMPLETED			= 0x00000003,
-	LMPM_CPU_UCODE_LOADING_COMPLETED		= 0x00000007,
-	LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED		= 0x000000F8,
-	LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK	= 0x0000FF00,
-};
 
-#define LMPM_SECURE_INSPECTOR_CODE_ADDR	(0x1E38)
-#define LMPM_SECURE_INSPECTOR_DATA_ADDR	(0x1E3C)
+/*
+ * Replacing FH_UCODE_LOAD_STATUS
+ * This register is written by driver and is read by uCode during boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define UREG_UCODE_LOAD_STATUS		(0xa05c40)
+
 #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR	(0x1E78)
 #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR	(0x1E7C)
 
-#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE	(0x400000)
-#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE	(0x402000)
 #define LMPM_SECURE_CPU1_HDR_MEM_SPACE		(0x420000)
 #define LMPM_SECURE_CPU2_HDR_MEM_SPACE		(0x420400)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 6069a9ff53fa..d42cab291025 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -65,6 +65,7 @@
 
 #include "iwl-trans.h"
 #include "iwl-drv.h"
+#include "iwl-fh.h"
 
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 				  struct device *dev,
@@ -77,7 +78,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 	static struct lock_class_key __key;
 #endif
 
-	trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
 	if (!trans)
 		return NULL;
 
@@ -102,18 +103,14 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!trans->dev_cmd_pool)
-		goto free;
+		return NULL;
 
 	return trans;
- free:
-	kfree(trans);
-	return NULL;
 }
 
 void iwl_trans_free(struct iwl_trans *trans)
 {
 	kmem_cache_destroy(trans->dev_cmd_pool);
-	kfree(trans);
 }
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
@@ -139,6 +136,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	if (!(cmd->flags & CMD_ASYNC))
 		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
 
+	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
+		cmd->id = DEF_ID(cmd->id);
+
 	ret = trans->ops->send_cmd(trans, cmd);
 
 	if (!(cmd->flags & CMD_ASYNC))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5535e2238da3..0296124a7f9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -153,6 +153,7 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
 
 /* make u16 wide id out of u8 group and opcode */
 #define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
 
 /* due to the conversion, this group is special; new groups
 * should be defined in the appropriate fw-api header files
@@ -262,8 +263,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
 *	(i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
- * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
- *	check that we leave enough room for the TBs bitmap which needs 20 bits.
 */
 enum CMD_MODE {
 	CMD_ASYNC		= BIT(0),
@@ -274,8 +273,6 @@ enum CMD_MODE {
 	CMD_MAKE_TRANS_IDLE	= BIT(5),
 	CMD_WAKE_UP_TRANS	= BIT(6),
 	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
-
-	CMD_TB_BITMAP_POS	= 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -488,7 +485,6 @@ struct iwl_hcmd_arr {
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
- * @wide_cmd_header: firmware supports wide host command header
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
@@ -510,7 +506,6 @@ struct iwl_trans_config {
 	enum iwl_amsdu_size rx_buf_size;
 	bool bc_table_dword;
 	bool scd_set_active;
-	bool wide_cmd_header;
 	bool sw_csum_tx;
 	const struct iwl_hcmd_arr *command_groups;
 	int command_groups_size;
@@ -649,6 +644,8 @@ struct iwl_trans_ops {
 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
 				    bool shared);
 
+	dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
+
 	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
 				 bool freeze);
@@ -772,6 +769,7 @@ enum iwl_plat_pm_mode {
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
+ * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
@@ -823,6 +821,7 @@ struct iwl_trans {
 
 	const struct iwl_hcmd_arr *command_groups;
 	int command_groups_size;
+	bool wide_cmd_header;
 
 	u8 num_rx_queues;
 
@@ -1073,6 +1072,15 @@ static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
 		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
 }
 
+static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
+						      int queue)
+{
+	/* we should never be called if the trans doesn't support it */
+	BUG_ON(!trans->ops->get_txq_byte_table);
+
+	return trans->ops->get_txq_byte_table(trans, queue);
+}
+
 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 					 int fifo, int sta_id, int tid,
 					 int frame_limit, u16 ssn,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index b23271755daf..2d6f44fbaf62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -504,6 +504,28 @@ static inline char *iwl_dbgfs_is_match(char *name, char *buf)
 	return !strncmp(name, buf, len) ? buf + len : NULL;
 }
 
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 curr_gp2;
+	u64 curr_os;
+	s64 diff;
+	char buf[64];
+	const size_t bufsz = sizeof(buf);
+	int pos = 0;
+
+	iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+	do_div(curr_os, NSEC_PER_USEC);
+	diff = curr_os - curr_gp2;
+	pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
 					  char *buf,
 					  size_t count, loff_t *ppos)
@@ -1530,6 +1552,8 @@ MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
 MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1554,8 +1578,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
 	    ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
-	     (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
-	      mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
+	     (vif->type == NL80211_IFTYPE_STATION && vif->p2p)))
 		MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir,
 					 S_IWUSR | S_IRUSR);
 
@@ -1570,6 +1593,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
 				 S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff,
+				 mvmvif->dbgfs_dir, S_IRUSR);
 
 	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
 	    mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index b34489817c70..539d718df797 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -917,6 +917,59 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
 	return ret ?: count;
 }
 
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+					     char *buf, size_t count,
+					     loff_t *ppos)
+{
+	struct iwl_rx_cmd_buffer rxb = {
+		._rx_page_order = 0,
+		.truesize = 0, /* not used */
+		._offset = 0,
+	};
+	struct iwl_rx_packet *pkt;
+	struct iwl_rx_mpdu_desc *desc;
+	int bin_len = count / 2;
+	int ret = -EINVAL;
+
+	/* supporting only 9000 descriptor */
+	if (!mvm->trans->cfg->mq_rx_supported)
+		return -ENOTSUPP;
+
+	rxb._page = alloc_pages(GFP_ATOMIC, 0);
+	if (!rxb._page)
+		return -ENOMEM;
+	pkt = rxb_addr(&rxb);
+
+	ret = hex2bin(page_address(rxb._page), buf, bin_len);
+	if (ret)
+		goto out;
+
+	/* avoid invalid memory access */
+	if (bin_len < sizeof(*pkt) + sizeof(*desc))
+		goto out;
+
+	/* check this is RX packet */
+	if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
+	    WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
+		goto out;
+
+	/* check the length in metadata matches actual received length */
+	desc = (void *)pkt->data;
+	if (le16_to_cpu(desc->mpdu_len) !=
+	    (bin_len - sizeof(*desc) - sizeof(*pkt)))
+		goto out;
+
+	local_bh_disable();
+	iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+	local_bh_enable();
+	ret = 0;
+
+out:
+	iwl_free_rxb(&rxb);
+
+	return ret ?: count;
+}
+
 static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
 					  char __user *user_buf,
 					  size_t count, loff_t *ppos)
@@ -1454,6 +1507,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
 			   (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1464,6 +1518,132 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
 #endif
 
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_dbg_mem_access_cmd cmd = {};
+	struct iwl_dbg_mem_access_rsp *rsp;
+	struct iwl_host_cmd hcmd = {
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &cmd, },
+		.len = { sizeof(cmd) },
+	};
+	size_t delta, len;
+	ssize_t ret;
+
+	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+			     DEBUG_GROUP, 0);
+	cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+	/* Take care of alignment of both the position and the length */
+	delta = *ppos & 0x3;
+	cmd.addr = cpu_to_le32(*ppos - delta);
+	cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+				  (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	mutex_unlock(&mvm->mutex);
+
+	if (ret < 0)
+		return ret;
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	len = min((size_t)le32_to_cpu(rsp->len) << 2,
+		  iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+	len = min(len - delta, count);
+	if (len < 0) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+	*ppos += ret;
+
+out:
+	iwl_free_resp(&hcmd);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+				   const char __user *user_buf, size_t count,
+				   loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_dbg_mem_access_cmd *cmd;
+	struct iwl_dbg_mem_access_rsp *rsp;
+	struct iwl_host_cmd hcmd = {};
+	size_t cmd_size;
+	size_t data_size;
+	u32 op, len;
+	ssize_t ret;
+
+	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+			     DEBUG_GROUP, 0);
+
+	if (*ppos & 0x3 || count < 4) {
+		op = DEBUG_MEM_OP_WRITE_BYTES;
+		len = min(count, (size_t)(4 - (*ppos & 0x3)));
+		data_size = len;
+	} else {
+		op = DEBUG_MEM_OP_WRITE;
+		len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+		data_size = len << 2;
+	}
+
+	cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+	cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->op = cpu_to_le32(op);
+	cmd->len = cpu_to_le32(len);
+	cmd->addr = cpu_to_le32(*ppos);
+	if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+		kfree(cmd);
+		return -EFAULT;
+	}
+
+	hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+	hcmd.data[0] = (void *)cmd;
+	hcmd.len[0] = cmd_size;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	mutex_unlock(&mvm->mutex);
+
+	kfree(cmd);
+
+	if (ret < 0)
+		return ret;
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = data_size;
+	*ppos += ret;
+
+out:
+	iwl_free_resp(&hcmd);
+	return ret;
+}
+
+static const struct file_operations iwl_dbgfs_mem_ops = {
+	.read = iwl_dbgfs_mem_read,
+	.write = iwl_dbgfs_mem_write,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
 	struct dentry *bcast_dir __maybe_unused;
@@ -1502,6 +1682,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 	MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, S_IWUSR);
 	if (!debugfs_create_bool("enable_scan_iteration_notif",
 				 S_IRUSR | S_IWUSR,
 				 mvm->debugfs_dir,
@@ -1560,6 +1741,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 			    mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
 		goto err;
 
+	debugfs_create_file("mem", S_IRUSR | S_IWUSR, dbgfs_dir, mvm,
+			    &iwl_dbgfs_mem_ops);
+
 	/*
 	 * Create a symlink with mac80211. It will be removed when mac80211
 	 * exists (before the opmode exists which removes the target.)
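
The new "mem" debugfs entry maps the file offset onto a device memory address; offsets of 0x1000000 and above (*ppos >> 24 nonzero) go to the UMAC, lower ones to the LMAC. A hedged userspace sketch; the debugfs mount point and the instance directory name are assumptions, not spelled out by this commit:

/* Illustrative only: read 16 bytes of LMAC memory at device address 0x800000 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	/* path assumes the usual debugfs mount and a hypothetical PCI slot */
	int fd = open("/sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/mem",
		      O_RDONLY);

	if (fd < 0)
		return 1;

	/* the file offset is the device address; the driver aligns it */
	if (pread(fd, buf, sizeof(buf), 0x800000) > 0)
		printf("first dword: %02x %02x %02x %02x\n",
		       buf[0], buf[1], buf[2], buf[3]);

	close(fd);
	return 0;
}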
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 404b0de9e2dc..3fa43d1348a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -313,35 +313,26 @@ enum iwl_dev_tx_power_cmd_mode {
 	IWL_TX_POWER_MODE_SET_ACK = 3,
 }; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */;
 
+#define IWL_NUM_CHAIN_LIMITS	2
+#define IWL_NUM_SUB_BANDS	5
+
 /**
- * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
 * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
 * @mac_context_id: id of the mac ctx for which we are reducing TX power.
 * @pwr_restriction: TX power restriction in 1/8 dBms.
 * @dev_24: device TX power restriction in 1/8 dBms
 * @dev_52_low: device TX power restriction upper band - low
 * @dev_52_high: device TX power restriction upper band - high
+ * @per_chain_restriction: per chain restrictions
 */
-struct iwl_dev_tx_power_cmd_v2 {
+struct iwl_dev_tx_power_cmd_v3 {
 	__le32 set_mode;
 	__le32 mac_context_id;
 	__le16 pwr_restriction;
 	__le16 dev_24;
 	__le16 dev_52_low;
 	__le16 dev_52_high;
-} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
-
-#define IWL_NUM_CHAIN_LIMITS	2
-#define IWL_NUM_SUB_BANDS	5
-
-/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v2: version 2 of the command, embedded here for easier software handling
- * @per_chain_restriction: per chain restrictions
- */
-struct iwl_dev_tx_power_cmd_v3 {
-	/* v3 is just an extension of v2 - keep this here */
-	struct iwl_dev_tx_power_cmd_v2 v2;
 	__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
 } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
index f01dab0d0dac..0c294c9f98e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
@@ -7,6 +7,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -603,6 +604,8 @@ struct iwl_scan_req_umac_tail {
 * @uid: scan id, &enum iwl_umac_scan_uid_offsets
 * @ooc_priority: out of channel priority - &enum iwl_scan_priority
 * @general_flags: &enum iwl_umac_scan_general_flags
+ * @reserved2: for future use and alignment
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
 * @extended_dwell: dwell time for channels 1, 6 and 11
 * @active_dwell: dwell time for active scan
 * @passive_dwell: dwell time for passive scan
@@ -620,8 +623,10 @@ struct iwl_scan_req_umac {
 	__le32 flags;
 	__le32 uid;
 	__le32 ooc_priority;
-	/* SCAN_GENERAL_PARAMS_API_S_VER_1 */
-	__le32 general_flags;
+	/* SCAN_GENERAL_PARAMS_API_S_VER_4 */
+	__le16 general_flags;
+	u8 reserved2;
+	u8 scan_start_mac_id;
 	u8 extended_dwell;
 	u8 active_dwell;
 	u8 passive_dwell;
@@ -629,7 +634,7 @@ struct iwl_scan_req_umac {
 	__le32 max_out_time;
 	__le32 suspend_time;
 	__le32 scan_priority;
-	/* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+	/* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
 	u8 channel_flags;
 	u8 n_channels;
 	__le16 reserved;
@@ -718,8 +723,8 @@ struct iwl_scan_offload_profiles_query {
 * @status: one of SCAN_COMP_STATUS_*
 * @bt_status: BT on/off status
 * @last_channel: last channel that was scanned
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
+ * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
+ *	in &struct iwl_scan_req_umac.
 * @results: array of scan results, only "scanned_channels" of them are valid
 */
 struct iwl_umac_scan_iter_complete_notif {
@@ -728,9 +733,8 @@ struct iwl_umac_scan_iter_complete_notif {
 	u8 status;
 	u8 bt_status;
 	u8 last_channel;
-	__le32 tsf_low;
-	__le32 tsf_high;
+	__le64 start_tsf;
 	struct iwl_scan_results_notif results[];
-} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
 
 #endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index d1c4fb849111..6c8e3ca79323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -433,25 +433,42 @@ struct iwl_mvm_rm_sta_cmd {
 } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
 
 /**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @igtk:
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that support IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+	__le32 ctrl_flags;
+	u8 igtk[16];
+	u8 k1[16];
+	u8 k2[16];
+	__le32 key_id;
+	__le32 sta_id;
+	__le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/**
 * struct iwl_mvm_mgmt_mcast_key_cmd
 * ( MGMT_MCAST_KEY = 0x1f )
 * @ctrl_flags: %iwl_sta_key_flag
- * @IGTK:
- * @K1: unused
- * @K2: unused
+ * @igtk: IGTK master key
 * @sta_id: station ID that support IGTK
 * @key_id:
 * @receive_seq_cnt: initial RSC/PN needed for replay check
 */
 struct iwl_mvm_mgmt_mcast_key_cmd {
 	__le32 ctrl_flags;
-	u8 IGTK[16];
-	u8 K1[16];
-	u8 K2[16];
+	u8 igtk[32];
 	__le32 key_id;
 	__le32 sta_id;
 	__le64 receive_seq_cnt;
-} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
 
 struct iwl_mvm_wep_key {
 	u8 key_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 4144623e1616..59ca97a11b2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -89,7 +89,6 @@
 * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
 *	Should be set for 26/30 length MAC headers
 * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
- * @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
 * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
 * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
 * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
@@ -116,7 +115,6 @@ enum iwl_tx_flags {
 	TX_CMD_FLG_KEEP_SEQ_CTL		= BIT(18),
 	TX_CMD_FLG_MH_PAD		= BIT(20),
 	TX_CMD_FLG_RESP_TO_DRV		= BIT(21),
-	TX_CMD_FLG_CCMP_AGG		= BIT(22),
 	TX_CMD_FLG_TKIP_MIC_DONE	= BIT(23),
 	TX_CMD_FLG_DUR			= BIT(25),
 	TX_CMD_FLG_FW_DROP		= BIT(26),
@@ -149,7 +147,7 @@ enum iwl_tx_pm_timeouts {
 * @TX_CMD_SEC_EXT: extended cipher algorithm.
 * @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
 * @TX_CMD_SEC_KEY128: set for 104 bits WEP key.
- * @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
 *	from the table instead of from the TX command.
 *	If the key is taken from the key table its index should be given by the
 *	first byte of the TX command key field.
@@ -161,7 +159,7 @@ enum iwl_tx_cmd_sec_ctrl {
 	TX_CMD_SEC_EXT			= 0x04,
 	TX_CMD_SEC_GCMP			= 0x05,
 	TX_CMD_SEC_KEY128		= 0x08,
-	TC_CMD_SEC_KEY_FROM_TABLE	= 0x08,
+	TX_CMD_SEC_KEY_FROM_TABLE	= 0x08,
 };
 
 /* TODO: how does these values are OK with only 16 bit variable??? */
@@ -578,6 +576,85 @@ struct iwl_mvm_ba_notif {
 } __packed;
 
 /**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ */
+struct iwl_mvm_compressed_ba_tfd {
+	u8 q_num;
+	u8 reserved;
+	__le16 tfd_index;
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+	u8 q_num;
+	u8 tid;
+	__le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ *	expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+	IWL_MVM_BA_RESP_TX_AGG,
+	IWL_MVM_BA_RESP_TX_BAR,
+	IWL_MVM_BA_RESP_TX_AGG_FAIL,
+	IWL_MVM_BA_RESP_TX_UNDERRUN,
+	IWL_MVM_BA_RESP_TX_BT_KILL,
+	IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *	not a copy from the LQ command. Thus, if not the first rate was used
+ *	for Tx-ing then this value will be set to 0 by FW.
+ * @initial_rate: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ */
+struct iwl_mvm_compressed_ba_notif {
+	__le32 flags;
+	u8 sta_id;
+	u8 reduced_txp;
+	u8 initial_rate;
+	u8 retry_cnt;
+	__le32 query_byte_cnt;
+	__le16 query_frame_cnt;
+	__le16 txed;
+	__le16 done;
+	__le32 wireless_time;
+	__le32 tx_rate;
+	__le16 tfd_cnt;
+	__le16 ra_tid_cnt;
+	struct iwl_mvm_compressed_ba_tfd tfd[1];
+	struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
+
+/**
 * struct iwl_mac_beacon_cmd_v6 - beacon template command
 * @tx: the tx commands associated with the beacon frame
 * @template_id: currently equal to the mac context id of the coresponding
@@ -675,13 +752,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
 			tx_resp->frame_count) & 0xfff;
 }
 
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+	SCD_CFG_DISABLE_QUEUE		= 0x0,
+	SCD_CFG_ENABLE_QUEUE		= 0x1,
+	SCD_CFG_UPDATE_QUEUE_TID	= 0x2,
+};
+
 /**
 * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
 * @token:
 * @sta_id: station id
 * @tid:
 * @scd_queue: scheduler queue to confiug
- * @enable: 1 queue enable, 0 queue disable
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ *	Value is one of %iwl_scd_cfg_actions options
 * @aggregate: 1 aggregated queue, 0 otherwise
 * @tx_fifo: %enum iwl_mvm_tx_fifo
 * @window: BA window size
@@ -692,7 +777,7 @@ struct iwl_scd_txq_cfg_cmd {
 	u8 sta_id;
 	u8 tid;
 	u8 scd_queue;
-	u8 enable;
+	u8 action;
 	u8 aggregate;
 	u8 tx_fifo;
 	u8 window;
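
The compressed BA notification above ends in two back-to-back variable-length arrays; ra_tid[] begins only after tfd_cnt entries of tfd[]. A hedged parsing sketch (iwl_mvm_walk_compressed_ba is a hypothetical helper, not code from this commit):

/* Illustrative sketch: iterate both trailing arrays of a compressed BA
 * notification. ra_tid[] starts right after the tfd_cnt used tfd[] entries,
 * so it must be located by pointer arithmetic, not via the ra_tid[0] member.
 */
static void iwl_mvm_walk_compressed_ba(struct iwl_mvm_compressed_ba_notif *ba)
{
	u16 tfd_cnt = le16_to_cpu(ba->tfd_cnt);
	u16 ra_tid_cnt = le16_to_cpu(ba->ra_tid_cnt);
	struct iwl_mvm_compressed_ba_ratid *ra_tid =
		(void *)&ba->tfd[tfd_cnt];
	int i;

	for (i = 0; i < tfd_cnt; i++)
		pr_debug("queue %u acked up to index %u\n",
			 ba->tfd[i].q_num, le16_to_cpu(ba->tfd[i].tfd_index));

	for (i = 0; i < ra_tid_cnt; i++)
		pr_debug("RA/TID queue %u tid %u ssn %u\n",
			 ra_tid[i].q_num, ra_tid[i].tid,
			 le16_to_cpu(ra_tid[i].ssn));
}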
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 71076f02796e..97633690f3d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -205,7 +205,7 @@ enum {
 	/* Phy */
 	PHY_CONFIGURATION_CMD = 0x6a,
 	CALIB_RES_NOTIF_PHY_DB = 0x6b,
-	/* PHY_DB_CMD = 0x6c, */
+	PHY_DB_CMD = 0x6c,
 
 	/* ToF - 802.11mc FTM */
 	TOF_CMD = 0x10,
@@ -340,6 +340,11 @@ enum iwl_prot_offload_subcmd_ids {
 	STORED_BEACON_NTF = 0xFF,
 };
 
+enum iwl_fmac_debug_cmds {
+	LMAC_RD_WR = 0x0,
+	UMAC_RD_WR = 0x1,
+};
+
 /* command groups */
 enum {
 	LEGACY_GROUP = 0x0,
@@ -349,6 +354,7 @@ enum {
 	PHY_OPS_GROUP = 0x4,
 	DATA_PATH_GROUP = 0x5,
 	PROT_OFFLOAD_GROUP = 0xb,
+	DEBUG_GROUP = 0xf,
 };
 
 /**
@@ -482,13 +488,17 @@ struct iwl_nvm_access_cmd {
 * @block_size: the block size in powers of 2
 * @block_num: number of blocks specified in the command.
 * @device_phy_addr: virtual addresses from device side
+ *	32 bit address for API version 1, 64 bit address for API version 2.
 */
 struct iwl_fw_paging_cmd {
 	__le32 flags;
 	__le32 block_size;
 	__le32 block_num;
-	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
-} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+	union {
+		__le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
+		__le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
+	} device_phy_addr;
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
 
 /*
 * Fw items ID's
@@ -1973,8 +1983,9 @@ struct iwl_tdls_config_res {
 	struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
 } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
 
-#define TX_FIFO_MAX_NUM	8
-#define RX_FIFO_MAX_NUM	2
+#define TX_FIFO_MAX_NUM_9000		8
+#define TX_FIFO_MAX_NUM			15
+#define RX_FIFO_MAX_NUM			2
 #define TX_FIFO_INTERNAL_MAX_NUM	6
 
 /**
@@ -2000,6 +2011,21 @@ struct iwl_tdls_config_res {
 * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
 *	 set, the last 3 members don't exist.
 */
+struct iwl_shared_mem_cfg_v1 {
+	__le32 shared_mem_addr;
+	__le32 shared_mem_size;
+	__le32 sample_buff_addr;
+	__le32 sample_buff_size;
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+	__le32 rxfifo_size[RX_FIFO_MAX_NUM];
+	__le32 page_buff_addr;
+	__le32 page_buff_size;
+	__le32 rxfifo_addr;
+	__le32 internal_txfifo_addr;
+	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
 struct iwl_shared_mem_cfg {
 	__le32 shared_mem_addr;
 	__le32 shared_mem_size;
@@ -2013,7 +2039,7 @@ struct iwl_shared_mem_cfg {
 	__le32 rxfifo_addr;
 	__le32 internal_txfifo_addr;
 	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
 
 /**
 * VHT MU-MIMO group configuration
@@ -2129,4 +2155,48 @@ struct iwl_channel_switch_noa_notif {
 	__le32 id_and_color;
 } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
 
+/* Operation types for the debug mem access */
+enum {
+	DEBUG_MEM_OP_READ = 0,
+	DEBUG_MEM_OP_WRITE = 1,
+	DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+	__le32 op;
+	__le32 addr;
+	__le32 len;
+	__le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
+
+/* Status responses for the debug mem access */
+enum {
+	DEBUG_MEM_STATUS_SUCCESS = 0x0,
+	DEBUG_MEM_STATUS_FAILED = 0x1,
+	DEBUG_MEM_STATUS_LOCKED = 0x2,
+	DEBUG_MEM_STATUS_HIDDEN = 0x3,
+	DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+	__le32 status;
+	__le32 len;
+	__le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
 #endif /* __fw_api_h__ */
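
A hedged sketch of building the read form of the new command, mirroring what the debugfs handler earlier in this commit sends (the address value is only an example):

/* Illustrative: request 8 dwords of LMAC memory at a dword-aligned address */
struct iwl_dbg_mem_access_cmd cmd = {
	.op = cpu_to_le32(DEBUG_MEM_OP_READ),
	.addr = cpu_to_le32(0x800000),	/* example device address */
	.len = cpu_to_le32(8),		/* dwords, capped at DEBUG_MEM_MAX_SIZE_DWORDS */
};
struct iwl_host_cmd hcmd = {
	.id = iwl_cmd_id(LMAC_RD_WR, DEBUG_GROUP, 0),
	.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
	.data = { &cmd, },
	.len = { sizeof(cmd) },
};
/* ...send with iwl_mvm_send_cmd() and check the response's status field
 * against DEBUG_MEM_STATUS_SUCCESS, as iwl_dbgfs_mem_read() does above.
 */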
iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a05c00, .end = 0x00a05c18 }, { .start = 0x00a05400, .end = 0x00a056e8 }, { .start = 0x00a08000, .end = 0x00a098bc }, - { .start = 0x00adfc00, .end = 0x00adfd1c }, { .start = 0x00a02400, .end = 0x00a02758 }, }; @@ -559,7 +557,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) sizeof(struct iwl_fw_error_dump_fifo); } - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) { + for (i = 0; i < mem_cfg->num_txfifo_entries; i++) { if (!mem_cfg->txfifo_size[i]) continue; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 7e0cdbf8bf74..872066317fa5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -90,15 +90,6 @@ struct iwl_mvm_alive_data { u32 scd_base_addr; }; -static inline const struct fw_img * -iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) -{ - if (ucode_type >= IWL_UCODE_TYPE_MAX) - return NULL; - - return &mvm->fw->img[ucode_type]; -} - static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) { struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { @@ -385,9 +376,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm, /* send paging cmd to FW in case CPU2 has paging image */ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) { - int blk_idx; - __le32 dev_phy_addr; - struct iwl_fw_paging_cmd fw_paging_cmd = { + struct iwl_fw_paging_cmd paging_cmd = { .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | PAGING_CMD_IS_ENABLED | @@ -396,18 +385,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), .block_num = cpu_to_le32(mvm->num_of_paging_blk), }; + int blk_idx, size = sizeof(paging_cmd); + + /* A bit hard coded - but this is the old API and will be deprecated */ + if (!iwl_mvm_has_new_tx_api(mvm)) + size -= NUM_OF_FW_PAGING_BLOCKS * 4; /* loop for all paging blocks + CSS block */ for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - dev_phy_addr = - cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >> - PAGE_2_EXP_SIZE); - fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr; + dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys; + + addr = addr >> PAGE_2_EXP_SIZE; + + if (iwl_mvm_has_new_tx_api(mvm)) { + __le64 phy_addr = cpu_to_le64(addr); + + paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr; + } else { + __le32 phy_addr = cpu_to_le32(addr); + + paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr; + } } return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(fw_paging_cmd), &fw_paging_cmd); + 0, size, &paging_cmd); } /* @@ -580,9 +583,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) - fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); + fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); else - fw = iwl_get_ucode_image(mvm, ucode_type); + fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; mvm->cur_ucode = ucode_type; @@ -826,59 +829,48 @@ out: return ret; } -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) +static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) { - struct iwl_host_cmd cmd = { - .flags = CMD_WANT_SKB, - .data = { NULL, }, - .len = { 0, }, - }; -
struct iwl_shared_mem_cfg *mem_cfg; - struct iwl_rx_packet *pkt; - u32 i; + struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; + int i; - lockdep_assert_held(&mvm->mutex); + mvm->shared_mem_cfg.num_txfifo_entries = + ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) + mvm->shared_mem_cfg.txfifo_size[i] = + le32_to_cpu(mem_cfg->txfifo_size[i]); + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) + mvm->shared_mem_cfg.rxfifo_size[i] = + le32_to_cpu(mem_cfg->rxfifo_size[i]); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) - cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); - else - cmd.id = SHARED_MEM_CFG; + BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != + sizeof(mem_cfg->internal_txfifo_size)); - if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) - return; + for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size); + i++) + mvm->shared_mem_cfg.internal_txfifo_size[i] = + le32_to_cpu(mem_cfg->internal_txfifo_size[i]); +} - pkt = cmd.resp_pkt; - mem_cfg = (void *)pkt->data; - - mvm->shared_mem_cfg.shared_mem_addr = - le32_to_cpu(mem_cfg->shared_mem_addr); - mvm->shared_mem_cfg.shared_mem_size = - le32_to_cpu(mem_cfg->shared_mem_size); - mvm->shared_mem_cfg.sample_buff_addr = - le32_to_cpu(mem_cfg->sample_buff_addr); - mvm->shared_mem_cfg.sample_buff_size = - le32_to_cpu(mem_cfg->sample_buff_size); - mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr); - for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) +static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data; + int i; + + mvm->shared_mem_cfg.num_txfifo_entries = + ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) mvm->shared_mem_cfg.txfifo_size[i] = le32_to_cpu(mem_cfg->txfifo_size[i]); for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) mvm->shared_mem_cfg.rxfifo_size[i] = le32_to_cpu(mem_cfg->rxfifo_size[i]); - mvm->shared_mem_cfg.page_buff_addr = - le32_to_cpu(mem_cfg->page_buff_addr); - mvm->shared_mem_cfg.page_buff_size = - le32_to_cpu(mem_cfg->page_buff_size); - /* new API has more data */ + /* new API has more data, from rxfifo_addr field and on */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - mvm->shared_mem_cfg.rxfifo_addr = - le32_to_cpu(mem_cfg->rxfifo_addr); - mvm->shared_mem_cfg.internal_txfifo_addr = - le32_to_cpu(mem_cfg->internal_txfifo_addr); - BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) != sizeof(mem_cfg->internal_txfifo_size)); @@ -888,6 +880,33 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) mvm->shared_mem_cfg.internal_txfifo_size[i] = le32_to_cpu(mem_cfg->internal_txfifo_size[i]); } +} + +static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) +{ + struct iwl_host_cmd cmd = { + .flags = CMD_WANT_SKB, + .data = { NULL, }, + .len = { 0, }, + }; + struct iwl_rx_packet *pkt; + + lockdep_assert_held(&mvm->mutex); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); + else + cmd.id = SHARED_MEM_CFG; + + if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) + return; + + pkt = cmd.resp_pkt; + if (iwl_mvm_has_new_tx_api(mvm)) + iwl_mvm_parse_shared_mem_a000(mvm, pkt); + else + iwl_mvm_parse_shared_mem(mvm, pkt); IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: 
got memory offsets/sizes\n"); @@ -1027,19 +1046,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { struct iwl_mvm_sar_table sar_table; struct iwl_dev_tx_power_cmd cmd = { - .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; int ret, i, j, idx; int len = sizeof(cmd); - /* we can't do anything with the table if the FW doesn't support it */ - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_TX_POWER_CHAIN)) { - IWL_DEBUG_RADIO(mvm, - "FW doesn't support per-chain TX power settings.\n"); - return 0; - } - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v3); @@ -1096,27 +1107,27 @@ int iwl_mvm_up(struct iwl_mvm *mvm) * (for example, if we were in RFKILL) */ ret = iwl_run_init_mvm_ucode(mvm, false); - if (ret && !iwlmvm_mod_params.init_dbg) { + + if (iwlmvm_mod_params.init_dbg) + return 0; + + if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); /* this can't happen */ if (WARN_ON(ret > 0)) ret = -ERFKILL; goto error; } - if (!iwlmvm_mod_params.init_dbg) { - /* - * Stop and start the transport without entering low power - * mode. This will save the state of other components on the - * device that are triggered by the INIT firwmare (MFUART). - */ - _iwl_trans_stop_device(mvm->trans, false); - ret = _iwl_trans_start_hw(mvm->trans, false); - if (ret) - goto error; - } - if (iwlmvm_mod_params.init_dbg) - return 0; + /* + * Stop and start the transport without entering low power + * mode. This will save the state of other components on the + * device that are triggered by the INIT firmware (MFUART). + */ + _iwl_trans_stop_device(mvm->trans, false); + ret = _iwl_trans_start_hw(mvm->trans, false); + if (ret) + goto error; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { @@ -1214,9 +1225,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* TODO: read the budget from BIOS / Platform NVM */ - if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) + if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) { ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, mvm->cooling_dev.cur_state); + if (ret) + goto error; + } #else /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 69c42ce45b8a..6b962d6b067a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -539,6 +539,11 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT, 0); + else + iwl_mvm_disable_txq(mvm, + IWL_MVM_DQA_P2P_DEVICE_QUEUE, + vif->hw_queue[0], IWL_MAX_TID_COUNT, + 0); break; case NL80211_IFTYPE_AP: @@ -769,26 +774,6 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->ac[txf].fifos_mask = BIT(txf); } - if (vif->type == NL80211_IFTYPE_AP) { - /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ - cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= - BIT(IWL_MVM_TX_FIFO_MCAST); - - /* - * in AP mode, pass probe requests and beacons from other APs - * (needed for ht protection); when there're no any associated - * station don't ask FW to pass beacons to prevent unnecessary - * wake-ups.
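 * [Editorial note, not part of the original patch: this AP-mode block is not
 *  dropped; it moves below into iwl_mvm_mac_ctxt_cmd_fill_ap(), which now
 *  also receives the whole command so it can set fifos_mask and filter_flags
 *  itself, and both call sites gain the extra argument:
 *
 *	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
 *				     action == FW_CTXT_ACTION_ADD);
 * ]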
- */ - cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { - cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); - } else { - IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); - } - } - if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); @@ -1186,6 +1171,7 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, */ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mac_ctx_cmd *cmd, struct iwl_mac_data_ap *ctxt_ap, bool add) { @@ -1196,6 +1182,23 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, .beacon_device_ts = 0 }; + /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ + cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST); + + /* + * in AP mode, pass probe requests and beacons from other APs + * (needed for ht protection); when there are no associated + * stations, don't ask FW to pass beacons to prevent unnecessary + * wake-ups. + */ + cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { + cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); + IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); + } else { + IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); + } + ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); ctxt_ap->bi_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); @@ -1253,7 +1256,7 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for ap mode */ - iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap, + iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); @@ -1272,7 +1275,7 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for GO mode */ - iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap, + iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD); cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 5dd77e336617..318efd814037 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; - BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4); + BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->cipher_suites = mvm->ciphers; @@ -479,17 +479,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->n_cipher_suites++; } - /* - * Enable 11w if advertised by firmware and software crypto - * is not enabled (as the firmware will interpret some mgmt - * packets, so enabling it with software crypto isn't safe) + /* Enable 11w if software crypto is not enabled (as the + * firmware will interpret some mgmt packets, so enabling it + * with software crypto isn't safe).
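 * [Editorial note, not part of the original patch: a sketch of the resulting
 *  MFP cipher registration, where n stands for hw->wiphy->n_cipher_suites;
 *  AES-CMAC is always advertised, the BIP-GMAC suites only with the new
 *  RX API:
 *
 *	mvm->ciphers[n++] = WLAN_CIPHER_SUITE_AES_CMAC;
 *	if (iwl_mvm_has_new_rx_api(mvm)) {
 *		mvm->ciphers[n++] = WLAN_CIPHER_SUITE_BIP_GMAC_128;
 *		mvm->ciphers[n++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
 *	}
 *
 *  which is why the BUILD_BUG_ON slack grows from +4 to +6 and
 *  IWL_MVM_NUM_CIPHERS from 8 to 10 in mvm.h.]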
*/ - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && - !iwlwifi_mod_params.sw_crypto) { + if (!iwlwifi_mod_params.sw_crypto) { ieee80211_hw_set(hw, MFP_CAPABLE); mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; hw->wiphy->n_cipher_suites++; + if (iwl_mvm_has_new_rx_api(mvm)) { + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_BIP_GMAC_128; + hw->wiphy->n_cipher_suites++; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_BIP_GMAC_256; + hw->wiphy->n_cipher_suites++; + } } /* currently FW API supports only one optional cipher scheme */ @@ -539,9 +545,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | REGULATORY_DISABLE_BEACON_HINTS; - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD) - hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; - + hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; @@ -645,6 +649,16 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_SCAN_START_TIME); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BSS_PARENT_TSF); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL); + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP @@ -712,6 +726,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (ret) iwl_mvm_leds_exit(mvm); + if (mvm->cfg->vht_mu_mimo_supported) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); + return ret; } @@ -1251,20 +1269,18 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { struct iwl_dev_tx_power_cmd cmd = { - .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .v3.v2.mac_context_id = + .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .v3.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power), + .v3.pwr_restriction = cpu_to_le16(8 * tx_power), }; int len = sizeof(cmd); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v3); - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN)) - len = sizeof(cmd.v3.v2); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } @@ -2221,6 +2237,10 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, case NL80211_IFTYPE_ADHOC: iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; + case NL80211_IFTYPE_MONITOR: + if (changes & BSS_CHANGED_MU_GROUPS) + iwl_mvm_update_mu_groups(mvm, vif); + break; default: /* shouldn't happen */ WARN_ON_ONCE(1); @@ -2747,6 +2767,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; case WLAN_CIPHER_SUITE_WEP40: @@ -2780,9 +2802,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, * GTK on AP interface is a 
TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. - * CMAC in AP/IBSS modes must be done in software. + * CMAC/GMAC in AP/IBSS modes must be done in software. */ - if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) ret = -EOPNOTSUPP; else ret = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6a615bb73042..d17cbf603f7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -604,16 +604,9 @@ enum iwl_mvm_tdls_cs_state { }; struct iwl_mvm_shared_mem_cfg { - u32 shared_mem_addr; - u32 shared_mem_size; - u32 sample_buff_addr; - u32 sample_buff_size; - u32 txfifo_addr; + int num_txfifo_entries; u32 txfifo_size[TX_FIFO_MAX_NUM]; u32 rxfifo_size[RX_FIFO_MAX_NUM]; - u32 page_buff_addr; - u32 page_buff_size; - u32 rxfifo_addr; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; @@ -699,6 +692,10 @@ struct iwl_mvm_baid_data { * it. In this state, when a new queue is needed to be allocated but no * such free queue exists, an inactive queue might be freed and given to * the new RA/TID. + * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured + * This is the state of a queue that has had traffic pass through it, but + * needs to be reconfigured for some reason, e.g. the queue needs to + * become unshared and have aggregations re-enabled on it. */ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, @@ -706,10 +703,11 @@ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_SHARED, IWL_MVM_QUEUE_INACTIVE, + IWL_MVM_QUEUE_RECONFIGURING, }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) -#define IWL_MVM_NUM_CIPHERS 8 +#define IWL_MVM_NUM_CIPHERS 10 struct iwl_mvm { /* for logger access */ @@ -769,6 +767,7 @@ struct iwl_mvm { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ + u8 txq_tid; /* The TID "owner" of this queue */ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ /* Timestamp for inactivation per TID of this queue */ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; @@ -822,6 +821,12 @@ struct iwl_mvm { /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; + /* start time of last scan in TSF of the mac that requested the scan */ + u64 scan_start; + + /* the vif that requested the current scan */ + struct iwl_mvm_vif *scan_vif; + /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; @@ -1124,6 +1129,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3); } +static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) +{ + return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && + (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE); +} + +static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue) +{ + return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && + (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE); +} + static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; @@ -1194,6 +1211,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } +static inline bool
iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) +{ + /* TODO - replace with TLV once defined */ + return mvm->trans->cfg->use_tfh; +} + static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { #ifdef CONFIG_THERMAL @@ -1245,6 +1268,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); +void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, @@ -1281,8 +1305,6 @@ static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, tx_cmd->sec_ctl = TX_CMD_SEC_CCM; memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); } static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 7a686f67f007..eade099b6dbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -66,7 +66,6 @@ *****************************************************************************/ #include <linux/firmware.h> #include <linux/rtnetlink.h> -#include <linux/acpi.h> #include "iwl-trans.h" #include "iwl-csr.h" #include "mvm.h" @@ -751,96 +750,6 @@ exit: return resp_cp; } -#ifdef CONFIG_ACPI -#define WRD_METHOD "WRDD" -#define WRDD_WIFI (0x07) -#define WRDD_WIGIG (0x10) - -static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd) -{ - union acpi_object *mcc_pkg, *domain_type, *mcc_value; - u32 i; - - if (wrdd->type != ACPI_TYPE_PACKAGE || - wrdd->package.count < 2 || - wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || - wrdd->package.elements[0].integer.value != 0) { - IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n"); - return 0; - } - - for (i = 1 ; i < wrdd->package.count ; ++i) { - mcc_pkg = &wrdd->package.elements[i]; - - if (mcc_pkg->type != ACPI_TYPE_PACKAGE || - mcc_pkg->package.count < 2 || - mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || - mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { - mcc_pkg = NULL; - continue; - } - - domain_type = &mcc_pkg->package.elements[0]; - if (domain_type->integer.value == WRDD_WIFI) - break; - - mcc_pkg = NULL; - } - - if (mcc_pkg) { - mcc_value = &mcc_pkg->package.elements[1]; - return mcc_value->integer.value; - } - - return 0; -} - -static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) -{ - acpi_handle root_handle; - acpi_handle handle; - struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - u32 mcc_val; - - root_handle = ACPI_HANDLE(mvm->dev); - if (!root_handle) { - IWL_DEBUG_LAR(mvm, - "Could not retrieve root port ACPI handle\n"); - return -ENOENT; - } - - /* Get the method's handle */ - status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); - if (ACPI_FAILURE(status)) { - IWL_DEBUG_LAR(mvm, "WRD method not found\n"); - return -ENOENT; - } - - /* Call WRDD with no arguments */ - status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); - if (ACPI_FAILURE(status)) { - IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status); - return -ENOENT; - } - - mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer); - kfree(wrdd.pointer); - if (!mcc_val) - return -ENOENT; - - mcc[0] = (mcc_val >> 8) & 0xff; - mcc[1] = mcc_val & 0xff; - mcc[2] = '\0'; - return 0; -} 
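/* [Editorial note, not part of the original patch: the ACPI WRDD lookup
 * removed above is not lost; per the iwl_mvm_init_mcc() hunk below, callers
 * switch to a shared helper, roughly:
 *
 *	char mcc[3];
 *
 *	if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
 *	    !iwl_get_bios_mcc(mvm->dev, mcc))
 *		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
 *					     MCC_SOURCE_BIOS, NULL);
 *
 * iwl_get_bios_mcc() presumably lives in the shared iwlwifi code; its
 * definition is outside the files shown here.] */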
-#else /* CONFIG_ACPI */ -static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) -{ - return -ENOENT; -} -#endif - int iwl_mvm_init_mcc(struct iwl_mvm *mvm) { bool tlv_lar; @@ -884,7 +793,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) return -EIO; if (iwl_mvm_is_wifi_mcc_supported(mvm) && - !iwl_mvm_get_bios_mcc(mvm, mcc)) { + !iwl_get_bios_mcc(mvm->dev, mcc)) { kfree(regd); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, MCC_SOURCE_BIOS, NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 55d9096da68c..05fe6dd1a2c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -359,6 +359,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(BT_COEX_CI), HCMD_NAME(PHY_CONFIGURATION_CMD), HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), + HCMD_NAME(PHY_DB_CMD), HCMD_NAME(SCAN_OFFLOAD_COMPLETE), HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), HCMD_NAME(SCAN_OFFLOAD_CONFIG_CMD), @@ -652,11 +653,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, /* the hardware splits the A-MSDU */ if (mvm->cfg->mq_rx_supported) trans_cfg.rx_buf_size = IWL_AMSDU_4K; - trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_WIDE_CMD_HDR); - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE) - trans_cfg.bc_table_dword = true; + trans->wide_cmd_header = true; + trans_cfg.bc_table_dword = true; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); @@ -711,37 +710,21 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); - if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name, - "not allowing power-up and not having nvm_file\n")) + err = iwl_trans_start_hw(mvm->trans); + if (err) goto out_free; - /* - * Even if nvm exists in the nvm_file driver should read again the nvm - * from the nic because there might be entries that exist in the OTP - * and not in the file. 
- * for nics with no_power_up_nic_in_init: rely completley on nvm_file - */ - if (cfg->no_power_up_nic_in_init && mvm->nvm_file_name) { - err = iwl_nvm_init(mvm, false); - if (err) - goto out_free; - } else { - err = iwl_trans_start_hw(mvm->trans); - if (err) - goto out_free; - - mutex_lock(&mvm->mutex); - iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); - err = iwl_run_init_mvm_ucode(mvm, true); - if (!err || !iwlmvm_mod_params.init_dbg) - iwl_mvm_stop_device(mvm); - iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); - mutex_unlock(&mvm->mutex); - /* returns 0 if successful, 1 if success but in rfkill */ - if (err < 0 && !iwlmvm_mod_params.init_dbg) { - IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); - goto out_free; - } + mutex_lock(&mvm->mutex); + iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); + err = iwl_run_init_mvm_ucode(mvm, true); + if (!err || !iwlmvm_mod_params.init_dbg) + iwl_mvm_stop_device(mvm); + iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); + mutex_unlock(&mvm->mutex); + /* returns 0 if successful, 1 if success but in rfkill */ + if (err < 0 && !iwlmvm_mod_params.init_dbg) { + IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); + goto out_free; } scan_size = iwl_mvm_scan_size(mvm); @@ -783,8 +766,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); - if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) - iwl_trans_op_mode_leave(trans); + iwl_trans_op_mode_leave(trans); + ieee80211_free_hw(mvm->hw); return NULL; } @@ -857,9 +840,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, async_handlers_wk); struct iwl_async_handler_entry *entry, *tmp; - struct list_head local_list; - - INIT_LIST_HEAD(&local_list); + LIST_HEAD(local_list); /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ @@ -966,10 +947,11 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) + if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); - else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) + else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD)) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else iwl_mvm_rx_common(mvm, rxb, pkt); @@ -981,13 +963,14 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) + if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); - else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP && - pkt->hdr.cmd == RX_QUEUES_NOTIFICATION)) + else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, + RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, rxb, 0); - else if (pkt->hdr.cmd == FRAME_RELEASE) + else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); @@ -1666,13 +1649,14 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) + if (unlikely(cmd == 
WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); - else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION && - pkt->hdr.group_id == DATA_PATH_GROUP)) + else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, + RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, rxb, queue); - else + else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index ff85865b1dda..af6d10c23e5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -694,8 +694,7 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, /* enable PM on p2p if p2p stand alone */ if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) - p2p_mvmvif->pm_enabled = true; + p2p_mvmvif->pm_enabled = true; return; } @@ -707,12 +706,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, ap_mvmvif->phy_ctxt->id); /* clients are not stand alone: enable PM if DCM */ - if (!(client_same_channel || ap_same_channel) && - (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) { + if (!(client_same_channel || ap_same_channel)) { if (vifs->bss_active) bss_mvmvif->pm_enabled = true; - if (vifs->p2p_active && - (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)) + if (vifs->p2p_active) p2p_mvmvif->pm_enabled = true; return; } @@ -721,12 +718,10 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, * There is only one channel in the system and there are only * bss and p2p clients that share it */ - if (client_same_channel && !vifs->ap_active && - (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) { + if (client_same_channel && !vifs->ap_active) { /* share same channel*/ bss_mvmvif->pm_enabled = true; - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM) - p2p_mvmvif->pm_enabled = true; + p2p_mvmvif->pm_enabled = true; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index df6c32caa5f0..a57c6ef5bc14 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -132,7 +132,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, IEEE80211_CCMP_PN_LEN) <= 0) return -1; - memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + if (!(stats->flag & RX_FLAG_AMSDU_MORE)) + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); stats->flag |= RX_FLAG_PN_VALIDATED; return 0; @@ -417,10 +418,11 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, ssn = ieee80211_sn_inc(ssn); - /* holes are valid since nssn indicates frames were received. */ - if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list)) - continue; - /* Empty the list. Will have more than one frame for A-MSDU */ + /* + * Empty the list. Will have more than one frame for A-MSDU. + * Empty list is valid as well since nssn indicates frames were + * received. 
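 * [Editorial note, not part of the original patch: worked example of the
 *  invariant relied on here: with head_sn == 100 and nssn == 103, slots
 *  100..102 are drained in order; a hole at 101 is simply an empty list
 *  and the loop moves on, which is safe since an nssn of 103 indicates
 *  everything below 103 was already received.]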
+ */ while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, @@ -433,7 +435,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, if (reorder_buf->num_stored && !reorder_buf->removed) { u16 index = reorder_buf->head_sn % reorder_buf->buf_size; - while (!skb_peek_tail(&reorder_buf->entries[index])) + while (skb_queue_empty(&reorder_buf->entries[index])) index = (index + 1) % reorder_buf->buf_size; /* modify timer to match next frame's expiration time */ mod_timer(&reorder_buf->reorder_timer, @@ -451,17 +453,17 @@ void iwl_mvm_reorder_timer_expired(unsigned long data) u16 sn = 0, index = 0; bool expired = false; - spin_lock_bh(&buf->lock); + spin_lock(&buf->lock); if (!buf->num_stored || buf->removed) { - spin_unlock_bh(&buf->lock); + spin_unlock(&buf->lock); return; } for (i = 0; i < buf->buf_size ; i++) { index = (buf->head_sn + i) % buf->buf_size; - if (!skb_peek_tail(&buf->entries[index])) + if (skb_queue_empty(&buf->entries[index])) continue; if (!time_after(jiffies, buf->reorder_time[index] + RX_REORDER_BUF_TIMEOUT_MQ)) @@ -491,7 +493,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data) buf->reorder_time[index] + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } - spin_unlock_bh(&buf->lock); + spin_unlock(&buf->lock); } static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, @@ -502,7 +504,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, struct iwl_mvm_reorder_buffer *reorder_buf; u8 baid = data->baid; - if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID)) + if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid)) return; rcu_read_lock(); @@ -589,6 +591,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; + /* + * This also covers the case of receiving a Block Ack Request + * outside a BA session; we'll pass it to mac80211 and that + * then sends a delBA action frame. + */ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) return false; @@ -598,9 +605,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, mvm_sta = iwl_mvm_sta_from_mac80211(sta); - /* not a data packet */ - if (!ieee80211_is_data_qos(hdr->frame_control) || - is_multicast_ether_addr(hdr->addr1)) + /* not a data packet or a bar */ + if (!ieee80211_is_back_req(hdr->frame_control) && + (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1))) return false; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) @@ -624,6 +632,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, spin_lock_bh(&buffer->lock); + if (ieee80211_is_back_req(hdr->frame_control)) { + iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); + goto drop; + } + /* * If there was a significant jump in the nssn - adjust. 
* If the SN is smaller than the NSSN it might need to first go into @@ -883,6 +896,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, u8 *qc = ieee80211_get_qos_ctl(hdr); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + if (!(desc->amsdu_info & + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME)) + rx_status->flag |= RX_FLAG_AMSDU_MORE; } if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) iwl_mvm_agg_rx_received(mvm, baid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index dac120f8861b..f279fdd6eb44 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -141,6 +141,7 @@ struct iwl_mvm_scan_params { struct cfg80211_match_set *match_sets; int n_scan_plans; struct cfg80211_sched_scan_plan *scan_plans; + u32 measurement_dwell; }; static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) @@ -232,6 +233,27 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) return IWL_SCAN_TYPE_WILD; } +static int +iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm, + struct cfg80211_scan_request *req, + struct iwl_mvm_scan_params *params) +{ + if (!req->duration) + return 0; + + if (req->duration_mandatory && + req->duration > scan_timing[params->type].max_out_time) { + IWL_DEBUG_SCAN(mvm, + "Measurement scan - too long dwell %hu (max out time %u)\n", + req->duration, + scan_timing[params->type].max_out_time); + return -EOPNOTSUPP; + } + + return min_t(u32, (u32)req->duration, + scan_timing[params->type].max_out_time); +} + static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) { /* require rrm scan whenever the fw supports it */ @@ -717,22 +739,6 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); } -static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm, - enum iwl_scan_priority_ext prio) -{ - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY)) - return cpu_to_le32(prio); - - if (prio <= IWL_SCAN_PRIORITY_EXT_2) - return cpu_to_le32(IWL_SCAN_PRIORITY_LOW); - - if (prio <= IWL_SCAN_PRIORITY_EXT_4) - return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM); - - return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); -} - static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) @@ -743,7 +749,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, cmd->extended_dwell = scan_timing[params->type].dwell_extended; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); - cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); + cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, @@ -1033,21 +1039,24 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { - cmd->extended_dwell = scan_timing[params->type].dwell_extended; - cmd->active_dwell = scan_timing[params->type].dwell_active; - cmd->passive_dwell = scan_timing[params->type].dwell_passive; + if (params->measurement_dwell) { + cmd->active_dwell = params->measurement_dwell; + cmd->passive_dwell = params->measurement_dwell; + cmd->extended_dwell = params->measurement_dwell; + } else { + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + 
cmd->extended_dwell = scan_timing[params->type].dwell_extended; + } cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); - cmd->scan_priority = - iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); + cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = - iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); else - cmd->ooc_priority = - iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2); + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1067,11 +1076,11 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, } } -static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, +static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { - int flags = 0; + u16 flags = 0; if (params->n_ssids == 0) flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; @@ -1093,6 +1102,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (!iwl_mvm_is_regular_scan(params)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; + if (params->measurement_dwell) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; + #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->scan_iter_notif_enabled) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; @@ -1119,6 +1131,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->fw->ucode_capa.n_scan_channels; int uid, i; u32 ssid_bitmap = 0; + struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); @@ -1136,8 +1149,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_uid_status[uid] = type; cmd->uid = cpu_to_le32(uid); - cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params, + cmd->general_flags = cpu_to_le16(iwl_mvm_scan_umac_flags(mvm, params, vif)); + cmd->scan_start_mac_id = scan_vif->id; if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); @@ -1289,6 +1303,12 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_get_scan_type(mvm, vif->type == NL80211_IFTYPE_P2P_DEVICE); + ret = iwl_mvm_get_measurement_dwell(mvm, req, &params); + if (ret < 0) + return ret; + + params.measurement_dwell = ret; + iwl_mvm_build_scan_probe(mvm, vif, ies, &params); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { @@ -1315,6 +1335,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); mvm->scan_status |= IWL_MVM_SCAN_REGULAR; + mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); queue_delayed_work(system_wq, &mvm->scan_timeout_dwork, @@ -1437,9 +1458,12 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = aborted, + .scan_start_tsf = mvm->scan_start, }; + memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN); ieee80211_scan_completed(mvm->hw, &info); + mvm->scan_vif = NULL; iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { @@ -1473,6 +1497,8 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; u8 buf[256]; + mvm->scan_start = le64_to_cpu(notif->start_tsf); + IWL_DEBUG_SCAN(mvm, "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", notif->status, notif->scanned_channels, @@ -1485,6 +1511,10 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, ieee80211_sched_scan_results(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; } + + IWL_DEBUG_SCAN(mvm, + "UMAC Scan iteration complete: scan started at %llu (TSF)\n", + mvm->scan_start); } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 3130b9c68a74..fc771885e383 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) continue; + /* Don't try and take queues being reconfigured */ + if (mvm->queue_info[queue].status == + IWL_MVM_QUEUE_RECONFIGURING) + continue; + ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; } @@ -501,31 +506,37 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, queue = ac_to_queue[IEEE80211_AC_VO]; /* Make sure queue found (or not) is legal */ - if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE && - queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) || - (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE && - queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) || - (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) { + if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && + !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && + (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) { IWL_ERR(mvm, "No DATA queues available to share\n"); - queue = -ENOSPC; + return -ENOSPC; + } + + /* Make sure the queue isn't in the middle of being reconfigured */ + if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) { + IWL_ERR(mvm, + "TXQ %d is in the middle of re-config - try again\n", + queue); + return -EBUSY; } return queue; } /* - * If a given queue has a higher AC than the TID stream that is being added to - * it, the queue needs to be redirected to the lower AC. This function does that + * If a given queue has a higher AC than the TID stream that is being compared + * to, the queue needs to be redirected to the lower AC. This function does that * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. 
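 * [Editorial note, not part of the original patch: hypothetical usage
 *  sketch; for a queue sitting on a high-AC FIFO that should now serve a
 *  lower-AC TID, the unshare path added below calls:
 *
 *	iwl_mvm_scd_queue_redirect(mvm, queue, tid,
 *				   tid_to_mac80211_ac[tid], ssn,
 *				   wdg_timeout, true);
 *
 *  i.e. stop the MAC queues, drain and disable the TXQ, then re-enable it
 *  on the lower-AC FIFO with this TID as the new queue owner.]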
*/ -static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, - int ac, int ssn, unsigned int wdg_timeout, - bool force) +int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, + int ac, int ssn, unsigned int wdg_timeout, + bool force) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, - .enable = 0, + .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; unsigned long mq; @@ -551,11 +562,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; + cmd.tid = mvm->queue_info[queue].txq_tid; mq = mvm->queue_info[queue].hw_queue_to_mac80211; shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); spin_unlock_bh(&mvm->queue_info_lock); - IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n", + IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop MAC queues and wait for this queue to empty */ @@ -576,9 +588,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, ret); /* Make sure the SCD wrptr is correctly set before reconfiguring */ - iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac], - cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, - ssn, wdg_timeout); + iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); + + /* Update the TID "owner" of the queue */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].txq_tid = tid; + spin_unlock_bh(&mvm->queue_info_lock); /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ @@ -709,7 +724,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (WARN_ON(queue <= 0)) { IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id); - return -ENOSPC; + return queue; } /* @@ -728,21 +743,23 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (using_inactive_queue) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, - .enable = 0, + .action = SCD_CFG_DISABLE_QUEUE, }; - u8 ac; + u8 txq_curr_ac; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); spin_lock_bh(&mvm->queue_info_lock); - ac = mvm->queue_info[queue].mac80211_ac; + txq_curr_ac = mvm->queue_info[queue].mac80211_ac; cmd.sta_id = mvm->queue_info[queue].ra_sta_id; - cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac]; + cmd.tid = mvm->queue_info[queue].txq_tid; spin_unlock_bh(&mvm->queue_info_lock); /* Disable the queue */ - iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, - true); + if (disable_agg_tids) + iwl_mvm_invalidate_sta_queue(mvm, queue, + disable_agg_tids, false); iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); @@ -758,6 +775,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, return ret; } + + /* If TXQ is allocated to another STA, update removal in FW */ + if (cmd.sta_id != mvmsta->sta_id) + iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); } IWL_DEBUG_TX_QUEUES(mvm, @@ -827,6 +848,119 @@ out_err: return ret; } +static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_UPDATE_QUEUE_TID, + }; + s8 sta_id; + int tid; + unsigned long tid_bitmap; + int ret; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + sta_id = 
mvm->queue_info[queue].ra_sta_id; + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) + return; + + /* Find any TID for queue */ + tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); + cmd.tid = tid; + cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", + queue, ret); + else + IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", + queue, tid); +} + +static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + s8 sta_id; + int tid = -1; + unsigned long tid_bitmap; + unsigned int wdg_timeout; + int ssn; + int ret = true; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + sta_id = mvm->queue_info[queue].ra_sta_id; + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + /* Find TID for queue, and make sure it is the only one on the queue */ + tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); + if (tid_bitmap != BIT(tid)) { + IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", + queue, tid_bitmap); + return; + } + + IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, + tid); + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + + ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); + + ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, + tid_to_mac80211_ac[tid], ssn, + wdg_timeout, true); + if (ret) { + IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); + return; + } + + /* If aggs should be turned back on - do it */ + if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { + struct iwl_mvm_add_sta_cmd cmd = {0}; + + mvmsta->tid_disable_agg &= ~BIT(tid); + + cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); + cmd.sta_id = mvmsta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; + cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); + if (!ret) { + IWL_DEBUG_TX_QUEUES(mvm, + "TXQ #%d is now aggregated again\n", + queue); + + /* Mark queue internally as aggregating again */ + iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); + } + } + + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; + spin_unlock_bh(&mvm->queue_info_lock); +} + static inline u8 iwl_mvm_tid_to_ac_queue(int tid) { if (tid == IWL_MAX_TID_COUNT) @@ -894,13 +1028,42 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long deferred_tid_traffic; - int sta_id, tid; + int queue, sta_id, tid; /* Check inactivity of queues */ iwl_mvm_inactivity_check(mvm); mutex_lock(&mvm->mutex); + /* Reconfigure queues requiring reconfiguration */ + for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) { + bool reconfig; + bool change_owner; + + spin_lock_bh(&mvm->queue_info_lock); + reconfig = (mvm->queue_info[queue].status == + IWL_MVM_QUEUE_RECONFIGURING); + + /*
+ * We need to take into account a situation in which a TXQ was + * allocated to TID x, and then turned shared by adding TIDs y + * and z. If TID x becomes inactive and is removed from the TXQ, + * ownership must be given to one of the remaining TIDs. + * This is mainly because if TID x continues - a new queue can't + * be allocated for it as long as it is an owner of another TXQ. + */ + change_owner = !(mvm->queue_info[queue].tid_bitmap & + BIT(mvm->queue_info[queue].txq_tid)) && + (mvm->queue_info[queue].status == + IWL_MVM_QUEUE_SHARED); + spin_unlock_bh(&mvm->queue_info_lock); + + if (reconfig) + iwl_mvm_unshare_queue(mvm, queue); + else if (change_owner) + iwl_mvm_change_queue_owner(mvm, queue); + } + /* Go over all stations with deferred traffic */ for_each_set_bit(sta_id, mvm->sta_deferred_frames, IWL_MVM_STATION_COUNT) { @@ -963,6 +1126,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, return 0; } +/* + * In DQA mode, after a HW restart the queues should be allocated as before, in + * order to avoid race conditions when there are shared queues. This function + * does the re-mapping and queue allocation. + * + * Note that re-enabling aggregations isn't done in this function. + */ +static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta) +{ + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); + int i; + struct iwl_trans_txq_scd_cfg cfg = { + .sta_id = mvm_sta->sta_id, + .frame_limit = IWL_FRAME_LIMIT, + }; + + /* Make sure reserved queue is still marked as such (or allocated) */ + mvm->queue_info[mvm_sta->reserved_queue].status = + IWL_MVM_QUEUE_RESERVED; + + for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { + struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; + int txq_id = tid_data->txq_id; + int ac; + u8 mac_queue; + + if (txq_id == IEEE80211_INVAL_HW_QUEUE) + continue; + + skb_queue_head_init(&tid_data->deferred_tx_frames); + + ac = tid_to_mac80211_ac[i]; + mac_queue = mvm_sta->vif->hw_queue[ac]; + + cfg.tid = i; + cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || + txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); + + IWL_DEBUG_TX_QUEUES(mvm, + "Re-mapping sta %d tid %d to queue %d\n", + mvm_sta->sta_id, i, txq_id); + + iwl_mvm_enable_txq(mvm, txq_id, mac_queue, + IEEE80211_SEQ_TO_SN(tid_data->seq_number), + &cfg, wdg_timeout); + + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; + } + + atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0); +} + int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -985,6 +1203,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, spin_lock_init(&mvm_sta->lock); + /* In DQA mode, if this is a HW restart, re-alloc existing queues */ + if (iwl_mvm_is_dqa_supported(mvm) && + test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); + goto update_fw; + } + mvm_sta->sta_id = sta_id; mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); @@ -1048,6 +1273,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, goto err; } +update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); if (ret) goto err; @@ -1071,13 +1297,6 @@ err: return ret; } -int iwl_mvm_update_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); -} - int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain) { @@ -1270,9 +1489,31 @@ int 
iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); /* If DQA is supported - the queues can be disabled now */ - if (iwl_mvm_is_dqa_supported(mvm)) + if (iwl_mvm_is_dqa_supported(mvm)) { + u8 reserved_txq = mvm_sta->reserved_queue; + enum iwl_mvm_queue_status *status; + iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + /* + * If no traffic has gone through the reserved TXQ - it + * is still marked as IWL_MVM_QUEUE_RESERVED, and + * should be manually marked as free again + */ + spin_lock_bh(&mvm->queue_info_lock); + status = &mvm->queue_info[reserved_txq].status; + if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && + (*status != IWL_MVM_QUEUE_FREE), + "sta_id %d reserved txq %d status %d", + mvm_sta->sta_id, reserved_txq, *status)) { + spin_unlock_bh(&mvm->queue_info_lock); + return -EINVAL; + } + + *status = IWL_MVM_QUEUE_FREE; + spin_unlock_bh(&mvm->queue_info_lock); + } + if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == mvm_sta->sta_id) { /* if associated - we can't remove the AP STA now */ @@ -1802,11 +2043,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, baid_data->baid = baid; baid_data->timeout = timeout; baid_data->last_rx = jiffies; - init_timer(&baid_data->session_timer); - baid_data->session_timer.function = - iwl_mvm_rx_agg_session_expired; - baid_data->session_timer.data = - (unsigned long)&mvm->baid_map[baid]; + setup_timer(&baid_data->session_timer, + iwl_mvm_rx_agg_session_expired, + (unsigned long)&mvm->baid_map[baid]); baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_id = mvm_sta->sta_id; @@ -1956,7 +2195,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EIO; } - spin_lock_bh(&mvm->queue_info_lock); + spin_lock(&mvm->queue_info_lock); /* * Note the possible cases: @@ -1967,14 +2206,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * non-DQA mode, since the TXQ hasn't yet been allocated */ txq_id = mvmsta->tid_data[tid].txq_id; - if (!iwl_mvm_is_dqa_supported(mvm) || + if (iwl_mvm_is_dqa_supported(mvm) && + unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { + ret = -ENXIO; + IWL_DEBUG_TX_QUEUES(mvm, + "Can't start tid %d agg on shared queue!\n", + tid); + goto release_locks; + } else if (!iwl_mvm_is_dqa_supported(mvm) || mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, mvm->first_agg_queue, mvm->last_agg_queue); if (txq_id < 0) { ret = txq_id; - spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto release_locks; } @@ -1982,7 +2227,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } - spin_unlock_bh(&mvm->queue_info_lock); + + spin_unlock(&mvm->queue_info_lock); IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", @@ -2006,8 +2252,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } ret = 0; + goto out; release_locks: + spin_unlock(&mvm->queue_info_lock); +out: spin_unlock_bh(&mvmsta->lock); return ret; @@ -2023,6 +2272,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; bool alloc_queue = true; + enum iwl_mvm_queue_status queue_status; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { @@ -2048,13 +2298,15 @@ int 
iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + spin_lock_bh(&mvm->queue_info_lock); + queue_status = mvm->queue_info[queue].status; + spin_unlock_bh(&mvm->queue_info_lock); + /* In DQA mode, the existing queue might need to be reconfigured */ if (iwl_mvm_is_dqa_supported(mvm)) { - spin_lock_bh(&mvm->queue_info_lock); /* Maybe there is no need to even alloc a queue... */ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) alloc_queue = false; - spin_unlock_bh(&mvm->queue_info_lock); /* * Only reconfig the SCD for the queue if the window size has @@ -2089,9 +2341,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, &cfg, wdg_timeout); - ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); - if (ret) - return -EIO; + /* Send ADD_STA command to enable aggs only if the queue isn't shared */ + if (queue_status != IWL_MVM_QUEUE_SHARED) { + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); + if (ret) + return -EIO; + } /* No need to mark as reserved */ spin_lock_bh(&mvm->queue_info_lock); @@ -2123,7 +2378,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u16 txq_id; int err; - /* * If mac80211 is cleaning its state, then say that we finished since * our state has been cleared anyway. @@ -2152,6 +2406,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; + spin_unlock_bh(&mvm->queue_info_lock); switch (tid_data->state) { @@ -2412,9 +2667,15 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; /* verify the key details match the required command's expectations */ - if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) || - (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || - (keyconf->keyidx != 4 && keyconf->keyidx != 5))) + if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || + (keyconf->keyidx != 4 && keyconf->keyidx != 5) || + (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && + keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && + keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) + return -EINVAL; + + if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && + keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) return -EINVAL; igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); @@ -2430,11 +2691,18 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_AES_CMAC: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); + break; default: return -EINVAL; } - memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); + memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); + if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) + igtk_cmd.ctrl_flags |= + cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | @@ -2449,6 +2717,19 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, remove_key ? 
"removing" : "installing", igtk_cmd.sta_id); + if (!iwl_mvm_has_new_rx_api(mvm)) { + struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { + .ctrl_flags = igtk_cmd.ctrl_flags, + .key_id = igtk_cmd.key_id, + .sta_id = igtk_cmd.sta_id, + .receive_seq_cnt = igtk_cmd.receive_seq_cnt + }; + + memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, + ARRAY_SIZE(igtk_cmd_v1.igtk)); + return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, + sizeof(igtk_cmd_v1), &igtk_cmd_v1); + } return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd); } @@ -2573,7 +2854,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } sta_id = mvm_sta->sta_id; - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); goto end; } @@ -2659,7 +2942,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index bbc1cab2c3bf..e068d5355865 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -473,9 +473,14 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int iwl_mvm_update_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + +static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); +} + int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -554,4 +559,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); +int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, + int ac, int ssn, unsigned int wdg_timeout, + bool force); + #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 58fc7b3c711c..63a051be832e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -241,11 +241,8 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) }; u32 cmdid; - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) - cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE, - PHY_OPS_GROUP, 0); - else - cmdid = CMD_DTS_MEASUREMENT_TRIGGER; + cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE, + PHY_OPS_GROUP, 0); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) @@ -261,9 +258,6 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) DTS_MEASUREMENT_NOTIF_WIDE) }; int ret; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) - temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; - 
lockdep_assert_held(&mvm->mutex); iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index b3a87a31de30..66957ac12ca4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -346,7 +346,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, rate_idx = info->control.rates[0].idx; /* if the rate isn't a well known legacy rate, take the lowest one */ - if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT_LEGACY) + if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) rate_idx = rate_lowest_index( &mvm->nvm_data->bands[info->band], sta); @@ -441,7 +441,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, * one. * Need to handle this. */ - tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE; + tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; @@ -490,16 +490,34 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, __le16 fc) { - if (iwl_mvm_is_dqa_supported(mvm)) { - if (info->control.vif->type == NL80211_IFTYPE_AP && - ieee80211_is_probe_resp(fc)) + if (!iwl_mvm_is_dqa_supported(mvm)) + return info->hw_queue; + + switch (info->control.vif->type) { + case NL80211_IFTYPE_AP: + /* + * handle legacy hostapd as well, where station may be added + * only after assoc. + */ + if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc)) return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - else if (ieee80211_is_mgmt(fc) && - info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE) + if (info->hw_queue == info->control.vif->cab_queue) + return info->hw_queue; + + WARN_ON_ONCE(1); + return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + case NL80211_IFTYPE_P2P_DEVICE: + if (ieee80211_is_mgmt(fc)) return IWL_MVM_DQA_P2P_DEVICE_QUEUE; - } + if (info->hw_queue == info->control.vif->cab_queue) + return info->hw_queue; - return info->hw_queue; + WARN_ON_ONCE(1); + return IWL_MVM_DQA_P2P_DEVICE_QUEUE; + default: + WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); + return -1; + } } int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) @@ -559,6 +577,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) sta_id = mvmvif->bcast_sta.sta_id; queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); + if (queue < 0) + return -1; + } else if (info.control.vif->type == NL80211_IFTYPE_STATION && is_multicast_ether_addr(hdr->addr1)) { u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); @@ -837,6 +858,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, } } +/* Check if there are any timed-out TIDs on a given shared TXQ */ +static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) +{ + unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; + unsigned long now = jiffies; + int tid; + + for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { + if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) + return true; + } + + return false; +} + /* * Sets the fields in the Tx cmd that are crypto related */ @@ -903,9 +940,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, tid = IWL_MAX_TID_COUNT; } - if (iwl_mvm_is_dqa_supported(mvm)) + if (iwl_mvm_is_dqa_supported(mvm)) { txq_id = 
mvmsta->tid_data[tid].txq_id; + if (ieee80211_is_mgmt(fc)) + tx_cmd->tid_tspec = IWL_TID_NON_QOS; + } + /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); @@ -939,7 +980,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return 0; - } /* If we are here - TXQ exists and needs to be re-activated */ @@ -952,8 +992,25 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id); } - /* Keep track of the time of the last frame for this RA/TID */ - mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; + if (iwl_mvm_is_dqa_supported(mvm)) { + /* Keep track of the time of the last frame for this RA/TID */ + mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; + + /* + * If we have timed-out TIDs - schedule the worker that will + * reconfig the queues and update them + * + * Note that the mvm->queue_info_lock isn't being taken here in + * order to not serialize the TX flow. This isn't dangerous + * because scheduling mvm->add_stream_wk can't ruin the state, + * and if we DON'T schedule it due to some race condition then + * next TX we get here we will. + */ + if (unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED && + iwl_mvm_txq_should_update(mvm, txq_id))) + schedule_work(&mvm->add_stream_wk); + } IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); @@ -1067,9 +1124,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[tid_to_mac80211_ac[tid]], tid, - CMD_ASYNC); + if (!iwl_mvm_is_dqa_supported(mvm)) { + u8 mac80211_ac = tid_to_mac80211_ac[tid]; + + iwl_mvm_disable_txq(mvm, tid_data->txq_id, + vif->hw_queue[mac80211_ac], tid, + CMD_ASYNC); + } tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1547,41 +1608,16 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) iwl_mvm_rx_tx_cmd_agg(mvm, pkt); } -static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, - struct iwl_mvm_ba_notif *ba_notif, - struct iwl_mvm_tid_data *tid_data) -{ - info->flags |= IEEE80211_TX_STAT_AMPDU; - info->status.ampdu_ack_len = ba_notif->txed_2_done; - info->status.ampdu_len = ba_notif->txed; - iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags, - info); - /* TODO: not accounted if the whole A-MPDU failed */ - info->status.tx_time = tid_data->tx_time; - info->status.status_driver_data[0] = - (void *)(uintptr_t)ba_notif->reduced_txp; - info->status.status_driver_data[1] = - (void *)(uintptr_t)tid_data->rate_n_flags; -} - -void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, + int txq, int index, + struct ieee80211_tx_info *ba_info, u32 rate) { - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct sk_buff *skb; - int sta_id, tid, freed; - /* "flow" corresponds to Tx queue */ - u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); - /* "ssn" is start of block-ack Tx window, corresponds to index - * (in Tx queue's circular buffer) of first TFD/frame in window */ - u16 
ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); - - sta_id = ba_notif->sta_id; - tid = ba_notif->tid; + int freed; if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || tid >= IWL_MAX_TID_COUNT, @@ -1601,10 +1637,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) mvmsta = iwl_mvm_sta_from_mac80211(sta); tid_data = &mvmsta->tid_data[tid]; - if (tid_data->txq_id != scd_flow) { + if (tid_data->txq_id != txq) { IWL_ERR(mvm, - "invalid BA notification: Q %d, tid %d, flow %d\n", - tid_data->txq_id, tid, scd_flow); + "invalid BA notification: Q %d, tid %d\n", + tid_data->txq_id, tid); rcu_read_unlock(); return; } @@ -1618,27 +1654,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ - iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn, - &reclaimed_skbs); + iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); - IWL_DEBUG_TX_REPLY(mvm, - "BA_NOTIFICATION Received from %pM, sta_id = %d\n", - (u8 *)&ba_notif->sta_addr_lo32, - ba_notif->sta_id); - IWL_DEBUG_TX_REPLY(mvm, - "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", - ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), - (unsigned long long)le64_to_cpu(ba_notif->bitmap), - scd_flow, ba_resp_scd_ssn, ba_notif->txed, - ba_notif->txed_2_done); - - IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", - ba_notif->reduced_txp); - tid_data->next_reclaimed = ba_resp_scd_ssn; + tid_data->next_reclaimed = index; iwl_mvm_check_ratid_empty(mvm, sta, tid); freed = 0; + ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_hdr *hdr = (void *)skb->data; @@ -1660,8 +1683,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ - if (freed == 1) - iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data); + if (freed == 1) { + info->flags |= IEEE80211_TX_STAT_AMPDU; + memcpy(&info->status, &ba_info->status, + sizeof(ba_info->status)); + iwl_mvm_hwrate_to_tx_status(rate, info); + } } spin_unlock_bh(&mvmsta->lock); @@ -1671,7 +1698,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * Still it's important to update RS about sent vs. acked. */ if (skb_queue_empty(&reclaimed_skbs)) { - struct ieee80211_tx_info ba_info = {}; struct ieee80211_chanctx_conf *chanctx_conf = NULL; if (mvmsta->vif) @@ -1681,11 +1707,11 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) if (WARN_ON_ONCE(!chanctx_conf)) goto out; - ba_info.band = chanctx_conf->def.chan->band; - iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data); + ba_info->band = chanctx_conf->def.chan->band; + iwl_mvm_hwrate_to_tx_status(rate, ba_info); IWL_DEBUG_TX_REPLY(mvm, "No reclaim. 
Update rs directly\n"); - iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false); + iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false); } out: @@ -1697,6 +1723,92 @@ out: } } +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + int sta_id, tid, txq, index; + struct ieee80211_tx_info ba_info = {}; + struct iwl_mvm_ba_notif *ba_notif; + struct iwl_mvm_tid_data *tid_data; + struct iwl_mvm_sta *mvmsta; + + if (iwl_mvm_has_new_tx_api(mvm)) { + struct iwl_mvm_compressed_ba_notif *ba_res = + (void *)pkt->data; + + sta_id = ba_res->sta_id; + ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); + ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); + ba_info.status.tx_time = + (u16)le32_to_cpu(ba_res->wireless_time); + ba_info.status.status_driver_data[0] = + (void *)(uintptr_t)ba_res->reduced_txp; + + /* + * TODO: + * When supporting multi TID aggregations - we need to move + * next_reclaimed to be per TXQ and not per TID or handle it + * in a different way. + * This will go together with SN and AddBA offload and cannot + * be handled properly for now. + */ + WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1); + iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid, + (int)ba_res->tfd[0].q_num, + le16_to_cpu(ba_res->tfd[0].tfd_index), + &ba_info, le32_to_cpu(ba_res->tx_rate)); + + IWL_DEBUG_TX_REPLY(mvm, + "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", + sta_id, le32_to_cpu(ba_res->flags), + le16_to_cpu(ba_res->txed), + le16_to_cpu(ba_res->done)); + return; + } + + ba_notif = (void *)pkt->data; + sta_id = ba_notif->sta_id; + tid = ba_notif->tid; + /* "flow" corresponds to Tx queue */ + txq = le16_to_cpu(ba_notif->scd_flow); + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + index = le16_to_cpu(ba_notif->scd_ssn); + + rcu_read_lock(); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); + if (WARN_ON_ONCE(!mvmsta)) { + rcu_read_unlock(); + return; + } + + tid_data = &mvmsta->tid_data[tid]; + + ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; + ba_info.status.ampdu_len = ba_notif->txed; + ba_info.status.tx_time = tid_data->tx_time; + ba_info.status.status_driver_data[0] = + (void *)(uintptr_t)ba_notif->reduced_txp; + + rcu_read_unlock(); + + iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, + tid_data->rate_n_flags); + + IWL_DEBUG_TX_REPLY(mvm, + "BA_NOTIFICATION Received from %pM, sta_id = %d\n", + (u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id); + + IWL_DEBUG_TX_REPLY(mvm, + "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", + ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), + le64_to_cpu(ba_notif->bitmap), txq, index, + ba_notif->txed, ba_notif->txed_2_done); + + IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", + ba_notif->reduced_txp); +} + /* * Note that there are transports that buffer frames before they reach * the firmware. 
This means that after flush_tx_path is called, the diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 68f4e7fdfc11..d04babd99b53 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -512,7 +512,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) base = mvm->fw->inst_errlog_ptr; } - if (base < 0x800000) { + if (base < 0x400000) { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, @@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, - .enable = 1, + .action = SCD_CFG_ENABLE_QUEUE, .window = frame_limit, .sta_id = sta_id, .ssn = cpu_to_le16(ssn), @@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, tid_to_mac80211_ac[cfg->tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; + + mvm->queue_info[queue].txq_tid = cfg->tid; } IWL_DEBUG_TX_QUEUES(mvm, @@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, if (enable_queue) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, - .enable = 1, + .action = SCD_CFG_ENABLE_QUEUE, .window = cfg->frame_limit, .sta_id = cfg->sta_id, .ssn = cpu_to_le16(ssn), @@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, - .enable = 0, + .action = SCD_CFG_DISABLE_QUEUE, }; bool remove_mac_queue = true; int ret; @@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, ~BIT(mac80211_queue); mvm->queue_info[queue].hw_queue_refcount--; - cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0; - if (!cmd.enable) + cmd.action = mvm->queue_info[queue].hw_queue_refcount ? 
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; + if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, @@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, mvm->queue_info[queue].hw_queue_to_mac80211); /* If the queue is still enabled - nothing left to do in this func */ - if (cmd.enable) { + if (cmd.action == SCD_CFG_ENABLE_QUEUE) { spin_unlock_bh(&mvm->queue_info_lock); return; } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; + cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].hw_queue_refcount || @@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); } - /* TODO: if queue was shared - need to re-enable AGGs */ + /* If the queue is marked as shared - "unshare" it */ + if (mvm->queue_info[queue].hw_queue_refcount == 1 && + mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { + mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING; + IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", + queue); + } } void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) @@ -1215,6 +1225,28 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) rcu_read_unlock(); } +void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) +{ + bool ps_disabled; + + lockdep_assert_held(&mvm->mutex); + + /* Disable power save when reading GP2 */ + ps_disabled = mvm->ps_disabled; + if (!ps_disabled) { + mvm->ps_disabled = true; + iwl_mvm_power_update_device(mvm); + } + + *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG); + *boottime = ktime_get_boot_ns(); + + if (!ps_disabled) { + mvm->ps_disabled = ps_disabled; + iwl_mvm_power_update_device(mvm); + } +} + int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, enum iwl_lqm_cmd_operatrions operation, u32 duration, u32 timeout) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 78cf9a7f3eac..001be406a3d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -487,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, @@ -500,22 +501,36 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, /* 9000 Series */ + {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)}, - 
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, /* a000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)}, @@ -608,7 +623,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; - const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL; struct iwl_trans *iwl_trans; int ret; @@ -637,11 +651,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } if (iwl_trans->cfg->rf_id) { - if (cfg == &iwl9260_2ac_cfg) - cfg_9260lc = &iwl9260lc_2ac_cfg; - if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) { - cfg = cfg_9260lc; - iwl_trans->cfg = cfg_9260lc; + if (cfg == &iwl9460_2ac_cfg && + iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) { + cfg = &iwl9000lc_2ac_cfg; + iwl_trans->cfg = cfg; } } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 11e347dd44c7..cac6d99012b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -37,6 +37,7 @@ #include <linux/wait.h> #include <linux/pci.h> #include <linux/timer.h> +#include <linux/cpu.h> #include "iwl-fh.h" #include "iwl-csr.h" @@ -49,7 +50,7 @@ * be needed for potential data in the SKB's head. The remaining ones can * be used for frags. */ -#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3) +#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3) /* * RX related structures and functions @@ -192,41 +193,9 @@ struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; u32 flags; + u32 tbs; }; -/* - * Generic queue structure - * - * Contains common data for Rx and Tx queues. - * - * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware - * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless - * there might be HW changes in the future). 
For the normal TX - * queues, n_window, which is the size of the software queue data - * is also 256; however, for the command queue, n_window is only - * 32 since we don't need so many commands pending. Since the HW - * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result, - * the software buffers (in the variables @meta, @txb in struct - * iwl_txq) only have 32 entries, while the HW buffers (@tfds in - * the same struct) have 256. - * This means that we end up with the following: - * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | - * SW entries: | 0 | ... | 31 | - * where N is a number between 0 and 7. This means that the SW - * data is a window overlayed over the HW queue. - */ -struct iwl_queue { - int write_ptr; /* 1-st empty entry (index) host_w*/ - int read_ptr; /* last used entry (index) host_r*/ - /* use for monitoring and recovering the stuck queue */ - dma_addr_t dma_addr; /* physical addr for BD's */ - int n_window; /* safe queue window */ - u32 id; - int low_mark; /* low watermark, resume queue if free - * space more than this */ - int high_mark; /* high watermark, stop queue if free - * space less than this */ -}; #define TFD_TX_CMD_SLOTS 256 #define TFD_CMD_SLOTS 32 @@ -273,13 +242,32 @@ struct iwl_pcie_first_tb_buf { * @wd_timeout: queue watchdog timeout (jiffies) - per queue * @frozen: tx stuck queue timer is frozen * @frozen_expiry_remainder: remember how long until the timer fires + * @write_ptr: 1-st empty entry (index) host_w + * @read_ptr: last used entry (index) host_r + * @dma_addr: physical addr for BD's + * @n_window: safe queue window + * @id: queue id + * @low_mark: low watermark, resume queue if free space more than this + * @high_mark: high watermark, stop queue if free space less than this * * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame * descriptors) and required locking structures. + * + * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware + * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless + * there might be HW changes in the future). For the normal TX + * queues, n_window, which is the size of the software queue data + * is also 256; however, for the command queue, n_window is only + * 32 since we don't need so many commands pending. Since the HW + * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. + * This means that we end up with the following: + * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | + * SW entries: | 0 | ... | 31 | + * where N is a number between 0 and 7. This means that the SW + * data is a window overlayed over the HW queue. */ struct iwl_txq { - struct iwl_queue q; - struct iwl_tfd *tfds; + void *tfds; struct iwl_pcie_first_tb_buf *first_tb_bufs; dma_addr_t first_tb_dma; struct iwl_pcie_txq_entry *entries; @@ -294,6 +282,14 @@ struct iwl_txq { bool block; unsigned long wd_timeout; struct sk_buff_head overflow_q; + + int write_ptr; + int read_ptr; + dma_addr_t dma_addr; + int n_window; + u32 id; + int low_mark; + int high_mark; }; static inline dma_addr_t @@ -309,6 +305,16 @@ struct iwl_tso_hdr_page { }; /** + * enum iwl_shared_irq_flags - level of sharing for irq + * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes. + * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue. 
+ */ +enum iwl_shared_irq_flags { + IWL_SHARED_IRQ_NON_RX = BIT(0), + IWL_SHARED_IRQ_FIRST_RSS = BIT(1), +}; + +/** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues @@ -326,7 +332,6 @@ struct iwl_tso_hdr_page { * @rx_buf_size: Rx buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue - * @wide_cmd_header: true when ucode supports wide command header format * @sw_csum_tx: if true, then the transport will compute the csum of the TXed * frame. * @rx_page_order: page order for receive buffer size @@ -338,8 +343,10 @@ struct iwl_tso_hdr_page { * @fw_mon_size: size of the buffer for the firmware monitor * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X - * @allocated_vector: the number of interrupt vector allocated by the OS - * @default_irq_num: default irq for non rx interrupt + * @shared_vec_mask: the type of causes the shared vector handles + * (see iwl_shared_irq_flags). + * @alloc_vecs: the number of interrupt vectors allocated by the OS + * @def_irq: default irq for non rx causes * @fh_init_mask: initial unmasked fh causes * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes @@ -391,11 +398,12 @@ struct iwl_trans_pcie { unsigned int cmd_q_wdg_timeout; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; + u8 max_tbs; + u16 tfd_size; enum iwl_amsdu_size rx_buf_size; bool bc_table_dword; bool scd_set_active; - bool wide_cmd_header; bool sw_csum_tx; u32 rx_page_order; @@ -410,12 +418,14 @@ struct iwl_trans_pcie { struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; bool msix_enabled; - u32 allocated_vector; - u32 default_irq_num; + u8 shared_vec_mask; + u32 alloc_vecs; + u32 def_irq; u32 fh_init_mask; u32 hw_init_mask; u32 fh_mask; u32 hw_mask; + cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; }; static inline struct iwl_trans_pcie * @@ -474,6 +484,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode); +dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq); void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, @@ -486,11 +497,20 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); -static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, + u8 idx) { - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + if (trans->cfg->use_tfh) { + struct iwl_tfh_tfd *tfd = _tfd; + struct iwl_tfh_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->tb_len); + } else { + struct iwl_tfd *tfd = _tfd; + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - return le16_to_cpu(tb->hi_n_len) >> 4; + return le16_to_cpu(tb->hi_n_len) >> 4; + } } /***************************************************** @@ -617,9 +637,9 @@ static inline void iwl_wake_queue(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); - iwl_op_mode_queue_not_full(trans->op_mode, 
txq->q.id); + if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) { + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); + iwl_op_mode_queue_not_full(trans->op_mode, txq->id); } } @@ -628,22 +648,22 @@ static inline void iwl_stop_queue(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { - iwl_op_mode_queue_full(trans->op_mode, txq->q.id); - IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); + if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) { + iwl_op_mode_queue_full(trans->op_mode, txq->id); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); } else IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", - txq->q.id); + txq->id); } -static inline bool iwl_queue_used(const struct iwl_queue *q, int i) +static inline bool iwl_queue_used(const struct iwl_txq *q, int i) { return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr && i < q->write_ptr) : !(i < q->read_ptr && i >= q->write_ptr); } -static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) +static inline u8 get_cmd_index(struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 5c36e6d00622..6fe5546dc773 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -487,15 +487,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) while (pending) { int i; - struct list_head local_allocated; + LIST_HEAD(local_allocated); gfp_t gfp_mask = GFP_KERNEL; /* Do not post a warning if there are only a few requests */ if (pending < RX_PENDING_WATERMARK) gfp_mask |= __GFP_NOWARN; - INIT_LIST_HEAD(&local_allocated); - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { struct iwl_rx_mem_buffer *rxb; struct page *page; @@ -1108,13 +1106,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, FH_RSCSR_RXQ_POS != rxq->id); IWL_DEBUG_RX(trans, - "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", + "cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", rxcb._offset, iwl_get_cmd_string(trans, iwl_cmd_id(pkt->hdr.cmd, pkt->hdr.group_id, 0)), - pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); + pkt->hdr.group_id, pkt->hdr.cmd, + le16_to_cpu(pkt->hdr.sequence)); len = iwl_rx_packet_len(pkt); len += sizeof(u32); /* account for status word */ @@ -1142,7 +1141,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, sequence = le16_to_cpu(pkt->hdr.sequence); index = SEQ_TO_INDEX(sequence); - cmd_index = get_cmd_index(&txq->q, index); + cmd_index = get_cmd_index(txq, index); if (rxq->id == 0) iwl_op_mode_rx(trans->op_mode, &rxq->napi, @@ -1885,6 +1884,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) inta_fh, iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); + if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && + inta_fh & MSIX_FH_INT_CAUSES_Q0) { + local_bh_disable(); + iwl_pcie_rx_handle(trans, 0); + local_bh_enable(); + } + + if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && + inta_fh & MSIX_FH_INT_CAUSES_Q1) { + local_bh_disable(); + iwl_pcie_rx_handle(trans, 1); + local_bh_enable(); + } + /* This "Tx" DMA channel is used only for loading uCode */ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 74f2f035bd28..ae95533e587d 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, if (ret) return ret; - /* Notify the ucode of the loaded section number and status */ - val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); - val = val | (sec_num << shift_param); - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); + /* Notify ucode of loaded section number and status */ + if (trans->cfg->use_tfh) { + val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS); + val = val | (sec_num << shift_param); + iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val); + } else { + val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); + val = val | (sec_num << shift_param); + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); + } sec_num = (sec_num << 1) | 0x1; } @@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, iwl_enable_interrupts(trans); - if (cpu == 1) - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); - else - iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); + if (trans->cfg->use_tfh) { + if (cpu == 1) + iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, + 0xFFFF); + else + iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, + 0xFFFFFFFF); + } else { + if (cpu == 1) + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, + 0xFFFF); + else + iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, + 0xFFFFFFFF); + } return 0; } @@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, return ret; } - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - iwl_set_bits_prph(trans, - CSR_UCODE_LOAD_STATUS_ADDR, - (LMPM_CPU_UCODE_LOADING_COMPLETED | - LMPM_CPU_HDRS_LOADING_COMPLETED | - LMPM_CPU_UCODE_LOADING_STARTED) << - shift_param); - *first_ucode_section = last_read_idx; return 0; @@ -1161,7 +1170,7 @@ static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) if (trans_pcie->msix_enabled) { int i; - for (i = 0; i < trans_pcie->allocated_vector; i++) + for (i = 0; i < trans_pcie->alloc_vecs; i++) synchronize_irq(trans_pcie->msix_entries[i].vector); } else { synchronize_irq(trans_pcie->pci_dev->irq); @@ -1420,13 +1429,58 @@ static struct iwl_causes_list causes_list[] = { {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; +static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; + int i; + + /* + * Access all non RX causes and map them to the default irq. + * In case we are missing at least one interrupt vector, + * the first interrupt vector will serve non-RX and FBQ causes. + */ + for (i = 0; i < ARRAY_SIZE(causes_list); i++) { + iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); + iwl_clear_bit(trans, causes_list[i].mask_reg, + causes_list[i].cause_num); + } +} + +static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 offset = + trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; + u32 val, idx; + + /* + * The first RX queue - fallback queue, which is designated for + * management frame, command responses etc, is always mapped to the + * first interrupt vector. The other RX queues are mapped to + * the other (N - 2) interrupt vectors. 
+ */ + val = BIT(MSIX_FH_INT_CAUSES_Q(0)); + for (idx = 1; idx < trans->num_rx_queues; idx++) { + iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), + MSIX_FH_INT_CAUSES_Q(idx - offset)); + val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); + } + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); + + val = MSIX_FH_INT_CAUSES_Q(0); + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) + val |= MSIX_NON_AUTO_CLEAR_CAUSE; + iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); + + if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) + iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); +} + static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) { - u32 val, max_rx_vector, i; struct iwl_trans *trans = trans_pcie->trans; - max_rx_vector = trans_pcie->allocated_vector - 1; - if (!trans_pcie->msix_enabled) { if (trans->cfg->mq_rx_supported) iwl_write_prph(trans, UREG_CHICK, @@ -1437,25 +1491,16 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); /* - * Each cause from the list above and the RX causes is represented as - * a byte in the IVAR table. We access the first (N - 1) bytes and map - * them to the (N - 1) vectors so these vectors will be used as rx - * vectors. Then access all non rx causes and map them to the - * default queue (N'th queue). + * Each cause from the causes list above and the RX causes is + * represented as a byte in the IVAR table. The first nibble + * represents the bound interrupt vector of the cause, the second + * represents no auto clear for this cause. This will be set if its + * interrupt vector is bound to serve other causes. */ - for (i = 0; i < max_rx_vector; i++) { - iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i)); - iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD, - BIT(MSIX_FH_INT_CAUSES_Q(i))); - } + iwl_pcie_map_rx_causes(trans); + + iwl_pcie_map_non_rx_causes(trans); - for (i = 0; i < ARRAY_SIZE(causes_list); i++) { - val = trans_pcie->default_irq_num | - MSIX_NON_AUTO_CLEAR_CAUSE; - iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); - iwl_clear_bit(trans, causes_list[i].mask_reg, - causes_list[i].cause_num); - } trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); trans_pcie->fh_mask = trans_pcie->fh_init_mask; @@ -1468,40 +1513,55 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_irqs, num_irqs, i, ret, nr_online_cpus; u16 pci_cmd; - int max_vector; - int ret, i; - - if (trans->cfg->mq_rx_supported) { - max_vector = min_t(u32, (num_possible_cpus() + 2), - IWL_MAX_RX_HW_QUEUES); - for (i = 0; i < max_vector; i++) - trans_pcie->msix_entries[i].entry = i; - - ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries, - MSIX_MIN_INTERRUPT_VECTORS, - max_vector); - if (ret > 1) { - IWL_DEBUG_INFO(trans, - "Enable MSI-X allocate %d interrupt vector\n", - ret); - trans_pcie->allocated_vector = ret; - trans_pcie->default_irq_num = - trans_pcie->allocated_vector - 1; - trans_pcie->trans->num_rx_queues = - trans_pcie->allocated_vector - 1; - trans_pcie->msix_enabled = true; - - return; - } + + if (!trans->cfg->mq_rx_supported) + goto enable_msi; + + nr_online_cpus = num_online_cpus(); + max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES); + for (i = 0; i < max_irqs; i++) + trans_pcie->msix_entries[i].entry = i; + + num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, + MSIX_MIN_INTERRUPT_VECTORS, + max_irqs); + if 
(num_irqs < 0) { IWL_DEBUG_INFO(trans, - "ret = %d %s move to msi mode\n", ret, - (ret == 1) ? - "can't allocate more than 1 interrupt vector" : - "failed to enable msi-x mode"); - pci_disable_msix(pdev); + "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", + num_irqs); + goto enable_msi; } + trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; + + IWL_DEBUG_INFO(trans, + "MSI-X enabled. %d interrupt vectors were allocated\n", + num_irqs); + /* + * In case the OS provides fewer interrupts than requested, different + * causes will share the same interrupt vector as follows: + * One interrupt less: non rx causes shared with FBQ. + * Two interrupts less: non rx causes shared with FBQ and RSS. + * More than two interrupts: we will use fewer RSS queues. + */ + if (num_irqs <= nr_online_cpus) { + trans_pcie->trans->num_rx_queues = num_irqs + 1; + trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | + IWL_SHARED_IRQ_FIRST_RSS; + } else if (num_irqs == nr_online_cpus + 1) { + trans_pcie->trans->num_rx_queues = num_irqs; + trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; + } else { + trans_pcie->trans->num_rx_queues = num_irqs - 1; + } + + trans_pcie->alloc_vecs = num_irqs; + trans_pcie->msix_enabled = true; + return; + +enable_msi: ret = pci_enable_msi(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); @@ -1514,36 +1574,57 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, } } +static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) +{ + int iter_rx_q, i, ret, cpu, offset; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; + iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; + offset = 1 + i; + for (; i < iter_rx_q ; i++) { + /* + * Get the cpu prior to the place to search + * (i.e. return will be > i - 1). + */ + cpu = cpumask_next(i - offset, cpu_online_mask); + cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); + ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, + &trans_pcie->affinity_mask[i]); + if (ret) + IWL_ERR(trans_pcie->trans, + "Failed to set affinity mask for IRQ %d\n", + i); + } +} + static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie) { - int i, last_vector; - - last_vector = trans_pcie->trans->num_rx_queues; + int i; - for (i = 0; i < trans_pcie->allocated_vector; i++) { + for (i = 0; i < trans_pcie->alloc_vecs; i++) { int ret; - - ret = request_threaded_irq(trans_pcie->msix_entries[i].vector, - iwl_pcie_msix_isr, - (i == last_vector) ? - iwl_pcie_irq_msix_handler : - iwl_pcie_irq_rx_msix_handler, - IRQF_SHARED, - DRV_NAME, - &trans_pcie->msix_entries[i]); + struct msix_entry *msix_entry; + + msix_entry = &trans_pcie->msix_entries[i]; + ret = devm_request_threaded_irq(&pdev->dev, + msix_entry->vector, + iwl_pcie_msix_isr, + (i == trans_pcie->def_irq) ? 
+ iwl_pcie_irq_msix_handler : + iwl_pcie_irq_rx_msix_handler, + IRQF_SHARED, + DRV_NAME, + msix_entry); if (ret) { - int j; - IWL_ERR(trans_pcie->trans, "Error allocating IRQ %d\n", i); - for (j = 0; j < i; j++) - free_irq(trans_pcie->msix_entries[j].vector, - &trans_pcie->msix_entries[j]); - pci_disable_msix(pdev); + return ret; } } + iwl_pcie_irq_set_affinity(trans_pcie->trans); return 0; } @@ -1672,7 +1753,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans_pcie->rx_page_order = iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); - trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; @@ -1703,22 +1783,16 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_rx_free(trans); if (trans_pcie->msix_enabled) { - for (i = 0; i < trans_pcie->allocated_vector; i++) - free_irq(trans_pcie->msix_entries[i].vector, - &trans_pcie->msix_entries[i]); + for (i = 0; i < trans_pcie->alloc_vecs; i++) { + irq_set_affinity_hint( + trans_pcie->msix_entries[i].vector, + NULL); + } - pci_disable_msix(trans_pcie->pci_dev); trans_pcie->msix_enabled = false; } else { - free_irq(trans_pcie->pci_dev->irq, trans); - iwl_pcie_free_ict(trans); - - pci_disable_msi(trans_pcie->pci_dev); } - iounmap(trans_pcie->hw_base); - pci_release_regions(trans_pcie->pci_dev); - pci_disable_device(trans_pcie->pci_dev); iwl_pcie_free_fw_monitor(trans); @@ -1890,7 +1964,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, txq->frozen = freeze; - if (txq->q.read_ptr == txq->q.write_ptr) + if (txq->read_ptr == txq->write_ptr) goto next_queue; if (freeze) { @@ -1938,7 +2012,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) txq->block--; if (!txq->block) { iwl_write32(trans, HBUS_TARG_WRPTR, - txq->q.write_ptr | (i << 8)); + txq->write_ptr | (i << 8)); } } else if (block) { txq->block++; @@ -1958,10 +2032,14 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) int cnt; IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->q.read_ptr, txq->q.write_ptr); + txq->read_ptr, txq->write_ptr); + + if (trans->cfg->use_tfh) + /* TODO: access new SCD registers and dump them */ + return; scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); + SCD_TX_STTS_QUEUE_OFFSET(txq->id); iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); iwl_print_hex_error(trans, buf, sizeof(buf)); @@ -1996,7 +2074,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; - struct iwl_queue *q; int cnt; unsigned long now = jiffies; int ret = 0; @@ -2014,13 +2091,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); txq = &trans_pcie->txq[cnt]; - q = &txq->q; - wr_ptr = ACCESS_ONCE(q->write_ptr); + wr_ptr = ACCESS_ONCE(txq->write_ptr); - while (q->read_ptr != ACCESS_ONCE(q->write_ptr) && + while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { - u8 write_ptr = ACCESS_ONCE(q->write_ptr); + u8 write_ptr = ACCESS_ONCE(txq->write_ptr); if (WARN_ONCE(wr_ptr != write_ptr, "WR pointer moved while flushing %d -> %d\n", @@ -2029,7 +2105,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct 
iwl_trans *trans, u32 txq_bm) usleep_range(1000, 2000); } - if (q->read_ptr != q->write_ptr) { + if (txq->read_ptr != txq->write_ptr) { IWL_ERR(trans, "fail to flush all tx fifo queues Q %d\n", cnt); ret = -ETIMEDOUT; @@ -2197,7 +2273,6 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; - struct iwl_queue *q; char *buf; int pos = 0; int cnt; @@ -2215,10 +2290,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { txq = &trans_pcie->txq[cnt]; - q = &txq->q; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", - cnt, q->read_ptr, q->write_ptr, + cnt, txq->read_ptr, txq->write_ptr, !!test_bit(cnt, trans_pcie->queue_used), !!test_bit(cnt, trans_pcie->queue_stopped), txq->need_update, txq->frozen, @@ -2424,13 +2498,14 @@ err: } #endif /*CONFIG_IWLWIFI_DEBUGFS */ -static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd) +static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 cmdlen = 0; int i; - for (i = 0; i < IWL_NUM_OF_TBS; i++) - cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i); + for (i = 0; i < trans_pcie->max_tbs; i++) + cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i); return cmdlen; } @@ -2645,7 +2720,7 @@ static struct iwl_trans_dump_data /* host commands */ len += sizeof(*data) + - cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); + cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ if (trans_pcie->fw_mon_page) { @@ -2713,12 +2788,13 @@ static struct iwl_trans_dump_data data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; spin_lock_bh(&cmdq->lock); - ptr = cmdq->q.write_ptr; - for (i = 0; i < cmdq->q.n_window; i++) { - u8 idx = get_cmd_index(&cmdq->q, ptr); + ptr = cmdq->write_ptr; + for (i = 0; i < cmdq->n_window; i++) { + u8 idx = get_cmd_index(cmdq, ptr); u32 caplen, cmdlen; - cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]); + cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + + trans_pcie->tfd_size * ptr); caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); if (cmdlen) { @@ -2788,6 +2864,8 @@ static const struct iwl_trans_ops trans_ops_pcie = { .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, + .get_txq_byte_table = iwl_trans_pcie_get_txq_byte_table, + .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, @@ -2821,13 +2899,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans *trans; int ret, addr_size; + ret = pcim_enable_device(pdev); + if (ret) + return ERR_PTR(ret); + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); if (!trans) return ERR_PTR(-ENOMEM); - trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; - trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->trans = trans; @@ -2841,9 +2921,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_no_pci; } - ret = pci_enable_device(pdev); - if (ret) - goto out_no_pci; if (!cfg->base_params->pcie_l1_allowed) { /* @@ -2861,6 +2938,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, else addr_size = 36; + if (cfg->use_tfh) { + trans_pcie->max_tbs = IWL_TFH_NUM_TBS; + trans_pcie->tfd_size = sizeof(struct 
iwl_tfh_tfd); + + } else { + trans_pcie->max_tbs = IWL_NUM_OF_TBS; + trans_pcie->tfd_size = sizeof(struct iwl_tfd); + } + trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie); + pci_set_master(pdev); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); @@ -2875,21 +2962,21 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* both attempts failed: */ if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); - goto out_pci_disable_device; + goto out_no_pci; } } - ret = pci_request_regions(pdev, DRV_NAME); + ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); if (ret) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); - goto out_pci_disable_device; + dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); + goto out_no_pci; } - trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); + trans_pcie->hw_base = pcim_iomap_table(pdev)[0]; if (!trans_pcie->hw_base) { - dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); + dev_err(&pdev->dev, "pcim_iomap_table failed\n"); ret = -ENODEV; - goto out_pci_release_regions; + goto out_no_pci; } /* We disable the RETRY_TIMEOUT register (0x41) to keep @@ -2916,7 +3003,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, ret = iwl_pcie_prepare_card_hw(trans); if (ret) { IWL_WARN(trans, "Exit HW not ready\n"); - goto out_pci_disable_msi; + goto out_no_pci; } /* @@ -2933,7 +3020,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 25000); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); - goto out_pci_disable_msi; + goto out_no_pci; } if (iwl_trans_grab_nic_access(trans, &flags)) { @@ -2965,15 +3052,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (trans_pcie->msix_enabled) { if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) - goto out_pci_release_regions; + goto out_no_pci; } else { ret = iwl_pcie_alloc_ict(trans); if (ret) - goto out_pci_disable_msi; + goto out_no_pci; - ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, - iwl_pcie_irq_handler, - IRQF_SHARED, DRV_NAME, trans); + ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, + iwl_pcie_isr, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans); if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; @@ -2991,12 +3079,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_free_ict: iwl_pcie_free_ict(trans); -out_pci_disable_msi: - pci_disable_msi(pdev); -out_pci_release_regions: - pci_release_regions(pdev); -out_pci_disable_device: - pci_disable_device(pdev); out_no_pci: free_percpu(trans_pcie->tso_hdr_page); iwl_trans_free(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 18650dccdb58..e9a278b60dfd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -71,7 +71,7 @@ * ***************************************************/ -static int iwl_queue_space(const struct iwl_queue *q) +static int iwl_queue_space(const struct iwl_txq *q) { unsigned int max; unsigned int used; @@ -102,7 +102,7 @@ static int iwl_queue_space(const struct iwl_queue *q) /* * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ -static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) +static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id) { q->n_window = slots_num; q->id = id; @@ -158,13 +158,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) spin_lock(&txq->lock); /* check if triggered erroneously */ 
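iwl_queue_space() above only changes its argument type in this patch; its body (elided by the hunk context) and get_cmd_index() both rely on power-of-two masking. A self-contained sketch of that arithmetic, assuming TFD_QUEUE_SIZE_MAX is 256 and a 32-entry command-queue window, as the iwl_txq comment earlier in this patch states:

    /* Standalone illustration of the ring math - not driver code. */
    #include <stdio.h>

    #define TFD_QUEUE_SIZE_MAX 256	/* HW ring size, power of two */

    int main(void)
    {
    	unsigned int read_ptr = 250, write_ptr = 8;	/* wrapped ring */
    	unsigned int n_window = 32, hw_index = 200;
    	unsigned int used;

    	/* the mask makes the subtraction wrap correctly across 255 -> 0 */
    	used = (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
    	printf("used = %u\n", used);	/* prints 14 */

    	/* get_cmd_index(): 32-entry SW window overlaid on the HW ring */
    	printf("sw index = %u\n", hw_index & (n_window - 1));	/* prints 8 */
    	return 0;
    }

Free space is then the usable maximum minus used; keeping the ring one entry short of completely full is what lets read_ptr == write_ptr unambiguously mean "empty".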
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 18650dccdb58..e9a278b60dfd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,7 +71,7 @@
  *
  ***************************************************/
 
-static int iwl_queue_space(const struct iwl_queue *q)
+static int iwl_queue_space(const struct iwl_txq *q)
 {
 	unsigned int max;
 	unsigned int used;
@@ -102,7 +102,7 @@ static int iwl_queue_space(const struct iwl_queue *q)
 /*
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id)
+static int iwl_queue_init(struct iwl_txq *q, int slots_num, u32 id)
 {
 	q->n_window = slots_num;
 	q->id = id;
@@ -158,13 +158,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
 	spin_lock(&txq->lock);
 	/* check if triggered erroneously */
-	if (txq->q.read_ptr == txq->q.write_ptr) {
+	if (txq->read_ptr == txq->write_ptr) {
 		spin_unlock(&txq->lock);
 		return;
 	}
 	spin_unlock(&txq->lock);
 
-	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
+	IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->id,
 		jiffies_to_msecs(txq->wd_timeout));
 
 	iwl_trans_pcie_log_scd_error(trans, txq);
@@ -176,22 +176,21 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
  * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
 static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-					     struct iwl_txq *txq, u16 byte_cnt)
+					     struct iwl_txq *txq, u16 byte_cnt,
+					     int num_tbs)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int write_ptr = txq->q.write_ptr;
-	int txq_id = txq->q.id;
+	int write_ptr = txq->write_ptr;
+	int txq_id = txq->id;
 	u8 sec_ctl = 0;
-	u8 sta_id = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(void *) txq->entries[txq->q.write_ptr].cmd->payload;
+		(void *)txq->entries[txq->write_ptr].cmd->payload;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-	sta_id = tx_cmd->sta_id;
 	sec_ctl = tx_cmd->sec_ctl;
 
 	switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -205,14 +204,32 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
 		break;
 	}
-
 	if (trans_pcie->bc_table_dword)
 		len = DIV_ROUND_UP(len, 4);
 
 	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
 		return;
 
-	bc_ent = cpu_to_le16(len | (sta_id << 12));
+	if (trans->cfg->use_tfh) {
+		u8 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+				     num_tbs * sizeof(struct iwl_tfh_tb);
+		/*
+		 * filled_tfd_size contains the number of filled bytes in the
+		 * TFD.
+		 * Dividing it by 64 will give the number of chunks to fetch
+		 * to SRAM- 0 for one chunk, 1 for 2 and so on.
+		 * If, for example, TFD contains only 3 TBs then 32 bytes
+		 * of the TFD are used, and only one chunk of 64 bytes should
+		 * be fetched
+		 */
+		u8 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+	} else {
+		u8 sta_id = tx_cmd->sta_id;
+
+		bc_ent = cpu_to_le16(len | (sta_id << 12));
+	}
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
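The chunk computation added above is easiest to see with numbers. A standalone sketch (not from the patch; the header and TB sizes are illustrative stand-ins for offsetof(struct iwl_tfh_tfd, tbs) and sizeof(struct iwl_tfh_tb)):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define TFD_HDR_SIZE	8	/* illustrative TFD header size */
#define TB_SIZE		12	/* illustrative per-TB entry size */

int main(void)
{
	for (int num_tbs = 1; num_tbs <= 6; num_tbs++) {
		unsigned filled = TFD_HDR_SIZE + num_tbs * TB_SIZE;
		/* 0 means "fetch one 64-byte chunk", 1 means two, ... */
		unsigned num_fetch_chunks = DIV_ROUND_UP(filled, 64) - 1;

		printf("%d TBs -> %u filled bytes -> %u extra chunk(s)\n",
		       num_tbs, filled, num_fetch_chunks);
	}
	return 0;
}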
@@ -227,12 +244,12 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
-	int txq_id = txq->q.id;
-	int read_ptr = txq->q.read_ptr;
+	int txq_id = txq->id;
+	int read_ptr = txq->read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;
 	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
+		(void *)txq->entries[read_ptr].cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -240,6 +257,7 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 		sta_id = tx_cmd->sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
 	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
 
 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
@@ -255,7 +273,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 reg = 0;
-	int txq_id = txq->q.id;
+	int txq_id = txq->id;
 
 	lockdep_assert_held(&txq->lock);
 
@@ -289,10 +307,10 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 	 * if not in power-save mode, uCode will never sleep when we're
 	 * trying to tx (during RFKILL, we're not trying to tx).
 	 */
-	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
 	if (!txq->block)
 		iwl_write32(trans, HBUS_TARG_WRPTR,
-			    txq->q.write_ptr | (txq_id << 8));
+			    txq->write_ptr | (txq_id << 8));
 }
 
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
@@ -312,49 +330,93 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	}
 }
 
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+				     struct iwl_txq *txq, int idx)
+{
+	return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+						  void *_tfd, u8 idx)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		addr |=
-		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd = _tfd;
+		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+		return (dma_addr_t)(le64_to_cpu(tb->addr));
+	} else {
+		struct iwl_tfd *tfd = _tfd;
+		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+		dma_addr_t addr = get_unaligned_le32(&tb->lo);
+		dma_addr_t hi_len;
+
+		if (sizeof(dma_addr_t) <= sizeof(u32))
+			return addr;
 
-	return addr;
+		hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+		/*
+		 * shift by 16 twice to avoid warnings on 32-bit
+		 * (where this code never runs anyway due to the
+		 * if statement above)
+		 */
+		return addr | ((hi_len << 16) << 16);
+	}
 }
 
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				       dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+				       u8 idx, dma_addr_t addr, u16 len)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
 
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+		put_unaligned_le64(addr, &tb->addr);
+		tb->tb_len = cpu_to_le16(len);
 
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
+		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
 
-	tfd->num_tbs = idx + 1;
+		u16 hi_n_len = len << 4;
+
+		put_unaligned_le32(addr, &tb->lo);
+		if (sizeof(dma_addr_t) > sizeof(u32))
+			hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+		tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+		tfd_fh->num_tbs = idx + 1;
+	}
 }
 
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
 {
-	return tfd->num_tbs & 0x1f;
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd = _tfd;
+
+		return le16_to_cpu(tfd->num_tbs) & 0x1f;
+	} else {
+		struct iwl_tfd *tfd = _tfd;
+
+		return tfd->num_tbs & 0x1f;
+	}
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 			       struct iwl_cmd_meta *meta,
-			       struct iwl_tfd *tfd)
+			       struct iwl_txq *txq, int index)
 {
-	int i;
-	int num_tbs;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i, num_tbs;
+	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
 
 	/* Sanity check on number of chunks */
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	if (num_tbs >= IWL_NUM_OF_TBS) {
+	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
 		return;
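The legacy branch above packs a 36-bit DMA address as a 32-bit lo word plus four high bits stored next to the 12-bit length in hi_n_len; the double 16-bit shift avoids an undefined 32-bit shift when dma_addr_t is only 32 bits wide. A standalone round-trip sketch of that packing (field names mirror iwl_tfd_tb, but the struct here is illustrative):

#include <assert.h>
#include <stdint.h>

struct tb {			/* mirrors struct iwl_tfd_tb */
	uint32_t lo;		/* low 32 bits of the DMA address */
	uint16_t hi_n_len;	/* [0:3] addr bits 32..35, [4:15] length */
};

static void set_tb(struct tb *tb, uint64_t addr, uint16_t len)
{
	uint16_t hi_n_len = len << 4;

	tb->lo = (uint32_t)addr;
	hi_n_len |= ((addr >> 16) >> 16) & 0xF;	/* bits 32..35 */
	tb->hi_n_len = hi_n_len;
}

static uint64_t get_addr(const struct tb *tb)
{
	uint64_t addr = tb->lo;

	return addr | (((uint64_t)(tb->hi_n_len & 0xF) << 16) << 16);
}

int main(void)
{
	struct tb tb;
	uint64_t addr = 0x9abcdef012ULL & 0xFFFFFFFFFULL;	/* 36 bits */

	set_tb(&tb, addr, 128);
	assert(get_addr(&tb) == addr);		/* address survives round trip */
	assert(tb.hi_n_len >> 4 == 128);	/* length shares the same word */
	return 0;
}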
@@ -363,18 +425,30 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
 	/* first TB is never freed - it's the bidirectional DMA data */
 	for (i = 1; i < num_tbs; i++) {
-		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+		if (meta->tbs & BIT(i))
 			dma_unmap_page(trans->dev,
-				       iwl_pcie_tfd_tb_get_addr(tfd, i),
-				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
 				       DMA_TO_DEVICE);
 		else
 			dma_unmap_single(trans->dev,
-					 iwl_pcie_tfd_tb_get_addr(tfd, i),
-					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
+								  i),
+					 iwl_pcie_tfd_tb_get_len(trans, tfd,
+								 i),
 					 DMA_TO_DEVICE);
 	}
-	tfd->num_tbs = 0;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	}
 }
 
 /*
@@ -388,20 +462,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  */
 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-	struct iwl_tfd *tfd_tmp = txq->tfds;
-
 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 	 * idx is bounded by n_window
 	 */
-	int rd_ptr = txq->q.read_ptr;
-	int idx = get_cmd_index(&txq->q, rd_ptr);
+	int rd_ptr = txq->read_ptr;
+	int idx = get_cmd_index(txq, rd_ptr);
 
 	lockdep_assert_held(&txq->lock);
 
 	/* We have only q->n_window txq->entries, but we use
 	 * TFD_QUEUE_SIZE_MAX tfds
 	 */
-	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -423,23 +495,21 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 				  dma_addr_t addr, u16 len, bool reset)
 {
-	struct iwl_queue *q;
-	struct iwl_tfd *tfd, *tfd_tmp;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	void *tfd;
 	u32 num_tbs;
 
-	q = &txq->q;
-	tfd_tmp = txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
+	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
 
 	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
+		memset(tfd, 0, trans_pcie->tfd_size);
 
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= IWL_NUM_OF_TBS) {
+	/* Each TFD can point to a maximum max_tbs Tx buffers */
+	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
-			IWL_NUM_OF_TBS);
+			trans_pcie->max_tbs);
 		return -EINVAL;
 	}
 
@@ -447,7 +517,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 		 "Unaligned address = %llx\n", (unsigned long long)addr))
 		return -EINVAL;
 
-	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
 
 	return num_tbs;
 }
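Because the ring can now hold either descriptor layout, iwl_pcie_get_tfd and iwl_pcie_txq_build_tfd above index the DMA block by byte offset rather than as a C array of one fixed struct. A standalone sketch of the idea (types and sizes illustrative):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct ring {
	void *tfds;		/* one DMA block: nslots * tfd_size bytes */
	size_t tfd_size;	/* sizeof(iwl_tfd) or sizeof(iwl_tfh_tfd) */
};

/* byte arithmetic replaces the old &((struct iwl_tfd *)tfds)[idx] */
static void *ring_get_tfd(const struct ring *r, int idx)
{
	return (uint8_t *)r->tfds + r->tfd_size * idx;
}

int main(void)
{
	struct ring r = { .tfd_size = 128 };	/* chosen at probe time */

	r.tfds = calloc(256, r.tfd_size);
	assert((uint8_t *)ring_get_tfd(&r, 3) - (uint8_t *)r.tfds == 384);
	free(r.tfds);
	return 0;
}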
@@ -457,7 +527,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 			       u32 txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
 	size_t tb0_buf_sz;
 	int i;
 
@@ -468,7 +538,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 		    (unsigned long)txq);
 	txq->trans_pcie = trans_pcie;
 
-	txq->q.n_window = slots_num;
+	txq->n_window = slots_num;
 
 	txq->entries = kcalloc(slots_num,
 			       sizeof(struct iwl_pcie_txq_entry),
@@ -489,7 +559,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
 	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
-				       &txq->q.dma_addr, GFP_KERNEL);
+				       &txq->dma_addr, GFP_KERNEL);
 	if (!txq->tfds)
 		goto error;
 
@@ -503,11 +573,11 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 	if (!txq->first_tb_bufs)
 		goto err_free_tfds;
 
-	txq->q.id = txq_id;
+	txq->id = txq_id;
 
 	return 0;
 err_free_tfds:
-	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
+	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
 error:
 	if (txq->entries && txq_id == trans_pcie->cmd_queue)
 		for (i = 0; i < slots_num; i++)
@@ -531,7 +601,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
+	ret = iwl_queue_init(txq, slots_num, txq_id);
 	if (ret)
 		return ret;
 
@@ -545,10 +615,10 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 	if (trans->cfg->use_tfh)
 		iwl_write_direct64(trans,
 				   FH_MEM_CBBC_QUEUE(trans, txq_id),
-				   txq->q.dma_addr);
+				   txq->dma_addr);
 	else
 		iwl_write_direct32(trans,
 				   FH_MEM_CBBC_QUEUE(trans, txq_id),
-				   txq->q.dma_addr >> 8);
+				   txq->dma_addr >> 8);
 
 	return 0;
 }
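The two CBBC writes above encode the same base address differently: the legacy 32-bit scheduler register assumes 256-byte alignment and stores the address shifted right by 8, while the TFH register takes the full 64-bit value. A standalone sketch of the two encodings (plain variables stand in for the iwl_write_direct32/64 register accesses):

#include <assert.h>
#include <stdint.h>

static uint32_t reg32;	/* stand-in for the legacy 32-bit register */
static uint64_t reg64;	/* stand-in for the 64-bit TFH register    */

static void program_cbbc(uint64_t dma_addr, int use_tfh)
{
	if (use_tfh) {
		reg64 = dma_addr;		/* full 64-bit base */
	} else {
		assert((dma_addr & 0xFF) == 0);	/* needs 256-byte alignment */
		reg32 = (uint32_t)(dma_addr >> 8);
	}
}

int main(void)
{
	program_cbbc(0x12345600, 0);
	assert((uint64_t)reg32 << 8 == 0x12345600);	/* losslessly recoverable */
	program_cbbc(0x123456789abcd00ULL, 1);
	assert(reg64 == 0x123456789abcd00ULL);
	return 0;
}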
@@ -595,15 +665,14 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
 
 	spin_lock_bh(&txq->lock);
-	while (q->write_ptr != q->read_ptr) {
+	while (txq->write_ptr != txq->read_ptr) {
 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
-				   txq_id, q->read_ptr);
+				   txq_id, txq->read_ptr);
 
 		if (txq_id != trans_pcie->cmd_queue) {
-			struct sk_buff *skb = txq->entries[q->read_ptr].skb;
+			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
 
 			if (WARN_ON_ONCE(!skb))
 				continue;
@@ -611,15 +680,15 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 			iwl_pcie_free_tso_page(trans_pcie, skb);
 		}
 		iwl_pcie_txq_free_tfd(trans, txq);
-		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
 
-		if (q->read_ptr == q->write_ptr) {
+		if (txq->read_ptr == txq->write_ptr) {
 			unsigned long flags;
 
 			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 			if (txq_id != trans_pcie->cmd_queue) {
 				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
-					      q->id);
+					      txq->id);
 				iwl_trans_unref(trans);
 			} else {
 				iwl_pcie_clear_cmd_in_flight(trans);
@@ -663,7 +732,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 
 	/* De-alloc array of command/tx buffers */
 	if (txq_id == trans_pcie->cmd_queue)
-		for (i = 0; i < txq->q.n_window; i++) {
+		for (i = 0; i < txq->n_window; i++) {
 			kzfree(txq->entries[i].cmd);
 			kzfree(txq->entries[i].free_buf);
 		}
@@ -671,13 +740,13 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	/* De-alloc circular buffer of TFDs */
 	if (txq->tfds) {
 		dma_free_coherent(dev,
-				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
-				  txq->tfds, txq->q.dma_addr);
-		txq->q.dma_addr = 0;
+				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
+				  txq->tfds, txq->dma_addr);
+		txq->dma_addr = 0;
 		txq->tfds = NULL;
 
 		dma_free_coherent(dev,
-				  sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+				  sizeof(*txq->first_tb_bufs) * txq->n_window,
 				  txq->first_tb_bufs, txq->first_tb_dma);
 	}
 
@@ -703,6 +772,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	memset(trans_pcie->queue_stopped, 0,
 	       sizeof(trans_pcie->queue_stopped));
 	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
 
+	if (trans->cfg->use_tfh)
+		return;
+
 	trans_pcie->scd_base_addr =
 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -758,14 +830,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
 		if (trans->cfg->use_tfh)
 			iwl_write_direct64(trans,
 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
-					   txq->q.dma_addr);
+					   txq->dma_addr);
 		else
 			iwl_write_direct32(trans,
 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
-					   txq->q.dma_addr >> 8);
+					   txq->dma_addr >> 8);
 
 		iwl_pcie_txq_unmap(trans, txq_id);
-		txq->q.read_ptr = 0;
-		txq->q.write_ptr = 0;
+		txq->read_ptr = 0;
+		txq->write_ptr = 0;
 	}
 
 	/* Tell NIC where to find the "keep warm" buffer */
@@ -970,11 +1042,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 		}
 	}
 
-	if (trans->cfg->use_tfh)
+	if (trans->cfg->use_tfh) {
 		iwl_write_direct32(trans, TFH_TRANSFER_MODE,
 				   TFH_TRANSFER_MAX_PENDING_REQ |
 				   TFH_CHUNK_SIZE_128 |
 				   TFH_CHUNK_SPLIT_MODE);
+		return 0;
+	}
 
 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
 	if (trans->cfg->base_params->num_of_queues > 20)
@@ -1007,7 +1081,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
 	 * if empty delete timer, otherwise move timer forward
 	 * since we're making progress on this queue
 	 */
-	if (txq->q.read_ptr == txq->q.write_ptr)
+	if (txq->read_ptr == txq->write_ptr)
 		del_timer(&txq->stuck_timer);
 	else
 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
@@ -1020,7 +1094,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
-	struct iwl_queue *q = &txq->q;
 	int last_to_free;
 
 	/* This function is not meant to release cmd queue*/
@@ -1035,21 +1108,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 	}
 
-	if (txq->q.read_ptr == tfd_num)
+	if (txq->read_ptr == tfd_num)
 		goto out;
 
 	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
-			   txq_id, txq->q.read_ptr, tfd_num, ssn);
+			   txq_id, txq->read_ptr, tfd_num, ssn);
 
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
 	last_to_free = iwl_queue_dec_wrap(tfd_num);
 
-	if (!iwl_queue_used(q, last_to_free)) {
+	if (!iwl_queue_used(txq, last_to_free)) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
-			q->write_ptr, q->read_ptr);
+			txq->write_ptr, txq->read_ptr);
 		goto out;
 	}
 
@@ -1057,9 +1130,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		goto out;
 
 	for (;
-	     q->read_ptr != tfd_num;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
-		struct sk_buff *skb = txq->entries[txq->q.read_ptr].skb;
+	     txq->read_ptr != tfd_num;
+	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
+		struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
 
 		if (WARN_ON_ONCE(!skb))
 			continue;
@@ -1068,16 +1141,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		__skb_queue_tail(skbs, skb);
 
-		txq->entries[txq->q.read_ptr].skb = NULL;
+		txq->entries[txq->read_ptr].skb = NULL;
 
-		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+		if (!trans->cfg->use_tfh)
+			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
 		iwl_pcie_txq_free_tfd(trans, txq);
 	}
 
 	iwl_pcie_txq_progress(txq);
 
-	if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+	if (iwl_queue_space(txq) > txq->low_mark &&
 	    test_bit(txq_id, trans_pcie->queue_stopped)) {
 		struct sk_buff_head overflow_skbs;
 
@@ -1109,12 +1183,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		}
 		spin_lock_bh(&txq->lock);
 
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+		if (iwl_queue_space(txq) > txq->low_mark)
 			iwl_wake_queue(trans, txq);
 	}
 
-	if (q->read_ptr == q->write_ptr) {
-		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
+	if (txq->read_ptr == txq->write_ptr) {
+		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
 		iwl_trans_unref(trans);
 	}
 
@@ -1176,31 +1250,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
 	unsigned long flags;
 	int nfreed = 0;
 
 	lockdep_assert_held(&txq->lock);
 
-	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
+	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
-			q->write_ptr, q->read_ptr);
+			txq->write_ptr, txq->read_ptr);
 		return;
 	}
 
-	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
+	for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
+	     txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
 
 		if (nfreed++ > 0) {
 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
-				idx, q->write_ptr, q->read_ptr);
+				idx, txq->write_ptr, txq->read_ptr);
 			iwl_force_nmi(trans);
 		}
 	}
 
-	if (q->read_ptr == q->write_ptr) {
+	if (txq->read_ptr == txq->write_ptr) {
 		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 		iwl_pcie_clear_cmd_in_flight(trans);
 		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
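All of the pointer walking above depends on TFD_QUEUE_SIZE_MAX being a power of two (enforced by the BUILD_BUG_ON in iwl_pcie_txq_init), so wrap-around and the "is this index still in flight" test reduce to mask operations. A standalone sketch of that ring arithmetic (queue size illustrative; mirrors the intent of iwl_queue_inc_wrap/iwl_queue_dec_wrap/iwl_queue_used):

#include <assert.h>

#define QUEUE_SIZE	256	/* power of two, like TFD_QUEUE_SIZE_MAX */

static int inc_wrap(int i) { return (i + 1) & (QUEUE_SIZE - 1); }
static int dec_wrap(int i) { return (i - 1) & (QUEUE_SIZE - 1); }

/* index i is "used" iff it lies in [read, write) modulo the ring size */
static int queue_used(int read, int write, int i)
{
	return ((i - read) & (QUEUE_SIZE - 1)) <
	       ((write - read) & (QUEUE_SIZE - 1));
}

int main(void)
{
	assert(inc_wrap(255) == 0);
	assert(dec_wrap(0) == 255);
	assert(queue_used(250, 4, 252));	/* interval wraps past zero */
	assert(!queue_used(250, 4, 10));	/* already reclaimed slot   */
	return 0;
}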
@@ -1249,6 +1322,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
 
+	if (cfg && trans->cfg->use_tfh)
+		WARN_ONCE(1, "Expected no calls to SCD configuration");
+
 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
 
 	if (cfg) {
@@ -1283,14 +1359,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 			 */
 			iwl_scd_txq_disable_agg(trans, txq_id);
 
-			ssn = txq->q.read_ptr;
+			ssn = txq->read_ptr;
 		}
 	}
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
-	txq->q.read_ptr = (ssn & 0xff);
-	txq->q.write_ptr = (ssn & 0xff);
+	txq->read_ptr = (ssn & 0xff);
+	txq->write_ptr = (ssn & 0xff);
 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
 			   (ssn & 0xff) | (txq_id << 8));
 
@@ -1343,6 +1419,14 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 	txq->ampdu = !shared_mode;
 }
 
+dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	return trans_pcie->scd_bc_tbls.dma +
+	       txq * sizeof(struct iwlagn_scd_bc_tbl);
+}
+
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 				bool configure_scd)
 {
@@ -1366,6 +1450,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
 		return;
 	}
 
+	if (configure_scd && trans->cfg->use_tfh)
+		WARN_ONCE(1, "Expected no calls to SCD configuration");
+
 	if (configure_scd) {
 		iwl_scd_txq_set_inactive(trans, txq_id);
 
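The new iwl_trans_pcie_get_txq_byte_table export above is plain offset arithmetic: all per-queue byte-count tables live in one DMA allocation, so queue N's table sits at base + N * sizeof(struct iwlagn_scd_bc_tbl). A standalone sketch (table size illustrative):

#include <assert.h>
#include <stdint.h>

#define BC_TBL_SIZE	1024	/* illustrative per-queue table size */

static uint64_t byte_table_addr(uint64_t base_dma, int txq)
{
	return base_dma + (uint64_t)txq * BC_TBL_SIZE;
}

int main(void)
{
	assert(byte_table_addr(0x100000, 0) == 0x100000);
	assert(byte_table_addr(0x100000, 3) == 0x100000 + 3 * BC_TBL_SIZE);
	return 0;
}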
@@ -1395,7 +1482,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	unsigned long flags;
@@ -1410,7 +1496,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-	if (WARN(!trans_pcie->wide_cmd_header &&
+	if (WARN(!trans->wide_cmd_header &&
 		 group_id > IWL_ALWAYS_LONG_GROUP,
 		 "unsupported wide command %#x\n", cmd->id))
 		return -EINVAL;
@@ -1494,7 +1580,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	spin_lock_bh(&txq->lock);
 
-	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		spin_unlock_bh(&txq->lock);
 
 		IWL_ERR(trans, "No space in command queue\n");
@@ -1503,7 +1589,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		goto free_dup_buf;
 	}
 
-	idx = get_cmd_index(q, q->write_ptr);
+	idx = get_cmd_index(txq, txq->write_ptr);
 	out_cmd = txq->entries[idx].cmd;
 	out_meta = &txq->entries[idx].meta;
 
@@ -1522,7 +1608,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		out_cmd->hdr_wide.reserved = 0;
 		out_cmd->hdr_wide.sequence =
 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-						 INDEX_TO_SEQ(q->write_ptr));
+						 INDEX_TO_SEQ(txq->write_ptr));
 
 		cmd_pos = sizeof(struct iwl_cmd_header_wide);
 		copy_size = sizeof(struct iwl_cmd_header_wide);
@@ -1530,7 +1616,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
 		out_cmd->hdr.sequence =
 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-						 INDEX_TO_SEQ(q->write_ptr));
+						 INDEX_TO_SEQ(txq->write_ptr));
 		out_cmd->hdr.group_id = 0;
 
 		cmd_pos = sizeof(struct iwl_cmd_header);
@@ -1580,7 +1666,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		     iwl_get_cmd_string(trans, cmd->id),
 		     group_id, out_cmd->hdr.cmd,
 		     le16_to_cpu(out_cmd->hdr.sequence),
-		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
+		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
 
 	/* start the TFD with the minimum copy bytes */
 	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1596,8 +1682,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 					   copy_size - tb0_size,
 					   DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1620,8 +1706,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		phys_addr = dma_map_single(trans->dev, (void *)data,
 					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1629,8 +1715,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 
-	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
-		     sizeof(out_meta->flags) * BITS_PER_BYTE);
+	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
 	out_meta->flags = cmd->flags;
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 		kzfree(txq->entries[idx].free_buf);
@@ -1639,7 +1724,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
 	/* start timer if queue currently empty */
-	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
+	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 
 	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
@@ -1651,7 +1736,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	/* Increment and update queue's write index */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
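The sequence field written above packs the command queue id and the ring index into one 16-bit word, which is how iwl_pcie_hcmd_complete (next hunk) finds the matching queue entry when the response arrives. A standalone sketch mirroring the driver's QUEUE_TO_SEQ/INDEX_TO_SEQ helpers (bit layout assumed from their use here):

#include <assert.h>
#include <stdint.h>

#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)	/* queue id in bits 8..12 */
#define INDEX_TO_SEQ(i)	((i) & 0xff)		/* ring index in bits 0..7 */
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	uint16_t seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(42);

	assert(SEQ_TO_QUEUE(seq) == 9);		/* recovered on completion */
	assert(SEQ_TO_INDEX(seq) == 42);
	return 0;
}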
@@ -1689,20 +1774,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	if (WARN(txq_id != trans_pcie->cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
 		 txq_id, trans_pcie->cmd_queue, sequence,
-		 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
-		 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+		 trans_pcie->txq[trans_pcie->cmd_queue].read_ptr,
+		 trans_pcie->txq[trans_pcie->cmd_queue].write_ptr)) {
 		iwl_print_hex_error(trans, pkt, 32);
 		return;
 	}
 
 	spin_lock_bh(&txq->lock);
 
-	cmd_index = get_cmd_index(&txq->q, index);
+	cmd_index = get_cmd_index(txq, index);
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
 
-	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+	iwl_pcie_tfd_unmap(trans, meta, txq, index);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -1815,14 +1900,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 				 HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
 		struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-		struct iwl_queue *q = &txq->q;
 
 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
 			iwl_get_cmd_string(trans, cmd->id),
 			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
 		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
-			q->read_ptr, q->write_ptr);
+			txq->read_ptr, txq->write_ptr);
 
 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
@@ -1900,7 +1984,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 			     struct iwl_cmd_meta *out_meta,
 			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
-	struct iwl_queue *q = &txq->q;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u16 tb2_len;
 	int i;
 
@@ -1915,8 +1999,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 						     skb->data + hdr_len,
 						     tb2_len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			return -EINVAL;
 		}
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1935,19 +2019,19 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 					   skb_frag_size(frag), DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
 			return -EINVAL;
 		}
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 						skb_frag_size(frag), false);
 
-		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+		out_meta->tbs |= BIT(tb_idx);
 	}
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
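out_meta->tbs above replaces the old per-TB flag bits at CMD_TB_BITMAP_POS: one bit per transfer buffer records whether it came from dma_map_page() or dma_map_single(), so iwl_pcie_tfd_unmap can pick the matching unmap call. A standalone sketch of that bookkeeping (types and mapping kinds illustrative):

#include <assert.h>
#include <stdint.h>

#define TB_IS_PAGE	1	/* mapped with dma_map_page()   */
#define TB_IS_SINGLE	0	/* mapped with dma_map_single() */

static uint16_t tbs;	/* bit i set => TB i was a page mapping */

static void record_tb(int idx, int is_page)
{
	if (is_page)
		tbs |= 1u << idx;
}

int main(void)
{
	record_tb(1, TB_IS_SINGLE);	/* e.g. a mapped header buffer  */
	record_tb(2, TB_IS_PAGE);	/* e.g. a mapped skb frag page  */
	assert(!(tbs & (1u << 1)));	/* unmap with dma_unmap_single  */
	assert(tbs & (1u << 2));	/* unmap with dma_unmap_page    */
	return 0;
}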
@@ -2008,7 +2092,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
-	struct iwl_queue *q = &txq->q;
 	u16 length, iv_len, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
@@ -2022,8 +2105,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		IEEE80211_CCMP_HDR_LEN : 0;
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     NULL, 0);
 
@@ -2179,7 +2262,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	return 0;
 
 out_unmap:
-	iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
 	return ret;
 }
 #else /* CONFIG_INET */
@@ -2203,9 +2286,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
-	struct iwl_queue *q;
 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
 	void *tb1_addr;
+	void *tfd;
 	u16 len, tb1_len;
 	bool wait_write_ptr;
 	__le16 fc;
@@ -2214,7 +2297,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	bool amsdu;
 
 	txq = &trans_pcie->txq[txq_id];
-	q = &txq->q;
 
 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
 		      "TX on unused queue %d\n", txq_id))
@@ -2236,7 +2318,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	if (skb_is_nonlinear(skb) &&
-	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
 	    __skb_linearize(skb))
 		return -ENOMEM;
 
@@ -2249,11 +2331,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	spin_lock(&txq->lock);
 
-	if (iwl_queue_space(q) < q->high_mark) {
+	if (iwl_queue_space(txq) < txq->high_mark) {
 		iwl_stop_queue(trans, txq);
 
 		/* don't put the packet on the ring, if there is no room */
-		if (unlikely(iwl_queue_space(q) < 3)) {
+		if (unlikely(iwl_queue_space(txq) < 3)) {
 			struct iwl_device_cmd **dev_cmd_ptr;
 
 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2274,19 +2356,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	 */
 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
 	WARN_ONCE(txq->ampdu &&
-		  (wifi_seq & 0xff) != q->write_ptr,
+		  (wifi_seq & 0xff) != txq->write_ptr,
 		  "Q: %d WiFi Seq %d tfdNum %d",
-		  txq_id, wifi_seq, q->write_ptr);
+		  txq_id, wifi_seq, txq->write_ptr);
 
 	/* Set up driver data for this TFD */
-	txq->entries[q->write_ptr].skb = skb;
-	txq->entries[q->write_ptr].cmd = dev_cmd;
+	txq->entries[txq->write_ptr].skb = skb;
+	txq->entries[txq->write_ptr].cmd = dev_cmd;
 
 	dev_cmd->hdr.sequence =
 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-			    INDEX_TO_SEQ(q->write_ptr)));
+			    INDEX_TO_SEQ(txq->write_ptr)));
 
-	tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
+	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
 		       offsetof(struct iwl_tx_cmd, scratch);
 
@@ -2294,7 +2376,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_meta = &txq->entries[q->write_ptr].meta;
+	out_meta = &txq->entries[txq->write_ptr].meta;
 	out_meta->flags = 0;
 
 	/*
@@ -2319,7 +2401,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* The first TB points to bi-directional DMA data */
-	memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
 	       IWL_FIRST_TB_SIZE);
 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true);
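The watermark logic above stops the queue as soon as free space drops below high_mark while still parking the last few frames on an overflow list, and the reclaim path earlier only wakes it once space exceeds the larger low_mark, giving hysteresis so the queue does not bounce. A standalone sketch of that pattern (threshold values illustrative):

#include <assert.h>
#include <stdbool.h>

/* hysteresis: the wake threshold is higher than the stop threshold */
#define STOP_THRESH	2	/* plays the role of txq->high_mark */
#define WAKE_THRESH	8	/* plays the role of txq->low_mark  */

static bool stopped;

static void on_tx(int free_slots)
{
	if (free_slots < STOP_THRESH)
		stopped = true;		/* iwl_stop_queue() */
}

static void on_reclaim(int free_slots)
{
	if (stopped && free_slots > WAKE_THRESH)
		stopped = false;	/* iwl_wake_queue() */
}

int main(void)
{
	on_tx(1);
	assert(stopped);	/* too little room: stop early */
	on_reclaim(5);
	assert(stopped);	/* not yet past the wake mark  */
	on_reclaim(9);
	assert(!stopped);	/* enough room reclaimed       */
	return 0;
}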
@@ -2344,13 +2426,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			goto out_err;
 	}
 
+	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
 	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
-	if (q->read_ptr == q->write_ptr) {
+	if (txq->read_ptr == txq->write_ptr) {
 		if (txq->wd_timeout) {
 			/*
 			 * If the TXQ is active, then set the timer, if not,
@@ -2364,12 +2448,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			else
 				txq->frozen_expiry_remainder = txq->wd_timeout;
 		}
-		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
+		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
 		iwl_trans_ref(trans);
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
 	if (!wait_write_ptr)
 		iwl_pcie_txq_inc_wr_ptr(trans, txq);