author    Johannes Berg <johannes.berg@intel.com>  2018-07-04 22:59:52 +0200
committer Luca Coelho <luciano.coelho@intel.com>   2018-10-08 10:49:08 +0300
commit    c20e08b0d6ed2a641a5217ccfaf8b616bbba869e (patch)
tree      dd5ed3b950ffdefe83921668a37f0dac085b1d19 /drivers/net/wireless/intel/iwlwifi/mvm/sta.c
parent    6fe64d034ec927d9c0fa28176dd071741d4f2180 (diff)
download  talos-op-linux-c20e08b0d6ed2a641a5217ccfaf8b616bbba869e.tar.gz / .zip
iwlwifi: mvm: move iwl_mvm_sta_alloc_queue() down
We want to call iwl_mvm_inactivity_check() from here in the next patch, so we need to move the code down to be able to do that. Fix a minor checkpatch complaint while at it.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
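For context: both functions are static to sta.c, and in C a call site only sees functions that are defined (or forward-declared) earlier in the translation unit. Moving the whole definition below iwl_mvm_inactivity_check() avoids adding a forward declaration. A minimal sketch of the same ordering rule, using hypothetical stand-in names rather than the real driver code:

#include <stdio.h>

/* Stand-in for iwl_mvm_inactivity_check(): defined first, so the
 * function below can call it without a forward declaration. */
static void inactivity_check(void)
{
	printf("checking for inactive queues\n");
}

/* Stand-in for iwl_mvm_sta_alloc_queue(): placed below its callee,
 * mirroring the move this patch makes. */
static int sta_alloc_queue(void)
{
	inactivity_check();	/* fine: the callee is already defined above */
	return 0;
}

int main(void)
{
	return sta_alloc_queue();
}

Defining sta_alloc_queue() above inactivity_check() without a forward declaration would instead draw an implicit-declaration diagnostic, which is exactly what the move avoids.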
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/sta.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c  410
1 file changed, 205 insertions(+), 205 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 24631866a174..8ae4fbf17f03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -936,211 +936,6 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
return inc_ssn;
}
-static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, u8 ac, int tid,
- struct ieee80211_hdr *hdr)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
- .sta_id = mvmsta->sta_id,
- .tid = tid,
- .frame_limit = IWL_FRAME_LIMIT,
- };
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
- u8 mac_queue = mvmsta->vif->hw_queue[ac];
- int queue = -1;
- bool using_inactive_queue = false, same_sta = false;
- unsigned long disable_agg_tids = 0;
- enum iwl_mvm_agg_state queue_state;
- bool shared_queue = false, inc_ssn;
- int ssn;
- unsigned long tfd_queue_mask;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
-
- spin_lock_bh(&mvmsta->lock);
- tfd_queue_mask = mvmsta->tfd_queue_msk;
- spin_unlock_bh(&mvmsta->lock);
-
- spin_lock_bh(&mvm->queue_info_lock);
-
- /*
- * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
- * exists
- */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
- IWL_MVM_DQA_MIN_MGMT_QUEUE,
- IWL_MVM_DQA_MAX_MGMT_QUEUE);
- if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
- IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
- queue);
-
- /* If no such queue is found, we'll use a DATA queue instead */
- }
-
- if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
- (mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_RESERVED ||
- mvm->queue_info[mvmsta->reserved_queue].status ==
- IWL_MVM_QUEUE_INACTIVE)) {
- queue = mvmsta->reserved_queue;
- mvm->queue_info[queue].reserved = true;
- IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
- }
-
- if (queue < 0)
- queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
- IWL_MVM_DQA_MIN_DATA_QUEUE,
- IWL_MVM_DQA_MAX_DATA_QUEUE);
-
- /*
- * Check if this queue is already allocated but inactive.
- * In such a case, we'll need to first free this queue before enabling
- * it again, so we'll mark it as reserved to make sure no new traffic
- * arrives on it
- */
- if (queue > 0 &&
- mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
- using_inactive_queue = true;
- same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
- IWL_DEBUG_TX_QUEUES(mvm,
- "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
- queue, mvmsta->sta_id, tid);
- }
-
- /* No free queue - we'll have to share */
- if (queue <= 0) {
- queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
- if (queue > 0) {
- shared_queue = true;
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
- }
- }
-
- /*
- * Mark TXQ as ready, even though it hasn't been fully configured yet,
- * to make sure no one else takes it.
- * This will allow avoiding re-acquiring the lock at the end of the
- * configuration. On error we'll mark it back as free.
- */
- if ((queue > 0) && !shared_queue)
- mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
-
- spin_unlock_bh(&mvm->queue_info_lock);
-
- /* This shouldn't happen - out of queues */
- if (WARN_ON(queue <= 0)) {
- IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
- tid, cfg.sta_id);
- return queue;
- }
-
- /*
- * Actual en/disablement of aggregations is through the ADD_STA HCMD,
- * but for configuring the SCD to send A-MPDUs we need to mark the queue
- * as aggregatable.
- * Mark all DATA queues as allowing to be aggregated at some point
- */
- cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
- queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
-
- /*
- * If this queue was previously inactive (idle) - we need to free it
- * first
- */
- if (using_inactive_queue) {
- ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
- if (ret)
- return ret;
- }
-
- IWL_DEBUG_TX_QUEUES(mvm,
- "Allocating %squeue #%d to sta %d on tid %d\n",
- shared_queue ? "shared " : "", queue,
- mvmsta->sta_id, tid);
-
- if (shared_queue) {
- /* Disable any open aggs on this queue */
- disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
-
- if (disable_agg_tids) {
- IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
- queue);
- iwl_mvm_invalidate_sta_queue(mvm, queue,
- disable_agg_tids, false);
- }
- }
-
- ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
- ssn, &cfg, wdg_timeout);
- if (inc_ssn) {
- ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
- le16_add_cpu(&hdr->seq_ctrl, 0x10);
- }
-
- /*
- * Mark queue as shared in transport if shared
- * Note this has to be done after queue enablement because enablement
- * can also set this value, and there is no indication there to shared
- * queues
- */
- if (shared_queue)
- iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
-
- spin_lock_bh(&mvmsta->lock);
- /*
- * This looks racy, but it is not. We have only one packet for
- * this ra/tid in our Tx path since we stop the Qdisc when we
- * need to allocate a new TFD queue.
- */
- if (inc_ssn)
- mvmsta->tid_data[tid].seq_number += 0x10;
- mvmsta->tid_data[tid].txq_id = queue;
- mvmsta->tid_data[tid].is_tid_active = true;
- mvmsta->tfd_queue_msk |= BIT(queue);
- queue_state = mvmsta->tid_data[tid].state;
-
- if (mvmsta->reserved_queue == queue)
- mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
- spin_unlock_bh(&mvmsta->lock);
-
- if (!shared_queue) {
- ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
- if (ret)
- goto out_err;
-
- /* If we need to re-enable aggregations... */
- if (queue_state == IWL_AGG_ON) {
- ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
- if (ret)
- goto out_err;
- }
- } else {
- /* Redirect queue, if needed */
- ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
- wdg_timeout, false);
- if (ret)
- goto out_err;
- }
-
- return 0;
-
-out_err:
- iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
-
- return ret;
-}
-
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
struct iwl_scd_txq_cfg_cmd cmd = {
@@ -1460,6 +1255,211 @@ static void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
iwl_mvm_change_queue_tid(mvm, i);
}
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac, int tid,
+ struct ieee80211_hdr *hdr)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
+ .sta_id = mvmsta->sta_id,
+ .tid = tid,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+ bool using_inactive_queue = false, same_sta = false;
+ unsigned long disable_agg_tids = 0;
+ enum iwl_mvm_agg_state queue_state;
+ bool shared_queue = false, inc_ssn;
+ int ssn;
+ unsigned long tfd_queue_mask;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
+ spin_lock_bh(&mvmsta->lock);
+ tfd_queue_mask = mvmsta->tfd_queue_msk;
+ spin_unlock_bh(&mvmsta->lock);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+ * exists
+ */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE);
+ if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+ IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+ queue);
+
+ /* If no such queue is found, we'll use a DATA queue instead */
+ }
+
+ if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+ (mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_RESERVED ||
+ mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_INACTIVE)) {
+ queue = mvmsta->reserved_queue;
+ mvm->queue_info[queue].reserved = true;
+ IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+ }
+
+ if (queue < 0)
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+
+ /*
+ * Check if this queue is already allocated but inactive.
+ * In such a case, we'll need to first free this queue before enabling
+ * it again, so we'll mark it as reserved to make sure no new traffic
+ * arrives on it
+ */
+ if (queue > 0 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+ using_inactive_queue = true;
+ same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
+ queue, mvmsta->sta_id, tid);
+ }
+
+ /* No free queue - we'll have to share */
+ if (queue <= 0) {
+ queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+ if (queue > 0) {
+ shared_queue = true;
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+ }
+ }
+
+ /*
+ * Mark TXQ as ready, even though it hasn't been fully configured yet,
+ * to make sure no one else takes it.
+ * This will allow avoiding re-acquiring the lock at the end of the
+ * configuration. On error we'll mark it back as free.
+ */
+ if (queue > 0 && !shared_queue)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* This shouldn't happen - out of queues */
+ if (WARN_ON(queue <= 0)) {
+ IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
+ tid, cfg.sta_id);
+ return queue;
+ }
+
+ /*
+ * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+ * but for configuring the SCD to send A-MPDUs we need to mark the queue
+ * as aggregatable.
+ * Mark all DATA queues as allowing to be aggregated at some point
+ */
+ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ /*
+ * If this queue was previously inactive (idle) - we need to free it
+ * first
+ */
+ if (using_inactive_queue) {
+ ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+ if (ret)
+ return ret;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating %squeue #%d to sta %d on tid %d\n",
+ shared_queue ? "shared " : "", queue,
+ mvmsta->sta_id, tid);
+
+ if (shared_queue) {
+ /* Disable any open aggs on this queue */
+ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+ if (disable_agg_tids) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
+ queue);
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+ }
+ }
+
+ ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+ ssn, &cfg, wdg_timeout);
+ if (inc_ssn) {
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ le16_add_cpu(&hdr->seq_ctrl, 0x10);
+ }
+
+ /*
+ * Mark queue as shared in transport if shared
+ * Note this has to be done after queue enablement because enablement
+ * can also set this value, and there is no indication there to shared
+ * queues
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+ spin_lock_bh(&mvmsta->lock);
+ /*
+ * This looks racy, but it is not. We have only one packet for
+ * this ra/tid in our Tx path since we stop the Qdisc when we
+ * need to allocate a new TFD queue.
+ */
+ if (inc_ssn)
+ mvmsta->tid_data[tid].seq_number += 0x10;
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+ queue_state = mvmsta->tid_data[tid].state;
+
+ if (mvmsta->reserved_queue == queue)
+ mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (!shared_queue) {
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+ if (ret)
+ goto out_err;
+
+ /* If we need to re-enable aggregations... */
+ if (queue_state == IWL_AGG_ON) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ goto out_err;
+ }
+ } else {
+ /* Redirect queue, if needed */
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+ wdg_timeout, false);
+ if (ret)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+
+ return ret;
+}
+
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
if (tid == IWL_MAX_TID_COUNT)
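Aside from the relocation itself, the re-added block is identical to the removed one except for a single line; the "minor checkpatch complaint" mentioned in the commit message appears to be the redundant parentheses in the "Mark TXQ as ready" hunk:

/* removed version: */
if ((queue > 0) && !shared_queue)
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

/* re-added version, with the unnecessary parentheses dropped: */
if (queue > 0 && !shared_queue)
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;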