author     Nithin Dabilpuram <ndabilpuram@marvell.com>  2018-12-02 18:17:41 +0530
committer  David S. Miller <davem@davemloft.net>        2018-12-03 16:23:08 -0800
commit     e2703c5f581a8d970c46251914e837db192b80d4 (patch)
tree       f725d02272d68e3a226ce7eb0230e422fbe20111 /drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
parent     26dda7da8ed33b8541053daaf1a2a08389bc0fe0 (diff)
octeontx2-af: Allow freeing single TLx Tx schedule queue
The default behavior was to free all the TLx Tx schedule queues. This patch
adds support for freeing a single Tx schedule queue if TXSCHQ_FREE_ALL flag
is not set.

Signed-off-by: Krzysztof Kanas <kkanas@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
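For context, below is a minimal, hypothetical caller-side sketch of the new
single-queue path. The request fields (flags, schq_lvl, schq), the
TXSCHQ_FREE_ALL flag and the NIX_TXSCH_LVL_SMQ level come from the handler
code in the diff below; alloc_nix_txsch_free_req() and send_mbox_msg() are
placeholder names for the requesting driver's mailbox helpers and are not
part of this patch.

/*
 * Illustrative caller-side sketch only (not part of this patch).  The
 * request fields set here are the ones consumed by
 * rvu_mbox_handler_nix_txsch_free() in the diff; alloc_nix_txsch_free_req()
 * and send_mbox_msg() are hypothetical stand-ins for the requesting
 * driver's mailbox helpers.
 */
static int example_free_single_smq(struct otx2_mbox *mbox, u16 smq)
{
        struct nix_txsch_free_req *req;

        req = alloc_nix_txsch_free_req(mbox);   /* hypothetical helper */
        if (!req)
                return -ENOMEM;

        req->flags = 0;                         /* TXSCHQ_FREE_ALL left clear */
        req->schq_lvl = NIX_TXSCH_LVL_SMQ;      /* level of the queue to free */
        req->schq = smq;                        /* single scheduler queue index */

        return send_mbox_msg(mbox, &req->hdr);  /* hypothetical helper */
}

On the AF side, requests that do not set TXSCHQ_FREE_ALL are routed to the
new nix_txschq_free_one(), which verifies queue ownership via pfvf_map,
flushes the queue if it is an SMQ, and frees only that queue.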
Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 72
1 file changed, 71 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0d4929bd3a95..741728c3d597 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -1252,11 +1252,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
        return 0;
}
+static int nix_txschq_free_one(struct rvu *rvu,
+                               struct nix_txsch_free_req *req)
+{
+       int lvl, schq, nixlf, blkaddr, rc;
+       struct rvu_hwinfo *hw = rvu->hw;
+       u16 pcifunc = req->hdr.pcifunc;
+       struct nix_txsch *txsch;
+       struct nix_hw *nix_hw;
+       u32 *pfvf_map;
+       u64 cfg;
+
+       blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+       if (blkaddr < 0)
+               return NIX_AF_ERR_AF_LF_INVALID;
+
+       nix_hw = get_nix_hw(rvu->hw, blkaddr);
+       if (!nix_hw)
+               return -EINVAL;
+
+       nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+       if (nixlf < 0)
+               return NIX_AF_ERR_AF_LF_INVALID;
+
+       lvl = req->schq_lvl;
+       schq = req->schq;
+       txsch = &nix_hw->txsch[lvl];
+
+       /* Don't allow freeing TL1 */
+       if (lvl > NIX_TXSCH_LVL_TL2 ||
+           schq >= txsch->schq.max)
+               goto err;
+
+       pfvf_map = txsch->pfvf_map;
+       mutex_lock(&rvu->rsrc_lock);
+
+       if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+               mutex_unlock(&rvu->rsrc_lock);
+               goto err;
+       }
+
+       /* Flush if it is a SMQ. Onus of disabling
+        * TL2/3 queue links before SMQ flush is on user
+        */
+       if (lvl == NIX_TXSCH_LVL_SMQ) {
+               cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+               /* Do SMQ flush and set enqueue xoff */
+               cfg |= BIT_ULL(50) | BIT_ULL(49);
+               rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+               /* Wait for flush to complete */
+               rc = rvu_poll_reg(rvu, blkaddr,
+                                 NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+               if (rc) {
+                       dev_err(rvu->dev,
+                               "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+               }
+       }
+
+       /* Free the resource */
+       rvu_free_rsrc(&txsch->schq, schq);
+       txsch->pfvf_map[schq] = 0;
+       mutex_unlock(&rvu->rsrc_lock);
+       return 0;
+err:
+       return NIX_AF_ERR_TLX_INVALID;
+}
+
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
                                    struct nix_txsch_free_req *req,
                                    struct msg_rsp *rsp)
{
-       return nix_txschq_free(rvu, req->hdr.pcifunc);
+       if (req->flags & TXSCHQ_FREE_ALL)
+               return nix_txschq_free(rvu, req->hdr.pcifunc);
+       else
+               return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,