Diffstat (limited to 'drivers/net/wireless/realtek/rtw88/pci.c')
-rw-r--r--  drivers/net/wireless/realtek/rtw88/pci.c | 484
1 file changed, 405 insertions, 79 deletions
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 353871c27779..1fbc14c149ec 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -6,10 +6,17 @@
 #include <linux/pci.h>
 #include "main.h"
 #include "pci.h"
+#include "reg.h"
 #include "tx.h"
 #include "rx.h"
+#include "fw.h"
+#include "ps.h"
 #include "debug.h"
 
+static bool rtw_disable_msi;
+module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
+MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
+
 static u32 rtw_pci_tx_queue_idx_addr[] = {
 	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
 	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
@@ -85,16 +92,13 @@ static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
 	return tx_ring->r.head + offset;
 }
 
-static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
-				 struct rtw_pci_tx_ring *tx_ring)
+static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
+				      struct rtw_pci_tx_ring *tx_ring)
 {
 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 	struct rtw_pci_tx_data *tx_data;
 	struct sk_buff *skb, *tmp;
 	dma_addr_t dma;
-	u8 *head = tx_ring->r.head;
-	u32 len = tx_ring->r.len;
-	int ring_sz = len * tx_ring->r.desc_size;
 
 	/* free every skb remained in tx list */
 	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
@@ -105,21 +109,30 @@ static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
 		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
 		dev_kfree_skb_any(skb);
 	}
+}
+
+static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
+				 struct rtw_pci_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+	u8 *head = tx_ring->r.head;
+	u32 len = tx_ring->r.len;
+	int ring_sz = len * tx_ring->r.desc_size;
+
+	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
 
 	/* free the ring itself */
 	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
 	tx_ring->r.head = NULL;
 }
 
-static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
-				 struct rtw_pci_rx_ring *rx_ring)
+static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
+				      struct rtw_pci_rx_ring *rx_ring)
 {
 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
 	struct sk_buff *skb;
-	dma_addr_t dma;
-	u8 *head = rx_ring->r.head;
 	int buf_sz = RTK_PCI_RX_BUF_SIZE;
-	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
+	dma_addr_t dma;
 	int i;
 
 	for (i = 0; i < rx_ring->r.len; i++) {
@@ -132,6 +145,16 @@ static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
 		dev_kfree_skb(skb);
 		rx_ring->buf[i] = NULL;
 	}
+}
+
+static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
+				 struct rtw_pci_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+	u8 *head = rx_ring->r.head;
+	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
+
+	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
 
 	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
 }
@@ -206,6 +229,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
 	return 0;
 }
 
+static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
+					struct rtw_pci_rx_ring *rx_ring,
+					u32 idx, u32 desc_sz)
+{
+	struct device *dev = rtwdev->dev;
+	struct rtw_pci_rx_buffer_desc *buf_desc;
+	int buf_sz = RTK_PCI_RX_BUF_SIZE;
+
+	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
+
+	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+						     idx * desc_sz);
+	memset(buf_desc, 0, sizeof(*buf_desc));
+	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
+	buf_desc->dma = cpu_to_le32(dma);
+}
+
 static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
 				struct rtw_pci_rx_ring *rx_ring,
 				u8 desc_size, u32 len)
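The two-step teardown above (rtw_pci_free_tx_ring_skbs() / rtw_pci_free_rx_ring_skbs() split out of the ring destructors) lets the driver drop queued buffers without releasing the coherent descriptor memory, while the new rtw_pci_sync_rx_desc_device() re-arms an RX slot whose buffer stays DMA-mapped for the ring's whole lifetime. A minimal sketch of that ownership hand-off, assuming a persistently mapped streaming buffer (only the dma_* call is real kernel API; the helper name is illustrative):

    /* illustrative re-arm of one RX slot; the buffer was mapped once with
     * dma_map_single() and is never unmapped on the fast path
     */
    static void example_rearm_rx_buf(struct device *dev, dma_addr_t dma,
                                     int buf_sz)
    {
            /* CPU is done reading: hand ownership back to the device,
             * with no dma_unmap_single()/dma_map_single() round trip
             */
            dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

            /* ...then rewrite the HW buffer descriptor (size + address)
             * so the device can DMA into the same buffer again
             */
    }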
@@ -419,9 +459,9 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
 	/* reset read/write point */
 	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
 
-	/* rest H2C Queue index */
-	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
-	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+	/* reset H2C Queue index in a single write */
+	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
+			BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
 }
 
 static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
@@ -447,19 +487,34 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
 	rtwpci->irq_enabled = false;
 }
 
+static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
+{
+	/* reset dma and rx tag */
+	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
+			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
+	rtwpci->rx_tag = 0;
+}
+
 static int rtw_pci_setup(struct rtw_dev *rtwdev)
 {
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
 	rtw_pci_reset_trx_ring(rtwdev);
+	rtw_pci_dma_reset(rtwdev, rtwpci);
 
 	return 0;
 }
 
-static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
+static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
 {
-	/* reset dma and rx tag */
-	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
-			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
-	rtwpci->rx_tag = 0;
+	struct rtw_pci_tx_ring *tx_ring;
+	u8 queue;
+
+	rtw_pci_reset_trx_ring(rtwdev);
+	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+		tx_ring = &rtwpci->tx_rings[queue];
+		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
+	}
 }
 
 static int rtw_pci_start(struct rtw_dev *rtwdev)
@@ -467,8 +522,6 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
 	unsigned long flags;
 
-	rtw_pci_dma_reset(rtwdev, rtwpci);
-
 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
 	rtw_pci_enable_interrupt(rtwdev, rtwpci);
 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
@@ -483,6 +536,70 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
 
 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
+	rtw_pci_dma_release(rtwdev, rtwpci);
+	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	struct rtw_pci_tx_ring *tx_ring;
+	bool tx_empty = true;
+	u8 queue;
+
+	lockdep_assert_held(&rtwpci->irq_lock);
+
+	/* Deep PS state is not allowed to TX-DMA */
+	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
+		/* BCN queue is rsvd page, does not have DMA interrupt
+		 * H2C queue is managed by firmware
+		 */
+		if (queue == RTW_TX_QUEUE_BCN ||
+		    queue == RTW_TX_QUEUE_H2C)
+			continue;
+
+		tx_ring = &rtwpci->tx_rings[queue];
+
+		/* check if there is any skb DMAing */
+		if (skb_queue_len(&tx_ring->queue)) {
+			tx_empty = false;
+			break;
+		}
+	}
+
+	if (!tx_empty) {
+		rtw_dbg(rtwdev, RTW_DBG_PS,
+			"TX path not empty, cannot enter deep power save state\n");
+		return;
+	}
+
+	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
+	rtw_power_mode_change(rtwdev, true);
+}
+
+static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+	lockdep_assert_held(&rtwpci->irq_lock);
+
+	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+		rtw_power_mode_change(rtwdev, false);
+}
+
+static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+		rtw_pci_deep_ps_enter(rtwdev);
+
+	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
+		rtw_pci_deep_ps_leave(rtwdev);
+
 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 }
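Note the locking contract the deep power-save path establishes: rtw_pci_deep_ps_enter() may only flip the power mode while no normal-priority TX ring has skbs in flight, and both the emptiness check and the TX enqueue path (see the rtw_pci_xmit() hunks below) run under rtwpci->irq_lock, so a frame cannot slip into a ring between the check and rtw_power_mode_change(). In outline, with a hypothetical helper name:

    spin_lock_irqsave(&rtwpci->irq_lock, flags);
    if (example_data_rings_empty(rtwpci))           /* hypothetical helper */
            rtw_power_mode_change(rtwdev, true);    /* enter deep PS */
    spin_unlock_irqrestore(&rtwpci->irq_lock, flags);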
@@ -566,6 +683,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
 	u8 *pkt_desc;
 	struct rtw_pci_tx_buffer_desc *buf_desc;
 	u32 bd_idx;
+	unsigned long flags;
 
 	ring = &rtwpci->tx_rings[queue];
 
@@ -601,6 +719,10 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
 	tx_data = rtw_pci_get_tx_data(skb);
 	tx_data->dma = dma;
 	tx_data->sn = pkt_info->sn;
+
+	spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+	rtw_pci_deep_ps_leave(rtwdev);
 	skb_queue_tail(&ring->queue, skb);
 
 	/* kick off tx queue */
@@ -616,6 +738,7 @@ static int rtw_pci_xmit(struct rtw_dev *rtwdev,
 		reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
 		rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
 	}
+	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 
 	return 0;
 }
@@ -712,6 +835,11 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 
 	while (count--) {
 		skb = skb_dequeue(&ring->queue);
+		if (!skb) {
+			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
+				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
+			break;
+		}
 		tx_data = rtw_pci_get_tx_data(skb);
 		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
 				 PCI_DMA_TODEVICE);
@@ -765,6 +893,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 	u32 pkt_offset;
 	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
 	u32 buf_desc_sz = chip->rx_buf_desc_sz;
+	u32 new_len;
 	u8 *rx_desc;
 	dma_addr_t dma;
 
@@ -783,8 +912,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 		rtw_pci_dma_check(rtwdev, ring, cur_rp);
 		skb = ring->buf[cur_rp];
 		dma = *((dma_addr_t *)skb->cb);
-		pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
-				 PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
+					DMA_FROM_DEVICE);
 		rx_desc = skb->data;
 		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
 
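The RX rework in the next hunk switches from unmap-and-reallocate to copy-and-recycle: the DMA'd bytes are synced to the CPU (as just seen), copied into a freshly allocated skb, and the original buffer is synced back to the device via rtw_pci_sync_rx_desc_device(). A condensed sketch of one ring slot's round trip, assuming the buffer stays mapped (the function is illustrative, the dma_*/skb_* calls are real API):

    static void example_rx_one_slot(struct device *dev, struct sk_buff *buf,
                                    dma_addr_t dma, u32 frame_len)
    {
            struct sk_buff *new;

            /* make the device-written bytes visible to the CPU */
            dma_sync_single_for_cpu(dev, dma, RTK_PCI_RX_BUF_SIZE,
                                    DMA_FROM_DEVICE);

            new = dev_alloc_skb(frame_len);
            if (new)
                    skb_put_data(new, buf->data, frame_len);
            /* on allocation failure the frame is simply dropped */

            /* re-post the still-mapped buffer to the device */
            dma_sync_single_for_device(dev, dma, RTK_PCI_RX_BUF_SIZE,
                                       DMA_FROM_DEVICE);
            /* ... deliver "new" to mac80211 or the C2H handler ... */
    }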
@@ -792,40 +921,32 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
 		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
 			     pkt_stat.shift;
 
-		if (pkt_stat.is_c2h) {
-			/* keep rx_desc, halmac needs it */
-			skb_put(skb, pkt_stat.pkt_len + pkt_offset);
+		/* allocate a new skb for this frame,
+		 * discard the frame if none available
+		 */
+		new_len = pkt_stat.pkt_len + pkt_offset;
+		new = dev_alloc_skb(new_len);
+		if (WARN_ONCE(!new, "rx routine starvation\n"))
+			goto next_rp;
 
-			/* pass offset for further operation */
-			*((u32 *)skb->cb) = pkt_offset;
-			skb_queue_tail(&rtwdev->c2h_queue, skb);
-			ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+		/* put the DMA data including rx_desc from phy to new skb */
+		skb_put_data(new, skb->data, new_len);
+
+		if (pkt_stat.is_c2h) {
+			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
 		} else {
-			/* remove rx_desc, maybe use skb_pull? */
-			skb_put(skb, pkt_stat.pkt_len);
-			skb_reserve(skb, pkt_offset);
-
-			/* alloc a smaller skb to mac80211 */
-			new = dev_alloc_skb(pkt_stat.pkt_len);
-			if (!new) {
-				new = skb;
-			} else {
-				skb_put_data(new, skb->data, skb->len);
-				dev_kfree_skb_any(skb);
-			}
-			/* TODO: merge into rx.c */
-			rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+			/* remove rx_desc */
+			skb_pull(new, pkt_offset);
+
+			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
 			memcpy(new->cb, &rx_status, sizeof(rx_status));
 			ieee80211_rx_irqsafe(rtwdev->hw, new);
 		}
 
-		/* skb delivered to mac80211, alloc a new one in rx ring */
-		new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
-		if (WARN(!new, "rx routine starvation\n"))
-			return;
-
-		ring->buf[cur_rp] = new;
-		rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
+next_rp:
+		/* new skb delivered to mac80211, re-enable original skb DMA */
+		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
+					    buf_desc_sz);
 
 		/* host read next element in ring */
 		if (++cur_rp >= ring->r.len)
@@ -855,12 +976,34 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
 {
 	struct rtw_dev *rtwdev = dev;
 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
-	u32 irq_status[4];
 
 	spin_lock(&rtwpci->irq_lock);
 	if (!rtwpci->irq_enabled)
 		goto out;
 
+	/* disable RTW PCI interrupt to avoid more interrupts before the end of
+	 * thread function
+	 *
+	 * disable HIMR here to also avoid new HISR flag being raised before
+	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
+	 * are cleared, the edge-triggered interrupt will not be generated when
+	 * a new HISR flag is set.
+	 */
+	rtw_pci_disable_interrupt(rtwdev, rtwpci);
+out:
+	spin_unlock(&rtwpci->irq_lock);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
+{
+	struct rtw_dev *rtwdev = dev;
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	unsigned long flags;
+	u32 irq_status[4];
+
+	spin_lock_irqsave(&rtwpci->irq_lock, flags);
 	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
 
 	if (irq_status[0] & IMR_MGNTDOK)
@@ -880,8 +1023,9 @@ static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
 	if (irq_status[0] & IMR_ROK)
 		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
 
-out:
-	spin_unlock(&rtwpci->irq_lock);
+	/* all of the jobs for this interrupt have been done */
+	rtw_pci_enable_interrupt(rtwdev, rtwpci);
+	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
 
 	return IRQ_HANDLED;
 }
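The handler split above follows the standard threaded-IRQ contract: the hard handler only masks HIMR and returns IRQ_WAKE_THREAD, and the thread function write-1-clears the HISRs, services TX/RX, and unmasks again. Keeping HIMR masked until every HISR bit is cleared matters for MSI, whose edge-triggered interrupt would otherwise never fire again for a flag raised mid-service. The registration this pairing implies looks roughly like the following (the driver's actual call is in rtw_pci_request_irq() further down; the example_ names are illustrative):

    static irqreturn_t example_hard_irq(int irq, void *data)
    {
            /* mask HIMR, do no other work in hard-IRQ context */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t example_thread_irq(int irq, void *data)
    {
            /* W1C the HISRs, run TX/RX service, unmask HIMR */
            return IRQ_HANDLED;
    }

    static int example_setup_irq(struct device *dev, int irq, void *data)
    {
            return devm_request_threaded_irq(dev, irq, example_hard_irq,
                                             example_thread_irq, IRQF_SHARED,
                                             KBUILD_MODNAME, data);
    }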
@@ -924,23 +1068,49 @@ static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
 static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
 {
 	u16 write_addr;
-	u16 remainder = addr & 0x3;
+	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
 	u8 flag;
-	u8 cnt = 20;
+	u8 cnt;
 
-	write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
+	write_addr = addr & BITS_DBI_ADDR_MASK;
+	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
 	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
 	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
-	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
+	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
+
+	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
+		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+		if (flag == 0)
+			return;
 
-	flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
-	while (flag && (cnt != 0)) {
 		udelay(10);
+	}
+
+	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
+}
+
+static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
+{
+	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
+	u8 flag;
+	u8 cnt;
+
+	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
+	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
+
+	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
 		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
-		cnt--;
+		if (flag == 0) {
+			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
+			*value = rtw_read8(rtwdev, read_addr);
+			return 0;
+		}
+
+		udelay(10);
 	}
 
-	WARN(flag, "DBI write fail\n");
+	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
+	return -EIO;
 }
 
 static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
@@ -951,23 +1121,128 @@ static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
 
 	rtw_write16(rtwdev, REG_MDIO_V1, data);
 
-	page = addr < 0x20 ? 0 : 1;
-	page += g1 ? 0 : 2;
-	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
+	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
+	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
+	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
 	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
 
 	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
-	wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
-	cnt = 20;
-	while (wflag && (cnt != 0)) {
-		udelay(10);
+
+	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
 		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
 					BIT_MDIO_WFLAG_V1);
-		cnt--;
+		if (wflag == 0)
+			return;
+
+		udelay(10);
 	}
 
-	WARN(wflag, "MDIO write fail\n");
+	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
+}
+
+static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
+{
+	u8 value;
+	int ret;
+
+	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+	if (ret) {
+		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
+		return;
+	}
+
+	if (enable)
+		value |= BIT_CLKREQ_SW_EN;
+	else
+		value &= ~BIT_CLKREQ_SW_EN;
+
+	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
+{
+	u8 value;
+	int ret;
+
+	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
+	if (ret) {
+		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
+		return;
+	}
+
+	if (enable)
+		value |= BIT_L1_SW_EN;
+	else
+		value &= ~BIT_L1_SW_EN;
+
+	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
+}
+
+static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
+	 * only be enabled when host supports it.
+	 *
+	 * And ASPM mechanism should be enabled when driver/firmware enters
+	 * power save mode, without having heavy traffic. Because we've
+	 * experienced some inter-operability issues that the link tends
+	 * to enter L1 state on the fly even when driver is having high
+	 * throughput. This is probably because the ASPM behavior slightly
+	 * varies from different SOC.
+	 */
+	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+		rtw_pci_aspm_set(rtwdev, enter);
+}
+
+static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
+{
+	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+	struct pci_dev *pdev = rtwpci->pdev;
+	u16 link_ctrl;
+	int ret;
+
+	/* Though there is standard PCIE configuration space to set the
+	 * link control register, but by Realtek's design, driver should
+	 * check if host supports CLKREQ/ASPM to enable the HW module.
+	 *
+	 * These functions are implemented by two HW modules associated,
+	 * one is responsible to access PCIE configuration space to
+	 * follow the host settings, and another is in charge of doing
+	 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes
+	 * the host does not support it, and due to some reasons or wrong
+	 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device
+	 * loss if HW misbehaves on the link.
+	 *
+	 * Hence it's designed that driver should first check the PCIE
+	 * configuration space is sync'ed and enabled, then driver can turn
+	 * on the other module that is actually working on the mechanism.
+	 */
+	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
+	if (ret) {
+		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
+		return;
+	}
+
+	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
+		rtw_pci_clkreq_set(rtwdev, true);
+
+	rtwpci->link_ctrl = link_ctrl;
+}
+
+static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
+{
+	struct rtw_chip_info *chip = rtwdev->chip;
+
+	switch (chip->id) {
+	case RTW_CHIP_TYPE_8822C:
+		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
+			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
+					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
+		break;
+	default:
+		break;
+	}
 }
 
 static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
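Both the DBI and MDIO helpers now share the same bounded-poll idiom in place of the old count-down while loops: try up to RTW_PCI_WR_RETRY_CNT times (presumably 20, preserving the old cnt = 20, so roughly 200 us worst case with the 10 us delay) and let the caller decide how to report failure. The skeleton, with an illustrative flag reader:

    static int example_poll_busy_flag(struct rtw_dev *rtwdev)
    {
            u8 cnt;

            for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                    if (example_read_busy_flag(rtwdev) == 0) /* hypothetical */
                            return 0;       /* HW cleared the busy flag */

                    udelay(10);             /* bounded busy-wait */
            }

            return -EIO;    /* caller WARNs or propagates the error */
    }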
@@ -977,7 +1252,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
 	u16 cut;
 	u16 value;
 	u16 offset;
-	u16 ip_sel;
 	int i;
 
 	cut = BIT(0) << rtwdev->hal.cut_version;
@@ -990,7 +1264,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
 			break;
 		offset = para->offset;
 		value = para->value;
-		ip_sel = para->ip_sel;
 		if (para->ip_sel == RTW_IP_SEL_PHY)
 			rtw_mdio_write(rtwdev, offset, value, true);
 		else
@@ -1005,14 +1278,32 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
 			break;
 		offset = para->offset;
 		value = para->value;
-		ip_sel = para->ip_sel;
 		if (para->ip_sel == RTW_IP_SEL_PHY)
 			rtw_mdio_write(rtwdev, offset, value, false);
 		else
 			rtw_dbi_write8(rtwdev, offset, value);
 	}
+
+	rtw_pci_link_cfg(rtwdev);
 }
 
+#ifdef CONFIG_PM
+static int rtw_pci_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int rtw_pci_resume(struct device *dev)
+{
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
+#define RTW_PM_OPS (&rtw_pm_ops)
+#else
+#define RTW_PM_OPS NULL
+#endif
+
 static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 {
 	int ret;
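The PM stubs wire up system sleep support without doing any device work yet. SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that routes the system-sleep callbacks to the two functions, roughly the open-coded equivalent below; guarding it with CONFIG_PM keeps RTW_PM_OPS NULL (and the stubs compiled out) on kernels built without power management:

    static const struct dev_pm_ops example_pm_ops = {
            .suspend  = rtw_pci_suspend,
            .resume   = rtw_pci_resume,
            /* SIMPLE_DEV_PM_OPS() also points freeze/thaw and
             * poweroff/restore at the same pair
             */
    };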
@@ -1057,8 +1348,6 @@ static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
 		goto err_io_unmap;
 	}
 
-	rtw_pci_phy_cfg(rtwdev);
-
 	return 0;
 
 err_io_unmap:
@@ -1079,6 +1368,9 @@ static struct rtw_hci_ops rtw_pci_ops = {
 	.setup = rtw_pci_setup,
 	.start = rtw_pci_start,
 	.stop = rtw_pci_stop,
+	.deep_ps = rtw_pci_deep_ps,
+	.link_ps = rtw_pci_link_ps,
+	.interface_cfg = rtw_pci_interface_cfg,
 
 	.read8 = rtw_pci_read8,
 	.read16 = rtw_pci_read16,
@@ -1090,6 +1382,38 @@ static struct rtw_hci_ops rtw_pci_ops = {
 	.write_data_h2c = rtw_pci_write_data_h2c,
 };
 
+static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+	unsigned int flags = PCI_IRQ_LEGACY;
+	int ret;
+
+	if (!rtw_disable_msi)
+		flags |= PCI_IRQ_MSI;
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
+	if (ret < 0) {
+		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
+		return ret;
+	}
+
+	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
+					rtw_pci_interrupt_handler,
+					rtw_pci_interrupt_threadfn,
+					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
+	if (ret) {
+		rtw_err(rtwdev, "failed to request irq %d\n", ret);
+		pci_free_irq_vectors(pdev);
+	}
+
+	return ret;
+}
+
+static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
+	pci_free_irq_vectors(pdev);
+}
+
 static int rtw_pci_probe(struct pci_dev *pdev,
 			 const struct pci_device_id *id)
 {
@@ -1138,14 +1462,15 @@ static int rtw_pci_probe(struct pci_dev *pdev,
 		goto err_destroy_pci;
 	}
 
+	rtw_pci_phy_cfg(rtwdev);
+
 	ret = rtw_register_hw(rtwdev, hw);
 	if (ret) {
 		rtw_err(rtwdev, "failed to register hw\n");
 		goto err_destroy_pci;
 	}
 
-	ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
-			  IRQF_SHARED, KBUILD_MODNAME, rtwdev);
+	ret = rtw_pci_request_irq(rtwdev, pdev);
 	if (ret) {
 		ieee80211_unregister_hw(hw);
 		goto err_destroy_pci;
@@ -1184,7 +1509,7 @@ static void rtw_pci_remove(struct pci_dev *pdev)
 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
 	rtw_pci_destroy(rtwdev, pdev);
 	rtw_pci_declaim(rtwdev, pdev);
-	free_irq(rtwpci->pdev->irq, rtwdev);
+	rtw_pci_free_irq(rtwdev, pdev);
 	rtw_core_deinit(rtwdev);
 	ieee80211_free_hw(hw);
 }
@@ -1205,6 +1530,7 @@ static struct pci_driver rtw_pci_driver = {
 	.id_table = rtw_pci_id_table,
 	.probe = rtw_pci_probe,
 	.remove = rtw_pci_remove,
+	.driver.pm = RTW_PM_OPS,
 };
 
 module_pci_driver(rtw_pci_driver);
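With the new module parameter, MSI support can be disabled at load time (for example, modprobe with disable_msi=Y; the exact module name depends on how the kernel packages rtw88's PCI support) and read back under /sys/module/<module>/parameters/disable_msi thanks to the 0644 permission. Since rtw_pci_request_irq() samples the flag when a device is probed, changing it afterwards only affects subsequently probed devices.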