Diffstat (limited to 'drivers/net/ethernet/intel/igc/igc_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 151
1 files changed, 139 insertions, 12 deletions
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 34fa0e60a780..63b62d74f961 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5,6 +5,11 @@
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+
+#include <net/ipv6.h>
#include "igc.h"
#include "igc_hw.h"
@@ -36,6 +41,9 @@ static const struct igc_info *igc_info_tbl[] = {
static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
/* required last entry */
{0, }
};
@@ -72,6 +80,27 @@ void igc_reset(struct igc_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct igc_hw *hw = &adapter->hw;
+ struct igc_fc_info *fc = &hw->fc;
+ u32 pba, hwm;
+
+ /* Repartition PBA for greater than 9k MTU if required */
+ pba = IGC_PBA_34K;
+
+ /* flow control settings
+ * The high water mark must be low enough to fit one full frame
+ * after transmitting the pause frame. As such we must have enough
+ * space to allow for us to complete our current transmit and then
+ * receive the frame that is in progress from the link partner.
+ * Set it to:
+ * - the full Rx FIFO size minus one full Tx plus one full Rx frame
+ */
+ hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
+
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
+ fc->low_water = fc->high_water - 16;
+ fc->pause_time = 0xFFFF;
+ fc->send_xon = 1;
+ fc->current_mode = fc->requested_mode;
hw->mac.ops.reset_hw(hw);
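A worked example of the high/low water mark arithmetic in the hunk above, assuming IGC_PBA_34K encodes 34 KB of packet buffer and a standard 1500-byte MTU (max_frame_size of roughly 1522 bytes); illustrative numbers only, not part of the patch:

	/* Illustrative values (assumed, not taken from the patch):
	 *   pba                  = 34;       // IGC_PBA_34K, in KB
	 *   pba << 10            = 34816;    // Rx FIFO size in bytes
	 *   max_frame_size       = 1522;     // 1500-byte MTU + L2 overhead
	 *   MAX_JUMBO_FRAME_SIZE = 9216;
	 *
	 *   hwm            = 34816 - (1522 + 9216) = 24078
	 *   fc->high_water = 24078 & 0xFFFFFFF0    = 24064   // 16-byte granularity
	 *   fc->low_water  = 24064 - 16            = 24048
	 */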
@@ -328,8 +357,7 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -767,8 +795,96 @@ static int igc_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
+ struct igc_tx_buffer *first,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
+{
+ struct igc_adv_tx_context_desc *context_desc;
+ u16 i = tx_ring->next_to_use;
+ struct timespec64 ts;
+
+ context_desc = IGC_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
+
+ /* For 82575, context index must be unique per ring. */
+ if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ mss_l4len_idx |= tx_ring->reg_idx << 4;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ /* We assume there is always a valid Tx time available. Invalid times
+ * should have been handled by the upper layers.
+ */
+ if (tx_ring->launchtime_enable) {
+ ts = ns_to_timespec64(first->skb->tstamp);
+ first->skb->tstamp = 0;
+ context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
+ } else {
+ context_desc->launch_time = 0;
+ }
+}
+
+static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
+ struct sk_buff *skb = first->skb;
+ u32 vlan_macip_lens = 0;
+ u32 type_tucmd = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
+ if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
+ !tx_ring->launchtime_enable)
+ return;
+ goto no_csum;
+ }
+
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if ((first->protocol == htons(ETH_P_IP) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ (first->protocol == htons(ETH_P_IPV6) &&
+ igc_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
+ break;
+ }
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
+ }
+
+ /* update TX checksum flag */
+ first->tx_flags |= IGC_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
+no_csum:
+ vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
+
+ igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
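For reference, a sketch of how the stack's partial-checksum fields drive igc_tx_csum() above for a plain TCP/IPv4 frame; the offsets below are illustrative and not part of the patch:

	/* Example: TCP over IPv4 with a 14-byte Ethernet header and a
	 * 20-byte IP header.  With NETIF_F_HW_CSUM the stack hands the skb
	 * down with
	 *   skb->ip_summed                 == CHECKSUM_PARTIAL
	 *   skb_checksum_start_offset(skb) == 34   // start of the TCP header
	 *   skb->csum_offset               == 16   // offsetof(struct tcphdr, check)
	 * so the switch above selects IGC_ADVTXD_TUCMD_L4T_TCP and the context
	 * descriptor is built with
	 *   vlan_macip_lens = (34 - 14)                          // 20-byte IP header
	 *                   | (14 << IGC_ADVTXD_MACLEN_SHIFT);   // MAC header length
	 * Any checksum offset the hardware cannot handle falls back to
	 * skb_checksum_help() and the csum_failed path.
	 */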
@@ -840,7 +956,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
struct igc_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc;
u32 tx_flags = first->tx_flags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
@@ -994,7 +1110,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igc_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
@@ -3891,13 +4008,11 @@ void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
struct igc_adapter *adapter = hw->back;
- u16 cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
+ if (!pci_is_pcie(adapter->pdev))
return -IGC_ERR_CONFIG;
- pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+ pcie_capability_read_word(adapter->pdev, reg, value);
return IGC_SUCCESS;
}
@@ -3905,13 +4020,11 @@ s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
struct igc_adapter *adapter = hw->back;
- u16 cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
+ if (!pci_is_pcie(adapter->pdev))
return -IGC_ERR_CONFIG;
- pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+ pcie_capability_write_word(adapter->pdev, reg, *value);
return IGC_SUCCESS;
}
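The two hunks above replace the open-coded capability lookup with the generic PCIe capability accessors, which locate the PCIe capability internally. A minimal usage sketch of that API (the register choice is hypothetical, not part of the patch):

	u16 devctl;

	/* pcie_capability_read_word() finds the PCIe capability itself, so no
	 * explicit pci_find_capability(pdev, PCI_CAP_ID_EXP) call is needed;
	 * pci_is_pcie() guards against non-PCIe devices.
	 */
	if (pci_is_pcie(pdev))
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);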
@@ -3934,6 +4047,7 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
hw->hw_addr = NULL;
netif_device_detach(netdev);
netdev_err(netdev, "PCIe link lost, device now detached\n");
+ WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
}
return value;
@@ -4095,6 +4209,9 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* Add supported features to the features list */
+ netdev->features |= NETIF_F_HW_CSUM;
+
/* setup the private structure */
err = igc_sw_init(adapter);
if (err)
@@ -4102,6 +4219,7 @@ static int igc_probe(struct pci_dev *pdev,
/* copy netdev features into list of user selectable features */
netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= netdev->features;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4112,6 +4230,15 @@ static int igc_probe(struct pci_dev *pdev,
*/
hw->mac.ops.reset_hw(hw);
+ if (igc_get_flash_presence_i225(hw)) {
+ if (hw->nvm.ops.validate(hw) < 0) {
+ dev_err(&pdev->dev,
+ "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+
if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
/* copy the MAC address out of the NVM */
if (hw->mac.ops.read_mac_addr(hw))