author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-12 14:27:40 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-12 14:27:40 -0700
commit    | f9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree      | 3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /drivers/net/ethernet/marvell/mvneta.c
parent    | 0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parent    | e5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov.
2) Multiqueue support in xen-netback and xen-netfront, from Andrew J.
Bennieston.
3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn
Mork.
4) BPF now has a "random" opcode, from Chema Gonzalez.
5) Add more BPF documentation and improve test framework, from Daniel
Borkmann.
6) Support TCP fastopen over ipv6, from Daniel Lee.
7) Add software TSO helper functions and use them to support software
TSO in the mvneta and mv643xx_eth drivers (see the sketch after this
list). From Ezequiel Garcia.
8) Support software TSO in fec driver too, from Nimrod Andy.
9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli.
10) Handle broadcasts more gracefully over macvlan when there are large
numbers of interfaces configured, from Herbert Xu.
11) Allow more control over fwmark used for non-socket based responses,
from Lorenzo Colitti.
12) Do TCP congestion window limiting based upon measurements, from Neal
Cardwell.
13) Support busy polling in SCTP, from Neil Horman.
14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru.
15) Bridge promisc mode handling improvements from Vlad Yasevich.
16) Don't use inetpeer entries to implement ID generation any more, it
performs poorly, from Eric Dumazet.
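
As a rough illustration of the net/tso.h API from item 7: a driver segments a GSO skb by alternating tso_build_hdr() (one MAC/IP/TCP header per segment) with tso_build_data() (payload chunks). The sketch below is not code from this merge; tso_count_descs(), tso_start(), tso_build_hdr() and tso_build_data() are the real helpers as merged here, while struct my_txq and the txq_*/enqueue_* helpers are placeholders invented for illustration of the driver-specific descriptor-ring side.

/* Hypothetical transmit path built on the net/tso.h helpers.
 * Only the tso_* calls are real; my_txq, txq_free_descs,
 * txq_next_hdr_buf, enqueue_hdr and enqueue_data are stand-ins
 * for driver-specific descriptor-ring code.
 */
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/tso.h>

static int my_xmit_tso(struct sk_buff *skb, struct my_txq *txq)
{
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int total_len = skb->len - hdr_len;
        struct tso_t tso;

        /* worst-case descriptor need for this skb */
        if (txq_free_descs(txq) < tso_count_descs(skb))
                return -EBUSY;

        /* position the payload cursor at the first payload byte */
        tso_start(skb, &tso);

        while (total_len > 0) {
                int data_left = min_t(int, skb_shinfo(skb)->gso_size,
                                      total_len);
                char *hdr = txq_next_hdr_buf(txq); /* per-segment buffer */

                total_len -= data_left;

                /* build this segment's MAC + IP + TCP header, queue it */
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                enqueue_hdr(txq, hdr, hdr_len);

                /* queue the segment payload, possibly split across pages */
                while (data_left > 0) {
                        int size = min_t(int, tso.size, data_left);

                        enqueue_data(txq, tso.data, size);
                        data_left -= size;
                        tso_build_data(skb, &tso, size); /* advance cursor */
                }
        }
        return 0;
}

The mvneta_tx_tso() hunk in the diff below is the concrete version of this loop, with DMA mapping and descriptor bookkeeping filled in.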
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
rtnetlink: fix userspace API breakage for iproute2 < v3.9.0
tcp: fixing TLP's FIN recovery
net: fec: Add software TSO support
net: fec: Add Scatter/gather support
net: fec: Increase buffer descriptor entry number
net: fec: Factorize feature setting
net: fec: Enable IP header hardware checksum
net: fec: Factorize the .xmit transmit function
bridge: fix compile error when compiling without IPv6 support
bridge: fix smatch warning / potential null pointer dereference
via-rhine: fix full-duplex with autoneg disable
bnx2x: Enlarge the dorq threshold for VFs
bnx2x: Check for UNDI in uncommon branch
bnx2x: Fix 1G-baseT link
bnx2x: Fix link for KR with swapped polarity lane
sctp: Fix sk_ack_backlog wrap-around problem
net/core: Add VF link state control policy
net/fsl: xgmac_mdio is dependent on OF_MDIO
net/fsl: Make xgmac_mdio read error message useful
net_sched: drr: warn when qdisc is not work conserving
...
Diffstat (limited to 'drivers/net/ethernet/marvell/mvneta.c')
-rw-r--r-- | drivers/net/ethernet/marvell/mvneta.c | 324
1 file changed, 244 insertions(+), 80 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8bf99e..45beca17fa50 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
+#include <net/tso.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
 #define MVNETA_RX_COAL_PKTS    32
 #define MVNETA_RX_COAL_USEC    100
 
-/* Napi polling weight */
-#define MVNETA_RX_POLL_WEIGHT  64
-
 /* The two bytes Marvell header. Either contains a special value used
  * by Marvell switches when a specific hardware mode is enabled (not
  * supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
 #define MVNETA_TX_MTU_MAX      0x3ffff
 
+/* TSO header size */
+#define TSO_HEADER_SIZE        128
+
 /* Max number of Rx descriptors */
 #define MVNETA_MAX_RXD         128
 
 /* Max number of Tx descriptors */
 #define MVNETA_MAX_TXD         532
 
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS    100
+
+#define MVNETA_MAX_SKB_DESCS   (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
 /* descriptor aligned size */
 #define MVNETA_DESC_ALIGNED_SIZE        32
@@ -258,6 +264,10 @@
                          ETH_HLEN + ETH_FCS_LEN, \
                          MVNETA_CPU_D_CACHE_LINE_SIZE)
 
+#define IS_TSO_HEADER(txq, addr) \
+        ((addr >= txq->tso_hdrs_phys) && \
+         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
 #define MVNETA_RX_BUF_SIZE(pkt_size)    ((pkt_size) + NET_SKB_PAD)
 
 struct mvneta_pcpu_stats {
@@ -279,9 +289,6 @@ struct mvneta_port {
        u32 cause_rx_tx;
        struct napi_struct napi;
 
-       /* Napi weight */
-       int weight;
-
        /* Core clock */
        struct clk *clk;
        u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
         * descriptor ring
         */
        int count;
+       int tx_stop_threshold;
+       int tx_wake_threshold;
 
        /* Array of transmitted skb */
        struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
 
        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;
+
+       /* DMA buffers for TSO headers */
+       char *tso_hdrs;
+
+       /* DMA address of TSO headers */
+       dma_addr_t tso_hdrs_phys;
 };
 
 struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
        int next_desc_to_proc;
 };
 
-static int rxq_number = 8;
+/* The hardware supports eight (8) rx queues, but we are only allowing
+ * the first one to be used. Therefore, let's just allocate one queue.
+ */
+static int rxq_number = 1;
 static int txq_number = 8;
 
 static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 
                mvneta_txq_inc_get(txq);
 
+               if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size, DMA_TO_DEVICE);
                if (!skb)
                        continue;
-
-               dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
-                                tx_desc->data_size, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
 }
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
        txq->count -= tx_done;
 
        if (netif_tx_queue_stopped(nq)) {
-               if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+               if (txq->count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
        }
 }
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
        return rx_done;
 }
 
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+                  struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+       struct mvneta_tx_desc *tx_desc;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       txq->tx_skb[txq->txq_put_index] = NULL;
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = hdr_len;
+       tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+       tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+                                txq->txq_put_index * TSO_HEADER_SIZE;
+       mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+                   struct sk_buff *skb, char *data, int size,
+                   bool last_tcp, bool is_last)
+{
+       struct mvneta_tx_desc *tx_desc;
+
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = size;
+       tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+                                               size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent,
+                    tx_desc->buf_phys_addr))) {
+               mvneta_txq_desc_put(txq);
+               return -ENOMEM;
+       }
+
+       tx_desc->command = 0;
+       txq->tx_skb[txq->txq_put_index] = NULL;
+
+       if (last_tcp) {
+               /* last descriptor in the TCP packet */
+               tx_desc->command = MVNETA_TXD_L_DESC;
+
+               /* last descriptor in SKB */
+               if (is_last)
+                       txq->tx_skb[txq->txq_put_index] = skb;
+       }
+       mvneta_txq_inc_put(txq);
+       return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+                        struct mvneta_tx_queue *txq)
+{
+       int total_len, data_left;
+       int desc_count = 0;
+       struct mvneta_port *pp = netdev_priv(dev);
+       struct tso_t tso;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int i;
+
+       /* Count needed descriptors */
+       if ((txq->count + tso_count_descs(skb)) >= txq->size)
+               return 0;
+
+       if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+               pr_info("*** Is this even possible???!?!?\n");
+               return 0;
+       }
+
+       /* Initialize the TSO handler, and prepare the first payload */
+       tso_start(skb, &tso);
+
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+
+               /* prepare packet headers: MAC + IP + TCP */
+               hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+               mvneta_tso_put_hdr(skb, pp, txq);
+
+               while (data_left > 0) {
+                       int size;
+                       desc_count++;
+
+                       size = min_t(int, tso.size, data_left);
+
+                       if (mvneta_tso_put_data(dev, txq, skb,
+                                               tso.data, size,
+                                               size == data_left,
+                                               total_len == 0))
+                               goto err_release;
+                       data_left -= size;
+
+                       tso_build_data(skb, &tso, size);
+               }
+       }
+
+       return desc_count;
+
+err_release:
+       /* Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+       for (i = desc_count - 1; i >= 0; i--) {
+               struct mvneta_tx_desc *tx_desc = txq->descs + i;
+               if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size,
+                                        DMA_TO_DEVICE);
+               mvneta_txq_desc_put(txq);
+       }
+       return 0;
+}
+
 /* Handle tx fragmentation processing */
 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
                                  struct mvneta_tx_queue *txq)
 {
        struct mvneta_tx_desc *tx_desc;
-       int i;
+       int i, nr_frags = skb_shinfo(skb)->nr_frags;
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+       for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                void *addr = page_address(frag->page.p) +
                             frag->page_offset;
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
                        goto error;
                }
 
-               if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+               if (i == nr_frags - 1) {
                        /* Last descriptor */
                        tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-
                        txq->tx_skb[txq->txq_put_index] = skb;
-
-                       mvneta_txq_inc_put(txq);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        tx_desc->command = 0;
-
                        txq->tx_skb[txq->txq_put_index] = NULL;
-                       mvneta_txq_inc_put(txq);
                }
+               mvneta_txq_inc_put(txq);
        }
 
        return 0;
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
        u16 txq_id = skb_get_queue_mapping(skb);
        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
-       struct netdev_queue *nq;
        int frags = 0;
        u32 tx_cmd;
 
        if (!netif_running(dev))
                goto out;
 
+       if (skb_is_gso(skb)) {
+               frags = mvneta_tx_tso(skb, dev, txq);
+               goto out;
+       }
+
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq = netdev_get_tx_queue(dev, txq_id);
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       txq->count += frags;
-       mvneta_txq_pend_desc_add(pp, txq, frags);
-
-       if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
-               netif_tx_stop_queue(nq);
-
 out:
        if (frags > 0) {
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+               txq->count += frags;
+               mvneta_txq_pend_desc_add(pp, txq, frags);
+
+               if (txq->count >= txq->tx_stop_threshold)
+                       netif_tx_stop_queue(nq);
 
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
 {
        int queue;
 
-       /* free the skb's in the hal tx ring */
+       /* free the skb's in the tx ring */
        for (queue = 0; queue < txq_number; queue++)
                mvneta_txq_done_force(pp, &pp->txqs[queue]);
 
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 {
        txq->size = pp->tx_ring_size;
 
+       /* A queue must always have room for at least one skb.
+        * Therefore, stop the queue when the free entries reaches
+        * the maximum number of descriptors per skb.
+        */
+       txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+       txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
+       /* Allocate memory for TX descriptors */
        txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                        txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
                                  txq->descs, txq->descs_phys);
                return -ENOMEM;
        }
+
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+       txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+                                          txq->size * TSO_HEADER_SIZE,
+                                          &txq->tso_hdrs_phys, GFP_KERNEL);
+       if (txq->tso_hdrs == NULL) {
+               kfree(txq->tx_skb);
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * MVNETA_DESC_ALIGNED_SIZE,
+                                 txq->descs, txq->descs_phys);
+               return -ENOMEM;
+       }
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
        return 0;
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 {
        kfree(txq->tx_skb);
 
+       if (txq->tso_hdrs)
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * TSO_HEADER_SIZE,
+                                 txq->tso_hdrs, txq->tso_hdrs_phys);
        if (txq->descs)
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
                return 0;
 
        /* The interface is running, so we have to force a
-        * reallocation of the RXQs
+        * reallocation of the queues
         */
        mvneta_stop_dev(pp);
 
        mvneta_cleanup_txqs(pp);
        mvneta_cleanup_rxqs(pp);
 
-       pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+       pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
        ret = mvneta_setup_rxqs(pp);
        if (ret) {
-               netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+               netdev_err(dev, "unable to setup rxqs after MTU change\n");
                return ret;
        }
 
-       mvneta_setup_txqs(pp);
+       ret = mvneta_setup_txqs(pp);
+       if (ret) {
+               netdev_err(dev, "unable to setup txqs after MTU change\n");
+               return ret;
+       }
 
        mvneta_start_dev(pp);
        mvneta_port_up(pp);
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       u8 *mac = addr + 2;
-       int i;
-
-       if (netif_running(dev))
-               return -EBUSY;
+       struct sockaddr *sockaddr = addr;
+       int ret;
 
+       ret = eth_prepare_mac_addr_change(dev, addr);
+       if (ret < 0)
+               return ret;
        /* Remove previous address table entry */
        mvneta_mac_addr_set(pp, dev->dev_addr, -1);
 
        /* Set new addr in hw */
-       mvneta_mac_addr_set(pp, mac, rxq_def);
-
-       /* Set addr in the device */
-       for (i = 0; i < ETH_ALEN; i++)
-               dev->dev_addr[i] = mac[i];
+       mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+       eth_commit_mac_addr_change(dev, addr);
        return 0;
 }
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
        struct mvneta_port *pp = netdev_priv(dev);
        int ret;
 
-       mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
-
        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
                return -EINVAL;
        pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
                ring->rx_pending : MVNETA_MAX_RXD;
-       pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
-               ring->tx_pending : MVNETA_MAX_TXD;
+
+       pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+                                  MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+       if (pp->tx_ring_size != ring->tx_pending)
+               netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+                           pp->tx_ring_size, ring->tx_pending);
 
        if (netif_running(dev)) {
                mvneta_stop(dev);
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 };
 
 /* Initialize hw */
-static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
 {
        int queue;
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
        /* Set port default values */
        mvneta_defaults_set(pp);
 
-       pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
-                          GFP_KERNEL);
+       pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+                               GFP_KERNEL);
        if (!pp->txqs)
                return -ENOMEM;
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
                txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
        }
 
-       pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
-                          GFP_KERNEL);
-       if (!pp->rxqs) {
-               kfree(pp->txqs);
+       pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+                               GFP_KERNEL);
+       if (!pp->rxqs)
                return -ENOMEM;
-       }
 
        /* Create Rx descriptor rings */
        for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
        return 0;
 }
 
-static void mvneta_deinit(struct mvneta_port *pp)
-{
-       kfree(pp->txqs);
-       kfree(pp->rxqs);
-}
-
 /* platform glue : initialize decoding windows */
 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
                                     const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
        struct resource *res;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *phy_node;
-       u32 phy_addr;
        struct mvneta_port *pp;
        struct net_device *dev;
        const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
        phy_node = of_parse_phandle(dn, "phy", 0);
        if (!phy_node) {
-               dev_err(&pdev->dev, "no associated PHY\n");
-               err = -ENODEV;
-               goto err_free_irq;
+               if (!of_phy_is_fixed_link(dn)) {
+                       dev_err(&pdev->dev, "no PHY specified\n");
+                       err = -ENODEV;
+                       goto err_free_irq;
+               }
+
+               err = of_phy_register_fixed_link(dn);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "cannot register fixed PHY\n");
+                       goto err_free_irq;
+               }
+
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               phy_node = dn;
        }
 
        phy_mode = of_get_phy_mode(dn);
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 5 * HZ;
        dev->netdev_ops = &mvneta_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+       dev->ethtool_ops = &mvneta_eth_tool_ops;
 
        pp = netdev_priv(dev);
-
-       pp->weight = MVNETA_RX_POLL_WEIGHT;
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
        pp->dev = dev;
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-       err = mvneta_init(pp, phy_addr);
-       if (err < 0) {
-               dev_err(&pdev->dev, "can't init eth hal\n");
+       err = mvneta_init(&pdev->dev, pp);
+       if (err < 0)
                goto err_free_stats;
-       }
 
        err = mvneta_port_power_up(pp, phy_mode);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               goto err_deinit;
+               goto err_free_stats;
        }
 
        dram_target_info = mv_mbus_dram_info();
        if (dram_target_info)
                mvneta_conf_mbus_windows(pp, dram_target_info);
 
-       netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+       netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
 
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       dev->hw_features |= dev->features;
+       dev->vlan_features |= dev->features;
        dev->priv_flags |= IFF_UNICAST_FLT;
+       dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
 
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register\n");
-               goto err_deinit;
+               goto err_free_stats;
        }
 
        netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
        return 0;
 
-err_deinit:
-       mvneta_deinit(pp);
 err_free_stats:
        free_percpu(pp->stats);
 err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
        struct mvneta_port *pp = netdev_priv(dev);
 
        unregister_netdev(dev);
-       mvneta_deinit(pp);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->stats);
        irq_dispose_mapping(dev->irq);
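
Worth calling out from the hunks above: the new tx_stop_threshold/tx_wake_threshold pair replaces the old per-packet MAX_SKB_FRAGS check with explicit flow-control hysteresis, sized so a worst-case TSO skb (MVNETA_MAX_SKB_DESCS descriptors) always fits. Condensed from the diff; the surrounding nq/txq plumbing is abbreviated, not the driver's literal code:

/* ring setup (mvneta_txq_init): reserve room for one worst-case skb */
txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

/* transmit path (mvneta_tx): stop before the next skb could overflow */
if (txq->count >= txq->tx_stop_threshold)
        netif_tx_stop_queue(nq);

/* completion path (mvneta_txq_done): wake only after draining to half
 * the stop level, so the queue does not flap between stopped and running
 */
if (netif_tx_queue_stopped(nq) && txq->count <= txq->tx_wake_threshold)
        netif_tx_wake_queue(nq);

The gap between the two thresholds is the design point: waking at tx_stop_threshold / 2 keeps the queue from oscillating when completions trickle in one descriptor at a time.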