Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c |   6
-rw-r--r--  drivers/net/ethernet/ibm/emac/phy.c       |  12
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c        | 268
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h        |   1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c        | 403
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h        |   5
6 files changed, 499 insertions(+), 196 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 1e53d7a82675..b9d310f20bcc 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3553,14 +3553,12 @@ static int check_module_parm(void)
 	return ret;
 }
 
-static ssize_t ehea_show_capabilities(struct device_driver *drv,
-				      char *buf)
+static ssize_t capabilities_show(struct device_driver *drv, char *buf)
 {
 	return sprintf(buf, "%d", EHEA_CAPABILITIES);
 }
 
-static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
-		   ehea_show_capabilities, NULL);
+static DRIVER_ATTR_RO(capabilities);
 
 static int __init ehea_module_init(void)
 {
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 5b88cc690c22..35865d05fccd 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -276,7 +276,7 @@ static int genmii_read_link(struct mii_phy *phy)
 }
 
 /* Generic implementation for most 10/100/1000 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
+static const struct mii_phy_ops generic_phy_ops = {
 	.setup_aneg	= genmii_setup_aneg,
 	.setup_forced	= genmii_setup_forced,
 	.poll_link	= genmii_poll_link,
@@ -340,7 +340,7 @@ static int cis8201_init(struct mii_phy *phy)
 	return 0;
 }
 
-static struct mii_phy_ops cis8201_phy_ops = {
+static const struct mii_phy_ops cis8201_phy_ops = {
 	.init		= cis8201_init,
 	.setup_aneg	= genmii_setup_aneg,
 	.setup_forced	= genmii_setup_forced,
@@ -420,7 +420,7 @@ static int et1011c_init(struct mii_phy *phy)
 	return 0;
 }
 
-static struct mii_phy_ops et1011c_phy_ops = {
+static const struct mii_phy_ops et1011c_phy_ops = {
 	.init		= et1011c_init,
 	.setup_aneg	= genmii_setup_aneg,
 	.setup_forced	= genmii_setup_forced,
@@ -439,7 +439,7 @@ static struct mii_phy_def et1011c_phy_def = {
-static struct mii_phy_ops m88e1111_phy_ops = {
+static const struct mii_phy_ops m88e1111_phy_ops = {
 	.init		= m88e1111_init,
 	.setup_aneg	= genmii_setup_aneg,
 	.setup_forced	= genmii_setup_forced,
@@ -455,7 +455,7 @@ static struct mii_phy_def m88e1111_phy_def = {
 	.ops		= &m88e1111_phy_ops,
 };
 
-static struct mii_phy_ops m88e1112_phy_ops = {
+static const struct mii_phy_ops m88e1112_phy_ops = {
 	.init		= m88e1112_init,
 	.setup_aneg	= genmii_setup_aneg,
 	.setup_forced	= genmii_setup_forced,
@@ -480,7 +480,7 @@ static int ar8035_init(struct mii_phy *phy)
 	return 0;
 }
 
-static struct mii_phy_ops ar8035_phy_ops = {
+static const struct mii_phy_ops ar8035_phy_ops = {
 	.init		= ar8035_init,
 	.setup_aneg	= genmii_setup_aneg,
 	.setup_forced	= genmii_setup_forced,
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 72ab7b6bf20b..d17c2b03f580 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -46,6 +46,8 @@
 #include <asm/vio.h>
 #include <asm/iommu.h>
 #include <asm/firmware.h>
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
 
 #include "ibmveth.h"
 
@@ -467,56 +469,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 	}
 }
 
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
-	int i;
-	struct device *dev = &adapter->vdev->dev;
-
-	if (adapter->buffer_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
-			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
-					 DMA_BIDIRECTIONAL);
-			adapter->buffer_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->buffer_list_addr);
-		adapter->buffer_list_addr = NULL;
-	}
-
-	if (adapter->filter_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
-			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
-					 DMA_BIDIRECTIONAL);
-			adapter->filter_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->filter_list_addr);
-		adapter->filter_list_addr = NULL;
-	}
-
-	if (adapter->rx_queue.queue_addr != NULL) {
-		dma_free_coherent(dev, adapter->rx_queue.queue_len,
-				  adapter->rx_queue.queue_addr,
-				  adapter->rx_queue.queue_dma);
-		adapter->rx_queue.queue_addr = NULL;
-	}
-
-	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
-		if (adapter->rx_buff_pool[i].active)
-			ibmveth_free_buffer_pool(adapter,
-						 &adapter->rx_buff_pool[i]);
-
-	if (adapter->bounce_buffer != NULL) {
-		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					 adapter->bounce_buffer_dma,
-					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
-					 DMA_BIDIRECTIONAL);
-			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
-		}
-		kfree(adapter->bounce_buffer);
-		adapter->bounce_buffer = NULL;
-	}
-}
-
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 	union ibmveth_buf_desc rxq_desc, u64 mac_address)
 {
@@ -573,14 +525,17 @@ static int ibmveth_open(struct net_device *netdev)
 	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;
 
+	rc = -ENOMEM;
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->buffer_list_addr) {
+		netdev_err(netdev, "unable to allocate list pages\n");
+		goto out;
+	}
 
-	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
-		netdev_err(netdev, "unable to allocate filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->filter_list_addr) {
+		netdev_err(netdev, "unable to allocate filter pages\n");
+		goto out_free_buffer_list;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -590,22 +545,21 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.queue_addr = dma_alloc_coherent(dev,
 						adapter->rx_queue.queue_len,
 						&adapter->rx_queue.queue_dma,
 						GFP_KERNEL);
-	if (!adapter->rx_queue.queue_addr) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
+	if (!adapter->rx_queue.queue_addr)
+		goto out_free_filter_list;
 
 	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+		netdev_err(netdev, "unable to map buffer list pages\n");
+		goto out_free_queue_mem;
+	}
+
 	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
-	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
-		netdev_err(netdev, "unable to map filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+		netdev_err(netdev, "unable to map filter list pages\n");
+		goto out_unmap_buffer_list;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -636,7 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
 			     rxq_desc.desc, mac_address);
 		rc = -ENONET;
-		goto err_out;
+		goto out_unmap_filter_list;
 	}
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -646,7 +600,7 @@ static int ibmveth_open(struct net_device *netdev)
 			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
 			rc = -ENOMEM;
-			goto err_out;
+			goto out_free_buffer_pools;
 		}
 	}
 
@@ -660,22 +614,21 @@ static int ibmveth_open(struct net_device *netdev)
 			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
-		goto err_out;
+		goto out_free_buffer_pools;
 	}
 
+	rc = -ENOMEM;
 	adapter->bounce_buffer =
 	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
-	if (!adapter->bounce_buffer) {
-		rc = -ENOMEM;
-		goto err_out_free_irq;
-	}
+	if (!adapter->bounce_buffer)
+		goto out_free_irq;
+
 	adapter->bounce_buffer_dma =
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		netdev_err(netdev, "unable to map bounce buffer\n");
-		rc = -ENOMEM;
-		goto err_out_free_irq;
+		goto out_free_bounce_buffer;
 	}
 
 	netdev_dbg(netdev, "initial replenish cycle\n");
@@ -687,10 +640,31 @@ static int ibmveth_open(struct net_device *netdev)
 
 	return 0;
 
-err_out_free_irq:
+out_free_bounce_buffer:
+	kfree(adapter->bounce_buffer);
+out_free_irq:
 	free_irq(netdev->irq, netdev);
-err_out:
-	ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+	while (--i >= 0) {
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+	}
+out_unmap_filter_list:
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+out_free_filter_list:
+	free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+	free_page((unsigned long)adapter->buffer_list_addr);
+out:
 	napi_disable(&adapter->napi);
 	return rc;
 }
@@ -698,7 +672,9 @@ err_out:
 static int ibmveth_close(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
+	struct device *dev = &adapter->vdev->dev;
 	long lpar_rc;
+	int i;
 
 	netdev_dbg(netdev, "close starting\n");
 
@@ -722,7 +698,27 @@ static int ibmveth_close(struct net_device *netdev)
 
 	ibmveth_update_rx_no_buffer(adapter);
 
-	ibmveth_cleanup(adapter);
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->buffer_list_addr);
+
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->filter_list_addr);
+
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+
+	dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+			 adapter->netdev->mtu + IBMVETH_BUFF_OH,
+			 DMA_BIDIRECTIONAL);
+	kfree(adapter->bounce_buffer);
 
 	netdev_dbg(netdev, "close complete\n");
 
@@ -808,8 +804,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
 
 	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
 
-	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
-	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
+	if (ret == H_SUCCESS &&
 	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
 		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
 					  set_attr, &ret_attr);
@@ -1040,6 +1035,15 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 	dma_addr_t dma_addr;
 	unsigned long mss = 0;
 
+	/* veth doesn't handle frag_list, so linearize the skb.
+	 * When GRO is enabled SKB's can have frag_list.
+	 */
+	if (adapter->is_active_trunk &&
+	    skb_has_frag_list(skb) && __skb_linearize(skb)) {
+		netdev->stats.tx_dropped++;
+		goto out;
+	}
+
 	/*
 	 * veth handles a maximum of 6 segments including the header, so
 	 * we have to linearize the skb if there are more than this.
@@ -1064,9 +1068,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 
 	desc_flags = IBMVETH_BUF_VALID;
 
-	if (skb_is_gso(skb) && adapter->fw_large_send_support)
-		desc_flags |= IBMVETH_BUF_LRG_SND;
-
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		unsigned char *buf = skb_transport_header(skb) +
 						skb->csum_offset;
@@ -1076,6 +1077,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 
 		/* Need to zero out the checksum */
 		buf[0] = 0;
 		buf[1] = 0;
+
+		if (skb_is_gso(skb) && adapter->fw_large_send_support)
+			desc_flags |= IBMVETH_BUF_LRG_SND;
 	}
 
 retry_bounce:
@@ -1128,7 +1132,7 @@ retry_bounce:
 			descs[i+1].fields.address = dma_addr;
 		}
 
-		if (skb_is_gso(skb)) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 			if (adapter->fw_large_send_support) {
 				mss = (unsigned long)skb_shinfo(skb)->gso_size;
 				adapter->tx_large_packets++;
@@ -1232,6 +1236,71 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
 	}
 }
 
+static void ibmveth_rx_csum_helper(struct sk_buff *skb,
+				   struct ibmveth_adapter *adapter)
+{
+	struct iphdr *iph = NULL;
+	struct ipv6hdr *iph6 = NULL;
+	__be16 skb_proto = 0;
+	u16 iphlen = 0;
+	u16 iph_proto = 0;
+	u16 tcphdrlen = 0;
+
+	skb_proto = be16_to_cpu(skb->protocol);
+
+	if (skb_proto == ETH_P_IP) {
+		iph = (struct iphdr *)skb->data;
+
+		/* If the IP checksum is not offloaded and if the packet
+		 * is large send, the checksum must be rebuilt.
+		 */
+		if (iph->check == 0xffff) {
+			iph->check = 0;
+			iph->check = ip_fast_csum((unsigned char *)iph,
+						  iph->ihl);
+		}
+
+		iphlen = iph->ihl * 4;
+		iph_proto = iph->protocol;
+	} else if (skb_proto == ETH_P_IPV6) {
+		iph6 = (struct ipv6hdr *)skb->data;
+		iphlen = sizeof(struct ipv6hdr);
+		iph_proto = iph6->nexthdr;
+	}
+
+	/* In OVS environment, when a flow is not cached, specifically for a
+	 * new TCP connection, the first packet information is passed up
+	 * the user space for finding a flow. During this process, OVS computes
+	 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
+	 *
+	 * Given that we zeroed out TCP checksum field in transmit path
+	 * (refer ibmveth_start_xmit routine) as we set "no checksum bit",
+	 * OVS computed checksum will be incorrect w/o TCP pseudo checksum
+	 * in the packet. This leads to OVS dropping the packet and hence
+	 * TCP retransmissions are seen.
+	 *
+	 * So, re-compute TCP pseudo header checksum.
+	 */
+	if (iph_proto == IPPROTO_TCP && adapter->is_active_trunk) {
+		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
+		tcphdrlen = skb->len - iphlen;
+
+		/* Recompute TCP pseudo header checksum */
+		if (skb_proto == ETH_P_IP)
+			tcph->check = ~csum_tcpudp_magic(iph->saddr,
+					iph->daddr, tcphdrlen, iph_proto, 0);
+		else if (skb_proto == ETH_P_IPV6)
+			tcph->check = ~csum_ipv6_magic(&iph6->saddr,
+					&iph6->daddr, tcphdrlen, iph_proto, 0);
+
+		/* Setup SKB fields for checksum offload */
+		skb_partial_csum_set(skb, iphlen,
+				     offsetof(struct tcphdr, check));
+		skb_reset_network_header(skb);
+	}
+}
+
 static int ibmveth_poll(struct napi_struct *napi, int budget)
 {
 	struct ibmveth_adapter *adapter =
@@ -1239,7 +1308,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	struct net_device *netdev = adapter->netdev;
 	int frames_processed = 0;
 	unsigned long lpar_rc;
-	struct iphdr *iph;
 	u16 mss = 0;
 
 restart_poll:
@@ -1297,17 +1365,7 @@ restart_poll:
 
 			if (csum_good) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
-					iph = (struct iphdr *)skb->data;
-
-					/* If the IP checksum is not offloaded and if the packet
-					 * is large send, the checksum must be rebuilt.
-					 */
-					if (iph->check == 0xffff) {
-						iph->check = 0;
-						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-					}
-				}
+				ibmveth_rx_csum_helper(skb, adapter);
 			}
 
 			if (length > netdev->mtu + ETH_HLEN) {
@@ -1626,6 +1684,13 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		netdev->hw_features |= NETIF_F_TSO;
 	}
 
+	adapter->is_active_trunk = false;
+	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK)) {
+		adapter->is_active_trunk = true;
+		netdev->hw_features |= NETIF_F_FRAGLIST;
+		netdev->features |= NETIF_F_FRAGLIST;
+	}
+
 	netdev->min_mtu = IBMVETH_MIN_MTU;
 	netdev->max_mtu = ETH_MAX_MTU;
 
@@ -1648,11 +1713,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
-	adapter->buffer_list_dma = DMA_ERROR_CODE;
-	adapter->filter_list_dma = DMA_ERROR_CODE;
-	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
 	netdev_dbg(netdev, "registering netdev...\n");
 
 	ibmveth_set_features(netdev, netdev->features);
@@ -1843,7 +1903,7 @@ static struct vio_device_id ibmveth_device_table[] = {
 };
 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
 
-static struct dev_pm_ops ibmveth_pm_ops = {
+static const struct dev_pm_ops ibmveth_pm_ops = {
 	.resume = ibmveth_resume
 };
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index ed8780cca982..01c587fc02c7 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -156,6 +156,7 @@ struct ibmveth_adapter {
 	int pool_config;
 	int rx_csum;
 	int large_send;
+	bool is_active_trunk;
 	void *bounce_buffer;
 	dma_addr_t bounce_buffer_dma;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c0fbeb387db4..a3e694679635 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -183,6 +183,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	send_request_map(adapter, ltb->addr,
 			 ltb->size, ltb->map_id);
 	wait_for_completion(&adapter->fw_done);
+
+	if (adapter->fw_done_rc) {
+		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+			adapter->fw_done_rc);
+		return -1;
+	}
 	return 0;
 }
 
@@ -200,6 +206,33 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 }
 
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+				struct ibmvnic_long_term_buff *ltb)
+{
+	memset(ltb->buff, 0, ltb->size);
+
+	init_completion(&adapter->fw_done);
+	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+	wait_for_completion(&adapter->fw_done);
+
+	if (adapter->fw_done_rc) {
+		dev_info(&adapter->vdev->dev,
+			 "Reset failed, attempting to free and reallocate buffer\n");
+		free_long_term_buff(adapter, ltb);
+		return alloc_long_term_buff(adapter, ltb, ltb->size);
+	}
+	return 0;
+}
+
+static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+	     i++)
+		adapter->rx_pool[i].active = 0;
+}
+
 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 			      struct ibmvnic_rx_pool *pool)
 {
@@ -217,6 +250,9 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	int index;
 	int i;
 
+	if (!pool->active)
+		return;
+
 	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 			       be32_to_cpu(adapter->login_rsp_buf->
 			       off_rxadd_subcrqs));
@@ -287,6 +323,15 @@ failure:
 	dev_kfree_skb_any(skb);
 	adapter->replenish_add_buff_failure++;
 	atomic_add(buffers_added, &pool->available);
+
+	if (lpar_rc == H_CLOSED) {
+		/* Disable buffer pool replenishment and report carrier off if
+		 * queue is closed. Firmware guarantees that a signal will
+		 * be sent to the driver, triggering a reset.
+		 */
+		deactivate_rx_pools(adapter);
+		netif_carrier_off(adapter->netdev);
+	}
 }
 
 static void replenish_pools(struct ibmvnic_adapter *adapter)
@@ -331,6 +376,35 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
 	return 0;
 }
 
+static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_rx_pool *rx_pool;
+	int rx_scrqs;
+	int i, j, rc;
+
+	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+	for (i = 0; i < rx_scrqs; i++) {
+		rx_pool = &adapter->rx_pool[i];
+
+		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+		if (rc)
+			return rc;
+
+		for (j = 0; j < rx_pool->size; j++)
+			rx_pool->free_map[j] = j;
+
+		memset(rx_pool->rx_buff, 0,
+		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
+
+		atomic_set(&rx_pool->available, 0);
+		rx_pool->next_alloc = 0;
+		rx_pool->next_free = 0;
+		rx_pool->active = 1;
+	}
+
+	return 0;
+}
+
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rx_pool *rx_pool;
@@ -432,6 +506,34 @@ static int init_rx_pools(struct net_device *netdev)
 	return 0;
 }
 
+static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+{
+	struct ibmvnic_tx_pool *tx_pool;
+	int tx_scrqs;
+	int i, j, rc;
+
+	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+	for (i = 0; i < tx_scrqs; i++) {
+		tx_pool = &adapter->tx_pool[i];
+
+		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+		if (rc)
+			return rc;
+
+		memset(tx_pool->tx_buff, 0,
+		       adapter->req_tx_entries_per_subcrq *
+		       sizeof(struct ibmvnic_tx_buff));
+
+		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
+			tx_pool->free_map[j] = j;
+
+		tx_pool->consumer_index = 0;
+		tx_pool->producer_index = 0;
+	}
+
+	return 0;
+}
+
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_tx_pool *tx_pool;
@@ -518,6 +620,32 @@ static void release_error_buffers(struct ibmvnic_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
 }
 
+static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	if (adapter->napi_enabled)
+		return;
+
+	for (i = 0; i < adapter->req_rx_queues; i++)
+		napi_enable(&adapter->napi[i]);
+
+	adapter->napi_enabled = true;
+}
+
+static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
+{
+	int i;
+
+	if (!adapter->napi_enabled)
+		return;
+
+	for (i = 0; i < adapter->req_rx_queues; i++)
+		napi_disable(&adapter->napi[i]);
+
+	adapter->napi_enabled = false;
+}
+
 static int ibmvnic_login(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -635,12 +763,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
 	if (rc)
 		return rc;
 
-	rc = init_sub_crq_irqs(adapter);
-	if (rc) {
-		netdev_err(netdev, "failed to initialize sub crq irqs\n");
-		return -1;
-	}
-
 	rc = init_stats_token(adapter);
 	if (rc)
 		return rc;
@@ -674,9 +796,7 @@ static int __ibmvnic_open(struct net_device *netdev)
 
 	adapter->state = VNIC_OPENING;
 	replenish_pools(adapter);
-
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		napi_enable(&adapter->napi[i]);
+	ibmvnic_napi_enable(adapter);
 
 	/* We're ready to receive frames, enable the sub-crq interrupts and
 	 * set the logical link state to up
@@ -778,14 +898,14 @@ static int __ibmvnic_close(struct net_device *netdev)
 	int i;
 
 	adapter->state = VNIC_CLOSING;
-	netif_tx_stop_all_queues(netdev);
 
-	if (adapter->napi) {
-		for (i = 0; i < adapter->req_rx_queues; i++)
-			napi_disable(&adapter->napi[i]);
-	}
+	/* ensure that transmissions are stopped if called by do_reset */
+	if (adapter->resetting)
+		netif_tx_disable(netdev);
+	else
+		netif_tx_stop_all_queues(netdev);
 
-	clean_tx_pools(adapter);
+	ibmvnic_napi_disable(adapter);
 
 	if (adapter->tx_scrq) {
 		for (i = 0; i < adapter->req_tx_queues; i++)
@@ -814,6 +934,7 @@ static int __ibmvnic_close(struct net_device *netdev)
 		}
 	}
 
+	clean_tx_pools(adapter);
 	adapter->state = VNIC_CLOSED;
 	return rc;
 }
@@ -1092,8 +1213,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		dev_kfree_skb_any(skb);
 		tx_buff->skb = NULL;
 
-		if (lpar_rc == H_CLOSED)
-			netif_stop_subqueue(netdev, queue_num);
+		if (lpar_rc == H_CLOSED) {
+			/* Disable TX and report carrier off if queue is closed.
+			 * Firmware guarantees that a signal will be sent to the
+			 * driver, triggering a reset or some other action.
+			 */
+			netif_tx_stop_all_queues(netdev);
+			netif_carrier_off(netdev);
+		}
 
 		tx_send_failed++;
 		tx_dropped++;
@@ -1206,37 +1333,39 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	if (rc)
 		return rc;
 
-	/* remove the closed state so when we call open it appears
-	 * we are coming from the probed state.
-	 */
-	adapter->state = VNIC_PROBED;
+	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
+		/* remove the closed state so when we call open it appears
+		 * we are coming from the probed state.
+		 */
+		adapter->state = VNIC_PROBED;
 
-	release_resources(adapter);
-	release_sub_crqs(adapter);
-	release_crq_queue(adapter);
+		rc = ibmvnic_init(adapter);
+		if (rc)
+			return 0;
 
-	rc = ibmvnic_init(adapter);
-	if (rc)
-		return 0;
+		/* If the adapter was in PROBE state prior to the reset,
+		 * exit here.
+		 */
+		if (reset_state == VNIC_PROBED)
+			return 0;
 
-	/* If the adapter was in PROBE state prior to the reset, exit here. */
-	if (reset_state == VNIC_PROBED)
-		return 0;
+		rc = ibmvnic_login(netdev);
+		if (rc) {
+			adapter->state = VNIC_PROBED;
+			return 0;
+		}
 
-	rc = ibmvnic_login(netdev);
-	if (rc) {
-		adapter->state = VNIC_PROBED;
-		return 0;
-	}
+		rc = reset_tx_pools(adapter);
+		if (rc)
+			return rc;
 
-	rtnl_lock();
-	rc = init_resources(adapter);
-	rtnl_unlock();
-	if (rc)
-		return rc;
+		rc = reset_rx_pools(adapter);
+		if (rc)
+			return rc;
 
-	if (reset_state == VNIC_CLOSED)
-		return 0;
+		if (reset_state == VNIC_CLOSED)
+			return 0;
+	}
 
 	rc = __ibmvnic_open(netdev);
 	if (rc) {
@@ -1254,6 +1383,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	for (i = 0; i < adapter->req_rx_queues; i++)
 		napi_schedule(&adapter->napi[i]);
 
+	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+		netdev_notify_peers(netdev);
+
 	return 0;
 }
 
@@ -1313,6 +1445,7 @@ static void __ibmvnic_reset(struct work_struct *work)
 
 		if (rc) {
 			free_all_rwi(adapter);
+			mutex_unlock(&adapter->reset_lock);
 			return;
 		}
 
@@ -1333,6 +1466,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
 		return;
 	}
 
+	if (adapter->state == VNIC_PROBING) {
+		netdev_warn(netdev, "Adapter reset during probe\n");
+		adapter->init_done_rc = EAGAIN;
+		return;
+	}
+
 	mutex_lock(&adapter->rwi_lock);
 
 	list_for_each(entry, &adapter->rwi_list) {
@@ -1383,6 +1522,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int scrq_num = (int)(napi - adapter->napi);
 	int frames_processed = 0;
+
 restart_poll:
 	while (frames_processed < budget) {
 		struct sk_buff *skb;
@@ -1392,6 +1532,12 @@ restart_poll:
 		u16 offset;
 		u8 flags = 0;
 
+		if (unlikely(adapter->resetting)) {
+			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+			napi_complete_done(napi, frames_processed);
+			return frames_processed;
+		}
+
 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 			break;
 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1441,7 +1587,9 @@ restart_poll:
 		netdev->stats.rx_bytes += length;
 		frames_processed++;
 	}
-	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
+
+	if (adapter->state != VNIC_CLOSING)
+		replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
 
 	if (frames_processed < budget) {
 		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1614,6 +1762,44 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
 
 /* Routines for managing CRQs/sCRQs  */
 
+static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+				   struct ibmvnic_sub_crq_queue *scrq)
+{
+	int rc;
+
+	if (scrq->irq) {
+		free_irq(scrq->irq, scrq);
+		irq_dispose_mapping(scrq->irq);
+		scrq->irq = 0;
+	}
+
+	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+	scrq->cur = 0;
+
+	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
+	return rc;
+}
+
+static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
+{
+	int i, rc;
+
+	for (i = 0; i < adapter->req_tx_queues; i++) {
+		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+		if (rc)
+			return rc;
+	}
+
+	for (i = 0; i < adapter->req_rx_queues; i++) {
+		rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
 				  struct ibmvnic_sub_crq_queue *scrq)
 {
@@ -2109,8 +2295,7 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
 {
 	union sub_crq *entry = &scrq->msgs[scrq->cur];
 
-	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
-	    adapter->state == VNIC_CLOSING)
+	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
 		return 1;
 	else
 		return 0;
@@ -2748,6 +2933,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
 
 	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+	else
+		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
 }
 
 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -2899,36 +3086,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 	return 0;
 }
 
-static void handle_request_map_rsp(union ibmvnic_crq *crq,
-				   struct ibmvnic_adapter *adapter)
-{
-	struct device *dev = &adapter->vdev->dev;
-	u8 map_id = crq->request_map_rsp.map_id;
-	int tx_subcrqs;
-	int rx_subcrqs;
-	long rc;
-	int i;
-
-	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
-	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
-
-	rc = crq->request_map_rsp.rc.code;
-	if (rc) {
-		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
-		adapter->map_id--;
-		/* need to find and zero tx/rx_pool map_id */
-		for (i = 0; i < tx_subcrqs; i++) {
-			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
-				adapter->tx_pool[i].long_term_buff.map_id = 0;
-		}
-		for (i = 0; i < rx_subcrqs; i++) {
-			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
-				adapter->rx_pool[i].long_term_buff.map_id = 0;
-		}
-	}
-	complete(&adapter->fw_done);
-}
-
 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
 				     struct ibmvnic_adapter *adapter)
 {
@@ -3153,6 +3310,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		switch (gen_crq->cmd) {
 		case IBMVNIC_CRQ_INIT:
 			dev_info(dev, "Partner initialized\n");
+			adapter->from_passive_init = true;
+			complete(&adapter->init_done);
 			break;
 		case IBMVNIC_CRQ_INIT_COMPLETE:
 			dev_info(dev, "Partner initialization complete\n");
@@ -3207,7 +3366,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 		handle_query_map_rsp(crq, adapter);
 		break;
 	case REQUEST_MAP_RSP:
-		handle_request_map_rsp(crq, adapter);
+		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
+		complete(&adapter->fw_done);
 		break;
 	case REQUEST_UNMAP_RSP:
 		handle_request_unmap_rsp(crq, adapter);
@@ -3461,29 +3621,61 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 	unsigned long timeout = msecs_to_jiffies(30000);
 	int rc;
 
-	rc = init_crq_queue(adapter);
+	if (adapter->resetting) {
+		rc = ibmvnic_reset_crq(adapter);
+		if (!rc)
+			rc = vio_enable_interrupts(adapter->vdev);
+	} else {
+		rc = init_crq_queue(adapter);
+	}
+
 	if (rc) {
 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
 		return rc;
 	}
 
+	adapter->from_passive_init = false;
+
 	init_completion(&adapter->init_done);
+	adapter->init_done_rc = 0;
 	ibmvnic_send_crq_init(adapter);
 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
 		dev_err(dev, "Initialization sequence timed out\n");
+		return -1;
+	}
+
+	if (adapter->init_done_rc) {
 		release_crq_queue(adapter);
+		return adapter->init_done_rc;
+	}
+
+	if (adapter->from_passive_init) {
+		adapter->state = VNIC_OPEN;
+		adapter->from_passive_init = false;
 		return -1;
 	}
 
-	rc = init_sub_crqs(adapter);
+	if (adapter->resetting)
+		rc = reset_sub_crq_queues(adapter);
+	else
+		rc = init_sub_crqs(adapter);
 	if (rc) {
 		dev_err(dev, "Initialization of sub crqs failed\n");
 		release_crq_queue(adapter);
+		return rc;
+	}
+
+	rc = init_sub_crq_irqs(adapter);
+	if (rc) {
+		dev_err(dev, "Failed to initialize sub crq irqs\n");
+		release_crq_queue(adapter);
 	}
 
 	return rc;
 }
 
+static struct device_attribute dev_attr_failover;
+
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	struct ibmvnic_adapter *adapter;
@@ -3532,17 +3724,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	mutex_init(&adapter->rwi_lock);
 	adapter->resetting = false;
 
-	rc = ibmvnic_init(adapter);
+	do {
+		rc = ibmvnic_init(adapter);
+		if (rc && rc != EAGAIN) {
+			free_netdev(netdev);
+			return rc;
+		}
+	} while (rc == EAGAIN);
+
+	netdev->mtu = adapter->req_mtu - ETH_HLEN;
+
+	rc = device_create_file(&dev->dev, &dev_attr_failover);
 	if (rc) {
 		free_netdev(netdev);
 		return rc;
 	}
 
-	netdev->mtu = adapter->req_mtu - ETH_HLEN;
-
 	rc = register_netdev(netdev);
 	if (rc) {
 		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+		device_remove_file(&dev->dev, &dev_attr_failover);
 		free_netdev(netdev);
 		return rc;
 	}
@@ -3568,12 +3769,49 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	adapter->state = VNIC_REMOVED;
 
 	mutex_unlock(&adapter->reset_lock);
+	device_remove_file(&dev->dev, &dev_attr_failover);
 	free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);
 
 	return 0;
 }
 
+static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	__be64 session_token;
+	long rc;
+
+	if (!sysfs_streq(buf, "1"))
+		return -EINVAL;
+
+	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
+			 H_GET_SESSION_TOKEN, 0, 0, 0);
+	if (rc) {
+		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
+			   rc);
+		return -EINVAL;
+	}
+
+	session_token = (__be64)retbuf[0];
+	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
+		   be64_to_cpu(session_token));
+	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+				H_SESSION_ERR_DETECTED, session_token, 0, 0);
+	if (rc) {
+		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
+			   rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(failover, 0200, NULL, failover_store);
+
 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
 {
 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
@@ -3610,6 +3848,9 @@ static int ibmvnic_resume(struct device *dev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	if (adapter->state != VNIC_OPEN)
+		return 0;
+
 	/* kick the interrupt handlers just in case we lost an interrupt */
 	for (i = 0; i < adapter->req_rx_queues; i++)
 		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4702b48cfa44..8eff6e15f4bb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -595,7 +595,7 @@ struct ibmvnic_request_map_rsp {
 	u8 cmd;
 	u8 reserved1;
 	u8 map_id;
-	u8 reserved2[4];
+	u8 reserved2[8];
 	struct ibmvnic_rc rc;
 } __packed __aligned(8);
 
@@ -925,6 +925,7 @@ enum vnic_state {VNIC_PROBING = 1,
 enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
 			   VNIC_RESET_MOBILITY,
 			   VNIC_RESET_FATAL,
+			   VNIC_RESET_NON_FATAL,
 			   VNIC_RESET_TIMEOUT};
 
 struct ibmvnic_rwi {
@@ -987,6 +988,7 @@ struct ibmvnic_adapter {
 	spinlock_t error_list_lock;
 
 	struct completion fw_done;
+	int fw_done_rc;
 
 	/* partner capabilities */
 	u64 min_tx_queues;
@@ -1031,4 +1033,5 @@ struct ibmvnic_adapter {
 	struct list_head rwi_list;
 	struct work_struct ibmvnic_reset;
 	bool resetting;
+	bool napi_enabled, from_passive_init;
 };
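
Editor's note (illustration, not part of the patch set): the new ibmveth_rx_csum_helper() above re-seeds tcph->check with the folded one's-complement sum of the TCP pseudo header — ~csum_tcpudp_magic() / ~csum_ipv6_magic() with a zero payload sum — and then marks the skb CHECKSUM_PARTIAL via skb_partial_csum_set(), so a later consumer such as OVS can finish the checksum over the header and payload. The standalone C sketch below reproduces that IPv4 pseudo-header seeding in userspace; the addresses are made up and csum_add()/csum_fold() are local helpers, not the kernel's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement accumulation over a buffer in network byte order. */
static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)			/* pad a trailing odd byte with zero */
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;
}

/* Fold the 32-bit accumulator to 16 bits, adding the carries back in. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* IPv4 TCP pseudo header: saddr, daddr, zero byte, protocol, TCP
	 * length. Hypothetical flow 192.168.1.10 -> 192.168.1.20, protocol 6
	 * (TCP), 20-byte bare TCP header.
	 */
	const uint8_t pseudo[12] = {
		192, 168, 1, 10,	/* source address */
		192, 168, 1, 20,	/* destination address */
		0, 6,			/* zero, IPPROTO_TCP */
		0, 20,			/* TCP length */
	};
	uint16_t seed = csum_fold(csum_add(0, pseudo, sizeof(pseudo)));

	/* This folded, uncomplemented sum is what the driver stores in
	 * tcph->check for CHECKSUM_PARTIAL: later stages keep summing the
	 * TCP header and payload into it and complement once at the end.
	 */
	printf("pseudo-header seed: 0x%04x\n", seed);
	return 0;
}

Verifying a received segment uses the same helpers in reverse: accumulate over the pseudo header plus the entire TCP segment (checksum field included) and fold; a result of 0xffff means the checksum is valid.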