Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r-- | drivers/net/ethernet/ibm/ehea/ehea_main.c |  14
-rw-r--r-- | drivers/net/ethernet/ibm/ehea/ehea_qmr.c  |   5
-rw-r--r-- | drivers/net/ethernet/ibm/emac/core.c      |   9
-rw-r--r-- | drivers/net/ethernet/ibm/emac/core.h      |   2
-rw-r--r-- | drivers/net/ethernet/ibm/emac/zmii.c      |   3
-rw-r--r-- | drivers/net/ethernet/ibm/emac/zmii.h      |   3
-rw-r--r-- | drivers/net/ethernet/ibm/ibmveth.c        |  26
-rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.c        | 477
-rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.h        |   8
9 files changed, 416 insertions, 131 deletions
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index cca71ba7a74a..0273fb7a9d01 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -1577,20 +1577,16 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) ehea_destroy_eq(pr->eq); for (i = 0; i < pr->rq1_skba.len; i++) - if (pr->rq1_skba.arr[i]) - dev_kfree_skb(pr->rq1_skba.arr[i]); + dev_kfree_skb(pr->rq1_skba.arr[i]); for (i = 0; i < pr->rq2_skba.len; i++) - if (pr->rq2_skba.arr[i]) - dev_kfree_skb(pr->rq2_skba.arr[i]); + dev_kfree_skb(pr->rq2_skba.arr[i]); for (i = 0; i < pr->rq3_skba.len; i++) - if (pr->rq3_skba.arr[i]) - dev_kfree_skb(pr->rq3_skba.arr[i]); + dev_kfree_skb(pr->rq3_skba.arr[i]); for (i = 0; i < pr->sq_skba.len; i++) - if (pr->sq_skba.arr[i]) - dev_kfree_skb(pr->sq_skba.arr[i]); + dev_kfree_skb(pr->sq_skba.arr[i]); vfree(pr->rq1_skba.arr); vfree(pr->rq2_skba.arr); @@ -2790,7 +2786,7 @@ out: return; } -static void ehea_tx_watchdog(struct net_device *dev) +static void ehea_tx_watchdog(struct net_device *dev, unsigned int txqueue) { struct ehea_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 6e70658d50c4..db45373ea31c 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -670,13 +670,10 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) static int ehea_is_hugepage(unsigned long pfn) { - int page_order; - if (pfn & EHEA_HUGEPAGE_PFN_MASK) return 0; - page_order = compound_order(pfn_to_page(pfn)); - if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT) + if (page_shift(pfn_to_page(pfn)) != EHEA_HUGEPAGESHIFT) return 0; return 1; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 395dde444483..b7fc17756c51 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -776,7 +776,7 @@ static void emac_reset_work(struct work_struct *work) mutex_unlock(&dev->link_lock); } -static void emac_tx_timeout(struct net_device *ndev) +static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct emac_instance *dev = netdev_priv(ndev); @@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) ctrl); /* skb fragments */ for (i = 0; i < nr_frags; ++i) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) @@ -2849,6 +2849,7 @@ static int emac_init_config(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; const void *p; + int err; /* Read config from device-tree */ if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1)) @@ -2897,8 +2898,8 @@ static int emac_init_config(struct emac_instance *dev) dev->mal_burst_size = 256; /* PHY mode needs some decoding */ - dev->phy_mode = of_get_phy_mode(np); - if (dev->phy_mode < 0) + err = of_get_phy_mode(np, &dev->phy_mode); + if (err) dev->phy_mode = PHY_INTERFACE_MODE_NA; /* Check EMAC version */ diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index e9cda024cbf6..89a1b0fea158 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -171,7 +171,7 @@ struct emac_instance { struct mal_commac commac; /* PHY infos */ - int phy_mode; + 
phy_interface_t phy_mode; u32 phy_map; u32 phy_address; u32 phy_feat_exc; diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index b9e821de2ac6..57a25c7a9e70 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -78,7 +78,8 @@ static inline u32 zmii_mode_mask(int mode, int input) } } -int zmii_attach(struct platform_device *ofdev, int input, int *mode) +int zmii_attach(struct platform_device *ofdev, int input, + phy_interface_t *mode) { struct zmii_instance *dev = platform_get_drvdata(ofdev); struct zmii_regs __iomem *p = dev->base; diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h index 41d46e9b87ba..65daedc78594 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.h +++ b/drivers/net/ethernet/ibm/emac/zmii.h @@ -50,7 +50,8 @@ struct zmii_instance { int zmii_init(void); void zmii_exit(void); -int zmii_attach(struct platform_device *ofdev, int input, int *mode); +int zmii_attach(struct platform_device *ofdev, int input, + phy_interface_t *mode); void zmii_detach(struct platform_device *ofdev, int input); void zmii_get_mdio(struct platform_device *ofdev, int input); void zmii_put_mdio(struct platform_device *ofdev, int input); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index c5be4ebd8437..84121aab7ff1 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1011,6 +1011,29 @@ static int ibmveth_send(struct ibmveth_adapter *adapter, return 0; } +static int ibmveth_is_packet_unsupported(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ethhdr *ether_header; + int ret = 0; + + ether_header = eth_hdr(skb); + + if (ether_addr_equal(ether_header->h_dest, netdev->dev_addr)) { + netdev_dbg(netdev, "veth doesn't support loopback packets, dropping packet.\n"); + netdev->stats.tx_dropped++; + ret = -EOPNOTSUPP; + } + + if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) { + netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n"); + netdev->stats.tx_dropped++; + ret = -EOPNOTSUPP; + } + + return ret; +} + static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) { @@ -1022,6 +1045,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, dma_addr_t dma_addr; unsigned long mss = 0; + if (ibmveth_is_packet_unsupported(skb, netdev)) + goto out; + /* veth doesn't handle frag_list, so linearize the skb. * When GRO is enabled SKB's can have frag_list. */ diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cebd20f3128d..c75239d8820f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token, return rc; } +/** + * ibmvnic_wait_for_completion - Check device state and wait for completion + * @adapter: private device data + * @comp_done: completion structure to wait for + * @timeout: time to wait in milliseconds + * + * Wait for a completion signal or until the timeout limit is reached + * while checking that the device is still active. 
+ */ +static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, + struct completion *comp_done, + unsigned long timeout) +{ + struct net_device *netdev; + unsigned long div_timeout; + u8 retry; + + netdev = adapter->netdev; + retry = 5; + div_timeout = msecs_to_jiffies(timeout / retry); + while (true) { + if (!adapter->crq.active) { + netdev_err(netdev, "Device down!\n"); + return -ENODEV; + } + if (!retry--) + break; + if (wait_for_completion_timeout(comp_done, div_timeout)) + return 0; + } + netdev_err(netdev, "Operation timed out.\n"); + return -ETIMEDOUT; +} + static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { @@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, ltb->map_id = adapter->map_id; adapter->map_id++; - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); if (rc) { dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_err(dev, + "Long term map request aborted or timed out,rc = %d\n", + rc); + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + mutex_unlock(&adapter->fw_lock); return rc; } - wait_for_completion(&adapter->fw_done); if (adapter->fw_done_rc) { dev_err(dev, "Couldn't map long term buffer,rc = %d\n", adapter->fw_done_rc); dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + mutex_unlock(&adapter->fw_lock); return -1; } + mutex_unlock(&adapter->fw_lock); return 0; } @@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, static int reset_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb) { + struct device *dev = &adapter->vdev->dev; int rc; memset(ltb->buff, 0, ltb->size); - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + + reinit_completion(&adapter->fw_done); rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - if (rc) + if (rc) { + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_info(dev, + "Reset failed, long term map request timed out or aborted\n"); + mutex_unlock(&adapter->fw_lock); return rc; - wait_for_completion(&adapter->fw_done); + } if (adapter->fw_done_rc) { - dev_info(&adapter->vdev->dev, + dev_info(dev, "Reset failed, attempting to free and reallocate buffer\n"); free_long_term_buff(adapter, ltb); + mutex_unlock(&adapter->fw_lock); return alloc_long_term_buff(adapter, ltb, ltb->size); } + mutex_unlock(&adapter->fw_lock); return 0; } @@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) if (adapter->vpd->buff) len = adapter->vpd->len; - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); + crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; crq.get_vpd_size.cmd = GET_VPD_SIZE; rc = ibmvnic_send_crq(adapter, &crq); - if (rc) + if (rc) { + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc); + mutex_unlock(&adapter->fw_lock); return rc; - wait_for_completion(&adapter->fw_done); + } 
+ mutex_unlock(&adapter->fw_lock); if (!adapter->vpd->len) return -ENODATA; @@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) return -ENOMEM; } + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; reinit_completion(&adapter->fw_done); + crq.get_vpd.first = IBMVNIC_CRQ_CMD; crq.get_vpd.cmd = GET_VPD; crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); @@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) if (rc) { kfree(adapter->vpd->buff); adapter->vpd->buff = NULL; + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc); + kfree(adapter->vpd->buff); + adapter->vpd->buff = NULL; + mutex_unlock(&adapter->fw_lock); return rc; } - wait_for_completion(&adapter->fw_done); + mutex_unlock(&adapter->fw_lock); return 0; } @@ -1207,7 +1295,7 @@ static void ibmvnic_cleanup(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); /* ensure that transmissions are stopped if called by do_reset */ - if (adapter->resetting) + if (test_bit(0, &adapter->resetting)) netif_tx_disable(netdev); else netif_tx_stop_all_queues(netdev); @@ -1428,7 +1516,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) u8 proto = 0; netdev_tx_t ret = NETDEV_TX_OK; - if (adapter->resetting) { + if (test_bit(0, &adapter->resetting)) { if (!netif_subqueue_stopped(netdev, skb)) netif_stop_subqueue(netdev, queue_num); dev_kfree_skb_any(skb); @@ -1485,7 +1573,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) memcpy(dst + cur, page_address(skb_frag_page(frag)) + - frag->page_offset, skb_frag_size(frag)); + skb_frag_off(frag), skb_frag_size(frag)); cur += skb_frag_size(frag); } } else { @@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr) crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr); - init_completion(&adapter->fw_done); + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); + rc = ibmvnic_send_crq(adapter, &crq); if (rc) { rc = -EIO; + mutex_unlock(&adapter->fw_lock); goto err; } - wait_for_completion(&adapter->fw_done); + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); /* netdev->dev_addr is changed in handle_change_mac_rsp function */ - if (adapter->fw_done_rc) { + if (rc || adapter->fw_done_rc) { rc = -EIO; + mutex_unlock(&adapter->fw_lock); goto err; } - + mutex_unlock(&adapter->fw_lock); return 0; err: ether_addr_copy(adapter->mac_addr, netdev->dev_addr); @@ -1724,6 +1817,86 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) } /** + * do_change_param_reset returns zero if we are able to keep processing reset + * events, or non-zero if we hit a fatal error and must halt. 
+ */ +static int do_change_param_reset(struct ibmvnic_adapter *adapter, + struct ibmvnic_rwi *rwi, + u32 reset_state) +{ + struct net_device *netdev = adapter->netdev; + int i, rc; + + netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n", + rwi->reset_reason); + + netif_carrier_off(netdev); + adapter->reset_reason = rwi->reset_reason; + + ibmvnic_cleanup(netdev); + + if (reset_state == VNIC_OPEN) { + rc = __ibmvnic_close(netdev); + if (rc) + return rc; + } + + release_resources(adapter); + release_sub_crqs(adapter, 1); + release_crq_queue(adapter); + + adapter->state = VNIC_PROBED; + + rc = init_crq_queue(adapter); + + if (rc) { + netdev_err(adapter->netdev, + "Couldn't initialize crq. rc=%d\n", rc); + return rc; + } + + rc = ibmvnic_reset_init(adapter); + if (rc) + return IBMVNIC_INIT_FAILED; + + /* If the adapter was in PROBE state prior to the reset, + * exit here. + */ + if (reset_state == VNIC_PROBED) + return 0; + + rc = ibmvnic_login(netdev); + if (rc) { + adapter->state = reset_state; + return rc; + } + + rc = init_resources(adapter); + if (rc) + return rc; + + ibmvnic_disable_irqs(adapter); + + adapter->state = VNIC_CLOSED; + + if (reset_state == VNIC_CLOSED) + return 0; + + rc = __ibmvnic_open(netdev); + if (rc) + return IBMVNIC_OPEN_FAILED; + + /* refresh device's multicast list */ + ibmvnic_set_multi(netdev); + + /* kick napi */ + for (i = 0; i < adapter->req_rx_queues; i++) + napi_schedule(&adapter->napi[i]); + + return 0; +} + +/** * do_reset returns zero if we are able to keep processing reset events, or * non-zero if we hit a fatal error and must halt. */ @@ -1738,6 +1911,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", rwi->reset_reason); + rtnl_lock(); + netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; @@ -1751,16 +1926,25 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (reset_state == VNIC_OPEN && adapter->reset_reason != VNIC_RESET_MOBILITY && adapter->reset_reason != VNIC_RESET_FAILOVER) { - rc = __ibmvnic_close(netdev); + adapter->state = VNIC_CLOSING; + + /* Release the RTNL lock before link state change and + * re-acquire after the link state change to allow + * linkwatch_event to grab the RTNL lock and run during + * a reset. + */ + rtnl_unlock(); + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); + rtnl_lock(); if (rc) - return rc; - } + goto out; - if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || - adapter->wait_for_reset) { - release_resources(adapter); - release_sub_crqs(adapter, 1); - release_crq_queue(adapter); + if (adapter->state != VNIC_CLOSING) { + rc = -1; + goto out; + } + + adapter->state = VNIC_CLOSED; } if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { @@ -1769,9 +1953,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, */ adapter->state = VNIC_PROBED; - if (adapter->wait_for_reset) { - rc = init_crq_queue(adapter); - } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { + if (adapter->reset_reason == VNIC_RESET_MOBILITY) { rc = ibmvnic_reenable_crq_queue(adapter); release_sub_crqs(adapter, 1); } else { @@ -1783,36 +1965,35 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rc) { netdev_err(adapter->netdev, "Couldn't initialize crq. rc=%d\n", rc); - return rc; + goto out; } rc = ibmvnic_reset_init(adapter); - if (rc) - return IBMVNIC_INIT_FAILED; + if (rc) { + rc = IBMVNIC_INIT_FAILED; + goto out; + } /* If the adapter was in PROBE state prior to the reset, * exit here. 
*/ - if (reset_state == VNIC_PROBED) - return 0; + if (reset_state == VNIC_PROBED) { + rc = 0; + goto out; + } rc = ibmvnic_login(netdev); if (rc) { adapter->state = reset_state; - return rc; + goto out; } - if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || - adapter->wait_for_reset) { - rc = init_resources(adapter); - if (rc) - return rc; - } else if (adapter->req_rx_queues != old_num_rx_queues || - adapter->req_tx_queues != old_num_tx_queues || - adapter->req_rx_add_entries_per_subcrq != - old_num_rx_slots || - adapter->req_tx_entries_per_subcrq != - old_num_tx_slots) { + if (adapter->req_rx_queues != old_num_rx_queues || + adapter->req_tx_queues != old_num_tx_queues || + adapter->req_rx_add_entries_per_subcrq != + old_num_rx_slots || + adapter->req_tx_entries_per_subcrq != + old_num_tx_slots) { release_rx_pools(adapter); release_tx_pools(adapter); release_napi(adapter); @@ -1820,32 +2001,30 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = init_resources(adapter); if (rc) - return rc; + goto out; } else { rc = reset_tx_pools(adapter); if (rc) - return rc; + goto out; rc = reset_rx_pools(adapter); if (rc) - return rc; + goto out; } ibmvnic_disable_irqs(adapter); } adapter->state = VNIC_CLOSED; - if (reset_state == VNIC_CLOSED) - return 0; + if (reset_state == VNIC_CLOSED) { + rc = 0; + goto out; + } rc = __ibmvnic_open(netdev); if (rc) { - if (list_empty(&adapter->rwi_list)) - adapter->state = VNIC_CLOSED; - else - adapter->state = reset_state; - - return 0; + rc = IBMVNIC_OPEN_FAILED; + goto out; } /* refresh device's multicast list */ @@ -1855,11 +2034,15 @@ static int do_reset(struct ibmvnic_adapter *adapter, for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]); - if (adapter->reset_reason != VNIC_RESET_FAILOVER && - adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) + if (adapter->reset_reason != VNIC_RESET_FAILOVER) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); - return 0; + rc = 0; + +out: + rtnl_unlock(); + + return rc; } static int do_hard_reset(struct ibmvnic_adapter *adapter, @@ -1919,14 +2102,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, return 0; rc = __ibmvnic_open(netdev); - if (rc) { - if (list_empty(&adapter->rwi_list)) - adapter->state = VNIC_CLOSED; - else - adapter->state = reset_state; - - return 0; - } + if (rc) + return IBMVNIC_OPEN_FAILED; return 0; } @@ -1965,40 +2142,65 @@ static void __ibmvnic_reset(struct work_struct *work) { struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; - bool we_lock_rtnl = false; u32 reset_state; int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); - /* netif_set_real_num_xx_queues needs to take rtnl lock here - * unless wait_for_reset is set, in which case the rtnl lock - * has already been taken before initializing the reset - */ - if (!adapter->wait_for_reset) { - rtnl_lock(); - we_lock_rtnl = true; + if (test_and_set_bit_lock(0, &adapter->resetting)) { + schedule_delayed_work(&adapter->ibmvnic_delayed_reset, + IBMVNIC_RESET_DELAY); + return; } + reset_state = adapter->state; rwi = get_next_rwi(adapter); while (rwi) { - if (adapter->force_reset_recovery) { - adapter->force_reset_recovery = false; - rc = do_hard_reset(adapter, rwi, reset_state); + if (adapter->state == VNIC_REMOVING || + adapter->state == VNIC_REMOVED) { + kfree(rwi); + rc = EBUSY; + break; + } + + if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) { + /* CHANGE_PARAM requestor holds rtnl_lock */ + rc = do_change_param_reset(adapter, rwi, reset_state); + } else if 
(adapter->force_reset_recovery) { + /* Transport event occurred during previous reset */ + if (adapter->wait_for_reset) { + /* Previous was CHANGE_PARAM; caller locked */ + adapter->force_reset_recovery = false; + rc = do_hard_reset(adapter, rwi, reset_state); + } else { + rtnl_lock(); + adapter->force_reset_recovery = false; + rc = do_hard_reset(adapter, rwi, reset_state); + rtnl_unlock(); + } } else { rc = do_reset(adapter, rwi, reset_state); } kfree(rwi); - if (rc && rc != IBMVNIC_INIT_FAILED && + if (rc == IBMVNIC_OPEN_FAILED) { + if (list_empty(&adapter->rwi_list)) + adapter->state = VNIC_CLOSED; + else + adapter->state = reset_state; + rc = 0; + } else if (rc && rc != IBMVNIC_INIT_FAILED && !adapter->force_reset_recovery) break; rwi = get_next_rwi(adapter); + + if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || + rwi->reset_reason == VNIC_RESET_MOBILITY)) + adapter->force_reset_recovery = true; } if (adapter->wait_for_reset) { - adapter->wait_for_reset = false; adapter->reset_done_rc = rc; complete(&adapter->reset_done); } @@ -2008,9 +2210,16 @@ static void __ibmvnic_reset(struct work_struct *work) free_all_rwi(adapter); } - adapter->resetting = false; - if (we_lock_rtnl) - rtnl_unlock(); + clear_bit_unlock(0, &adapter->resetting); +} + +static void __ibmvnic_delayed_reset(struct work_struct *work) +{ + struct ibmvnic_adapter *adapter; + + adapter = container_of(work, struct ibmvnic_adapter, + ibmvnic_delayed_reset.work); + __ibmvnic_reset(&adapter->ibmvnic_reset); } static int ibmvnic_reset(struct ibmvnic_adapter *adapter, @@ -2065,18 +2274,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); spin_unlock_irqrestore(&adapter->rwi_lock, flags); - adapter->resetting = true; netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); return 0; err: - if (adapter->wait_for_reset) - adapter->wait_for_reset = false; return -ret; } -static void ibmvnic_tx_timeout(struct net_device *dev) +static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct ibmvnic_adapter *adapter = netdev_priv(dev); @@ -2112,7 +2318,7 @@ restart_poll: u16 offset; u8 flags = 0; - if (unlikely(adapter->resetting && + if (unlikely(test_bit(0, &adapter->resetting) && adapter->reset_reason != VNIC_RESET_NON_FATAL)) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); napi_complete_done(napi, frames_processed); @@ -2203,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; - init_completion(&adapter->reset_done); + reinit_completion(&adapter->reset_done); adapter->wait_for_reset = true; rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); - if (rc) - return rc; - wait_for_completion(&adapter->reset_done); + + if (rc) { + ret = rc; + goto out; + } + rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); + if (rc) { + ret = -ENODEV; + goto out; + } ret = 0; if (adapter->reset_done_rc) { @@ -2219,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) adapter->desired.rx_entries = adapter->fallback.rx_entries; adapter->desired.tx_entries = adapter->fallback.tx_entries; - init_completion(&adapter->reset_done); + reinit_completion(&adapter->reset_done); adapter->wait_for_reset = true; rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); - if (rc) - return ret; - 
wait_for_completion(&adapter->reset_done); + if (rc) { + ret = rc; + goto out; + } + rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, + 60000); + if (rc) { + ret = -ENODEV; + goto out; + } } +out: adapter->wait_for_reset = false; return ret; @@ -2490,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, cpu_to_be32(sizeof(struct ibmvnic_statistics)); /* Wait for data to be written */ - init_completion(&adapter->stats_done); + reinit_completion(&adapter->stats_done); rc = ibmvnic_send_crq(adapter, &crq); if (rc) return; - wait_for_completion(&adapter->stats_done); + rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); + if (rc) + return; for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, @@ -2763,12 +2986,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, return 1; } - if (adapter->resetting && + if (test_bit(0, &adapter->resetting) && adapter->reset_reason == VNIC_RESET_MOBILITY) { u64 val = (0xff000000) | scrq->hw_irq; rc = plpar_hcall_norets(H_EOI, val); - if (rc) + /* H_EOI would fail with rc = H_FUNCTION when running + * in XIVE mode which is expected, but not an error. + */ + if (rc && (rc != H_FUNCTION)) dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc); } @@ -3313,7 +3539,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, if (rc) { if (rc == H_CLOSED) { dev_warn(dev, "CRQ Queue closed\n"); - if (adapter->resetting) + if (test_bit(0, &adapter->resetting)) ibmvnic_reset(adapter, VNIC_RESET_FATAL); } @@ -4292,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter) memset(&crq, 0, sizeof(crq)); crq.query_phys_parms.first = IBMVNIC_CRQ_CMD; crq.query_phys_parms.cmd = QUERY_PHYS_PARMS; - init_completion(&adapter->fw_done); + + mutex_lock(&adapter->fw_lock); + adapter->fw_done_rc = 0; + reinit_completion(&adapter->fw_done); + rc = ibmvnic_send_crq(adapter, &crq); - if (rc) + if (rc) { + mutex_unlock(&adapter->fw_lock); + return rc; + } + + rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); + if (rc) { + mutex_unlock(&adapter->fw_lock); return rc; - wait_for_completion(&adapter->fw_done); + } + + mutex_unlock(&adapter->fw_lock); return adapter->fw_done_rc ? 
-EIO : 0; } @@ -4305,13 +4544,14 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, { struct net_device *netdev = adapter->netdev; int rc; + __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); rc = crq->query_phys_parms_rsp.rc.code; if (rc) { netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc); return rc; } - switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) { + switch (rspeed) { case IBMVNIC_10MBPS: adapter->speed = SPEED_10; break; @@ -4337,8 +4577,8 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq, adapter->speed = SPEED_100000; break; default: - netdev_warn(netdev, "Unknown speed 0x%08x\n", - cpu_to_be32(crq->query_phys_parms_rsp.speed)); + if (netif_carrier_ok(netdev)) + netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed); adapter->speed = SPEED_UNKNOWN; } if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) @@ -4388,7 +4628,16 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, case IBMVNIC_CRQ_XPORT_EVENT: netif_carrier_off(netdev); adapter->crq.active = false; - if (adapter->resetting) + /* terminate any thread waiting for a response + * from the device + */ + if (!completion_done(&adapter->fw_done)) { + adapter->fw_done_rc = -EIO; + complete(&adapter->fw_done); + } + if (!completion_done(&adapter->stats_done)) + complete(&adapter->stats_done); + if (test_bit(0, &adapter->resetting)) adapter->force_reset_recovery = true; if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Migrated, re-enabling adapter\n"); @@ -4726,7 +4975,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) return -1; } - if (adapter->resetting && !adapter->wait_for_reset && + if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && adapter->reset_reason != VNIC_RESET_MOBILITY) { if (adapter->req_rx_queues != old_num_rx_queues || adapter->req_tx_queues != old_num_tx_queues) { @@ -4838,10 +5087,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) spin_lock_init(&adapter->stats_lock); INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); + INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset, + __ibmvnic_delayed_reset); INIT_LIST_HEAD(&adapter->rwi_list); spin_lock_init(&adapter->rwi_lock); + mutex_init(&adapter->fw_lock); init_completion(&adapter->init_done); - adapter->resetting = false; + init_completion(&adapter->fw_done); + init_completion(&adapter->reset_done); + init_completion(&adapter->stats_done); + clear_bit(0, &adapter->resetting); do { rc = init_crq_queue(adapter); @@ -4898,6 +5153,7 @@ ibmvnic_stats_fail: ibmvnic_init_fail: release_sub_crqs(adapter, 1); release_crq_queue(adapter); + mutex_destroy(&adapter->fw_lock); free_netdev(netdev); return rc; @@ -4922,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev) adapter->state = VNIC_REMOVED; rtnl_unlock(); + mutex_destroy(&adapter->fw_lock); device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 70bd286f8932..60eccaf91b12 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -20,6 +20,7 @@ #define IBMVNIC_INVALID_MAP -1 #define IBMVNIC_STATS_TIMEOUT 1 #define IBMVNIC_INIT_FAILED 2 +#define IBMVNIC_OPEN_FAILED 3 /* basic structures plus 100 2k buffers */ #define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305 @@ -38,6 +39,8 @@ #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) #define IBMVNIC_BUFFER_HLEN 500 +#define IBMVNIC_RESET_DELAY 
100 + static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = { #define IBMVNIC_USE_SERVER_MAXES 0x1 "use-server-maxes" @@ -1023,6 +1026,8 @@ struct ibmvnic_adapter { int init_done_rc; struct completion fw_done; + /* Used for serialization of device commands */ + struct mutex fw_lock; int fw_done_rc; struct completion reset_done; @@ -1076,7 +1081,8 @@ struct ibmvnic_adapter { spinlock_t rwi_lock; struct list_head rwi_list; struct work_struct ibmvnic_reset; - bool resetting; + struct delayed_work ibmvnic_delayed_reset; + unsigned long resetting; bool napi_enabled, from_passive_init; bool failover_pending; |
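
Several of the ibmvnic hunks above replace unbounded wait_for_completion() calls with a bounded wait that also notices a dead CRQ (the new ibmvnic_wait_for_completion(), called with fw_lock held). The following is a minimal standalone sketch of that wait pattern only, not code from the patch; struct my_dev, my_dev_is_alive() and my_wait_for_completion() are hypothetical placeholder names standing in for struct ibmvnic_adapter and ibmvnic_wait_for_completion().

/* Illustrative sketch -- hypothetical names, kernel-style C. */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_dev {
	bool active;		/* cleared when the transport drops */
	struct completion done;	/* signalled by the response handler */
};

static bool my_dev_is_alive(struct my_dev *dev)
{
	return dev->active;
}

/* Wait for @done in five slices so a dead device is noticed between
 * slices instead of blocking for the full timeout.
 */
static int my_wait_for_completion(struct my_dev *dev, struct completion *done,
				  unsigned long timeout_ms)
{
	unsigned long slice = msecs_to_jiffies(timeout_ms / 5);
	int retries = 5;

	while (retries--) {
		if (!my_dev_is_alive(dev))
			return -ENODEV;	/* device went away; stop waiting */
		if (wait_for_completion_timeout(done, slice))
			return 0;	/* completed within this slice */
	}
	return -ETIMEDOUT;		/* all slices elapsed */
}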