Diffstat (limited to 'drivers/net/ethernet/ibm/ibmvnic.c')
-rw-r--r--   drivers/net/ethernet/ibm/ibmvnic.c   477
1 file changed, 367 insertions, 110 deletions
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cebd20f3128d..c75239d8820f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
 	return rc;
 }
 
+/**
+ * ibmvnic_wait_for_completion - Check device state and wait for completion
+ * @adapter: private device data
+ * @comp_done: completion structure to wait for
+ * @timeout: time to wait in milliseconds
+ *
+ * Wait for a completion signal or until the timeout limit is reached
+ * while checking that the device is still active.
+ */
+static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
+				       struct completion *comp_done,
+				       unsigned long timeout)
+{
+	struct net_device *netdev;
+	unsigned long div_timeout;
+	u8 retry;
+
+	netdev = adapter->netdev;
+	retry = 5;
+	div_timeout = msecs_to_jiffies(timeout / retry);
+	while (true) {
+		if (!adapter->crq.active) {
+			netdev_err(netdev, "Device down!\n");
+			return -ENODEV;
+		}
+		if (!retry--)
+			break;
+		if (wait_for_completion_timeout(comp_done, div_timeout))
+			return 0;
+	}
+	netdev_err(netdev, "Operation timed out.\n");
+	return -ETIMEDOUT;
+}
+
 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb, int size)
 {
@@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	ltb->map_id = adapter->map_id;
 	adapter->map_id++;
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
 	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 	if (rc) {
 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_err(dev,
+			"Long term map request aborted or timed out,rc = %d\n",
+			rc);
+		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
 	}
-	wait_for_completion(&adapter->fw_done);
 
 	if (adapter->fw_done_rc) {
 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
 			adapter->fw_done_rc);
 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+		mutex_unlock(&adapter->fw_lock);
 		return -1;
 	}
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
 
@@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb)
 {
+	struct device *dev = &adapter->vdev->dev;
 	int rc;
 
 	memset(ltb->buff, 0, ltb->size);
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+
+	reinit_completion(&adapter->fw_done);
 	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
-	if (rc)
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_info(dev,
+			 "Reset failed, long term map request timed out or aborted\n");
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
-	wait_for_completion(&adapter->fw_done);
+	}
 
 	if (adapter->fw_done_rc) {
-		dev_info(&adapter->vdev->dev,
+		dev_info(dev,
 			 "Reset failed, attempting to free and reallocate buffer\n");
 		free_long_term_buff(adapter, ltb);
+		mutex_unlock(&adapter->fw_lock);
 		return alloc_long_term_buff(adapter, ltb, ltb->size);
 	}
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
 
@@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 	if (adapter->vpd->buff)
 		len = adapter->vpd->len;
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
+
 	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 	rc = ibmvnic_send_crq(adapter, &crq);
-	if (rc)
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
-	wait_for_completion(&adapter->fw_done);
+	}
+	mutex_unlock(&adapter->fw_lock);
 
 	if (!adapter->vpd->len)
 		return -ENODATA;
@@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 		return -ENOMEM;
 	}
 
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
 	reinit_completion(&adapter->fw_done);
+
 	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
 	crq.get_vpd.cmd = GET_VPD;
 	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
@@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
 	if (rc) {
 		kfree(adapter->vpd->buff);
 		adapter->vpd->buff = NULL;
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
+		kfree(adapter->vpd->buff);
+		adapter->vpd->buff = NULL;
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
 	}
-	wait_for_completion(&adapter->fw_done);
+	mutex_unlock(&adapter->fw_lock);
 
 	return 0;
 }
@@ -1207,7 +1295,7 @@ static void ibmvnic_cleanup(struct net_device *netdev)
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
 	/* ensure that transmissions are stopped if called by do_reset */
-	if (adapter->resetting)
+	if (test_bit(0, &adapter->resetting))
 		netif_tx_disable(netdev);
 	else
 		netif_tx_stop_all_queues(netdev);
@@ -1428,7 +1516,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	u8 proto = 0;
 	netdev_tx_t ret = NETDEV_TX_OK;
 
-	if (adapter->resetting) {
+	if (test_bit(0, &adapter->resetting)) {
 		if (!netif_subqueue_stopped(netdev, skb))
 			netif_stop_subqueue(netdev, queue_num);
 		dev_kfree_skb_any(skb);
@@ -1485,7 +1573,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 			memcpy(dst + cur,
 			       page_address(skb_frag_page(frag)) +
-			       frag->page_offset, skb_frag_size(frag));
+			       skb_frag_off(frag), skb_frag_size(frag));
 			cur += skb_frag_size(frag);
 		}
 	} else {
@@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
 	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
 	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
 
-	init_completion(&adapter->fw_done);
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
+
 	rc = ibmvnic_send_crq(adapter, &crq);
 	if (rc) {
 		rc = -EIO;
+		mutex_unlock(&adapter->fw_lock);
 		goto err;
 	}
 
-	wait_for_completion(&adapter->fw_done);
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
-	if (adapter->fw_done_rc) {
+	if (rc || adapter->fw_done_rc) {
 		rc = -EIO;
+		mutex_unlock(&adapter->fw_lock);
 		goto err;
 	}
-
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 err:
 	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
@@ -1724,6 +1817,86 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
 }
 
 /**
+ * do_change_param_reset returns zero if we are able to keep processing reset
+ * events, or non-zero if we hit a fatal error and must halt.
+ */
+static int do_change_param_reset(struct ibmvnic_adapter *adapter,
+				 struct ibmvnic_rwi *rwi,
+				 u32 reset_state)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i, rc;
+
+	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
+		   rwi->reset_reason);
+
+	netif_carrier_off(netdev);
+	adapter->reset_reason = rwi->reset_reason;
+
+	ibmvnic_cleanup(netdev);
+
+	if (reset_state == VNIC_OPEN) {
+		rc = __ibmvnic_close(netdev);
+		if (rc)
+			return rc;
+	}
+
+	release_resources(adapter);
+	release_sub_crqs(adapter, 1);
+	release_crq_queue(adapter);
+
+	adapter->state = VNIC_PROBED;
+
+	rc = init_crq_queue(adapter);
+
+	if (rc) {
+		netdev_err(adapter->netdev,
+			   "Couldn't initialize crq. rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ibmvnic_reset_init(adapter);
+	if (rc)
+		return IBMVNIC_INIT_FAILED;
+
+	/* If the adapter was in PROBE state prior to the reset,
+	 * exit here.
+	 */
+	if (reset_state == VNIC_PROBED)
+		return 0;
+
+	rc = ibmvnic_login(netdev);
+	if (rc) {
+		adapter->state = reset_state;
+		return rc;
+	}
+
+	rc = init_resources(adapter);
+	if (rc)
+		return rc;
+
+	ibmvnic_disable_irqs(adapter);
+
+	adapter->state = VNIC_CLOSED;
+
+	if (reset_state == VNIC_CLOSED)
+		return 0;
+
+	rc = __ibmvnic_open(netdev);
+	if (rc)
+		return IBMVNIC_OPEN_FAILED;
+
+	/* refresh device's multicast list */
+	ibmvnic_set_multi(netdev);
+
+	/* kick napi */
+	for (i = 0; i < adapter->req_rx_queues; i++)
+		napi_schedule(&adapter->napi[i]);
+
+	return 0;
+}
+
+/**
  * do_reset returns zero if we are able to keep processing reset events, or
  * non-zero if we hit a fatal error and must halt.
  */
@@ -1738,6 +1911,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
 		   rwi->reset_reason);
 
+	rtnl_lock();
+
 	netif_carrier_off(netdev);
 	adapter->reset_reason = rwi->reset_reason;
 
@@ -1751,16 +1926,25 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	if (reset_state == VNIC_OPEN &&
 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
 	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
-		rc = __ibmvnic_close(netdev);
+		adapter->state = VNIC_CLOSING;
+
+		/* Release the RTNL lock before link state change and
+		 * re-acquire after the link state change to allow
+		 * linkwatch_event to grab the RTNL lock and run during
+		 * a reset.
+		 */
+		rtnl_unlock();
+		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+		rtnl_lock();
 		if (rc)
-			return rc;
-	}
+			goto out;
 
-	if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
-	    adapter->wait_for_reset) {
-		release_resources(adapter);
-		release_sub_crqs(adapter, 1);
-		release_crq_queue(adapter);
+		if (adapter->state != VNIC_CLOSING) {
+			rc = -1;
+			goto out;
+		}
+
+		adapter->state = VNIC_CLOSED;
 	}
 
 	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -1769,9 +1953,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		 */
 		adapter->state = VNIC_PROBED;
 
-		if (adapter->wait_for_reset) {
-			rc = init_crq_queue(adapter);
-		} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
 			rc = ibmvnic_reenable_crq_queue(adapter);
 			release_sub_crqs(adapter, 1);
 		} else {
@@ -1783,36 +1965,35 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		if (rc) {
 			netdev_err(adapter->netdev,
 				   "Couldn't initialize crq. rc=%d\n", rc);
-			return rc;
+			goto out;
 		}
 
 		rc = ibmvnic_reset_init(adapter);
-		if (rc)
-			return IBMVNIC_INIT_FAILED;
+		if (rc) {
+			rc = IBMVNIC_INIT_FAILED;
+			goto out;
+		}
 
 		/* If the adapter was in PROBE state prior to the reset,
 		 * exit here.
 		 */
-		if (reset_state == VNIC_PROBED)
-			return 0;
+		if (reset_state == VNIC_PROBED) {
			rc = 0;
+			goto out;
+		}
 
 		rc = ibmvnic_login(netdev);
 		if (rc) {
 			adapter->state = reset_state;
-			return rc;
+			goto out;
 		}
 
-		if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
-		    adapter->wait_for_reset) {
-			rc = init_resources(adapter);
-			if (rc)
-				return rc;
-		} else if (adapter->req_rx_queues != old_num_rx_queues ||
-			   adapter->req_tx_queues != old_num_tx_queues ||
-			   adapter->req_rx_add_entries_per_subcrq !=
-			   old_num_rx_slots ||
-			   adapter->req_tx_entries_per_subcrq !=
-			   old_num_tx_slots) {
+		if (adapter->req_rx_queues != old_num_rx_queues ||
+		    adapter->req_tx_queues != old_num_tx_queues ||
+		    adapter->req_rx_add_entries_per_subcrq !=
+		    old_num_rx_slots ||
+		    adapter->req_tx_entries_per_subcrq !=
+		    old_num_tx_slots) {
 			release_rx_pools(adapter);
 			release_tx_pools(adapter);
 			release_napi(adapter);
@@ -1820,32 +2001,30 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 			rc = init_resources(adapter);
 			if (rc)
-				return rc;
+				goto out;
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
-				return rc;
+				goto out;
 
 			rc = reset_rx_pools(adapter);
 			if (rc)
-				return rc;
+				goto out;
 		}
 		ibmvnic_disable_irqs(adapter);
 	}
 	adapter->state = VNIC_CLOSED;
 
-	if (reset_state == VNIC_CLOSED)
-		return 0;
+	if (reset_state == VNIC_CLOSED) {
+		rc = 0;
+		goto out;
+	}
 
 	rc = __ibmvnic_open(netdev);
 	if (rc) {
-		if (list_empty(&adapter->rwi_list))
-			adapter->state = VNIC_CLOSED;
-		else
-			adapter->state = reset_state;
-
-		return 0;
+		rc = IBMVNIC_OPEN_FAILED;
+		goto out;
 	}
 
 	/* refresh device's multicast list */
@@ -1855,11 +2034,15 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	for (i = 0; i < adapter->req_rx_queues; i++)
 		napi_schedule(&adapter->napi[i]);
 
-	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
-	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
+	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
-	return 0;
+	rc = 0;
+
+out:
+	rtnl_unlock();
+
+	return rc;
 }
 
 static int do_hard_reset(struct ibmvnic_adapter *adapter,
@@ -1919,14 +2102,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 		return 0;
 
 	rc = __ibmvnic_open(netdev);
-	if (rc) {
-		if (list_empty(&adapter->rwi_list))
-			adapter->state = VNIC_CLOSED;
-		else
-			adapter->state = reset_state;
-
-		return 0;
-	}
+	if (rc)
+		return IBMVNIC_OPEN_FAILED;
 
 	return 0;
 }
@@ -1965,40 +2142,65 @@ static void __ibmvnic_reset(struct work_struct *work)
 {
 	struct ibmvnic_rwi *rwi;
 	struct ibmvnic_adapter *adapter;
-	bool we_lock_rtnl = false;
 	u32 reset_state;
 	int rc = 0;
 
 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 
-	/* netif_set_real_num_xx_queues needs to take rtnl lock here
-	 * unless wait_for_reset is set, in which case the rtnl lock
-	 * has already been taken before initializing the reset
-	 */
-	if (!adapter->wait_for_reset) {
-		rtnl_lock();
-		we_lock_rtnl = true;
+	if (test_and_set_bit_lock(0, &adapter->resetting)) {
+		schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
+				      IBMVNIC_RESET_DELAY);
+		return;
 	}
+
 	reset_state = adapter->state;
 
 	rwi = get_next_rwi(adapter);
 	while (rwi) {
-		if (adapter->force_reset_recovery) {
-			adapter->force_reset_recovery = false;
-			rc = do_hard_reset(adapter, rwi, reset_state);
+		if (adapter->state == VNIC_REMOVING ||
+		    adapter->state == VNIC_REMOVED) {
+			kfree(rwi);
+			rc = EBUSY;
+			break;
+		}
+
+		if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+			/* CHANGE_PARAM requestor holds rtnl_lock */
+			rc = do_change_param_reset(adapter, rwi,
+						   reset_state);
+		} else if (adapter->force_reset_recovery) {
+			/* Transport event occurred during previous reset */
+			if (adapter->wait_for_reset) {
+				/* Previous was CHANGE_PARAM; caller locked */
+				adapter->force_reset_recovery = false;
+				rc = do_hard_reset(adapter, rwi, reset_state);
+			} else {
+				rtnl_lock();
+				adapter->force_reset_recovery = false;
+				rc = do_hard_reset(adapter, rwi, reset_state);
+				rtnl_unlock();
+			}
 		} else {
 			rc = do_reset(adapter, rwi, reset_state);
 		}
 		kfree(rwi);
-		if (rc && rc != IBMVNIC_INIT_FAILED &&
+		if (rc == IBMVNIC_OPEN_FAILED) {
+			if (list_empty(&adapter->rwi_list))
+				adapter->state = VNIC_CLOSED;
+			else
+				adapter->state = reset_state;
+			rc = 0;
+		} else if (rc && rc != IBMVNIC_INIT_FAILED &&
 		    !adapter->force_reset_recovery)
 			break;
 
 		rwi = get_next_rwi(adapter);
+
+		if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
+			    rwi->reset_reason == VNIC_RESET_MOBILITY))
+			adapter->force_reset_recovery = true;
 	}
 
 	if (adapter->wait_for_reset) {
-		adapter->wait_for_reset = false;
 		adapter->reset_done_rc = rc;
 		complete(&adapter->reset_done);
 	}
@@ -2008,9 +2210,16 @@ static void __ibmvnic_reset(struct work_struct *work)
 		free_all_rwi(adapter);
 	}
 
-	adapter->resetting = false;
-	if (we_lock_rtnl)
-		rtnl_unlock();
+	clear_bit_unlock(0, &adapter->resetting);
+}
+
+static void __ibmvnic_delayed_reset(struct work_struct *work)
+{
+	struct ibmvnic_adapter *adapter;
+
+	adapter = container_of(work, struct ibmvnic_adapter,
+			       ibmvnic_delayed_reset.work);
+	__ibmvnic_reset(&adapter->ibmvnic_reset);
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -2065,18 +2274,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
 	rwi->reset_reason = reason;
 	list_add_tail(&rwi->list, &adapter->rwi_list);
 	spin_unlock_irqrestore(&adapter->rwi_lock, flags);
-	adapter->resetting = true;
 	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
 	schedule_work(&adapter->ibmvnic_reset);
 
 	return 0;
 err:
-	if (adapter->wait_for_reset)
-		adapter->wait_for_reset = false;
-
 	return -ret;
 }
 
-static void ibmvnic_tx_timeout(struct net_device *dev)
+static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(dev);
 
@@ -2112,7 +2318,7 @@ restart_poll:
 		u16 offset;
 		u8 flags = 0;
 
-		if (unlikely(adapter->resetting &&
+		if (unlikely(test_bit(0, &adapter->resetting) &&
 			     adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
 			enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 			napi_complete_done(napi, frames_processed);
@@ -2203,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
 	adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
 	adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
 
-	init_completion(&adapter->reset_done);
+	reinit_completion(&adapter->reset_done);
 	adapter->wait_for_reset = true;
 	rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
-	if (rc)
-		return rc;
-	wait_for_completion(&adapter->reset_done);
+
+	if (rc) {
+		ret = rc;
+		goto out;
+	}
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
+	if (rc) {
+		ret = -ENODEV;
+		goto out;
+	}
 
 	ret = 0;
 	if (adapter->reset_done_rc) {
@@ -2219,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
 		adapter->desired.rx_entries = adapter->fallback.rx_entries;
 		adapter->desired.tx_entries = adapter->fallback.tx_entries;
 
-		init_completion(&adapter->reset_done);
+		reinit_completion(&adapter->reset_done);
 		adapter->wait_for_reset = true;
 		rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
-		if (rc)
-			return ret;
-		wait_for_completion(&adapter->reset_done);
+		if (rc) {
+			ret = rc;
+			goto out;
+		}
+		rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
						 60000);
+		if (rc) {
+			ret = -ENODEV;
+			goto out;
+		}
 	}
+out:
 	adapter->wait_for_reset = false;
 
 	return ret;
@@ -2490,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 		cpu_to_be32(sizeof(struct ibmvnic_statistics));
 
 	/* Wait for data to be written */
-	init_completion(&adapter->stats_done);
+	reinit_completion(&adapter->stats_done);
 	rc = ibmvnic_send_crq(adapter, &crq);
 	if (rc)
 		return;
-	wait_for_completion(&adapter->stats_done);
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
+	if (rc)
+		return;
 
 	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
 		data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
@@ -2763,12 +2986,15 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 		return 1;
 	}
 
-	if (adapter->resetting &&
+	if (test_bit(0, &adapter->resetting) &&
 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
 		u64 val = (0xff000000) | scrq->hw_irq;
 
 		rc = plpar_hcall_norets(H_EOI, val);
-		if (rc)
+		/* H_EOI would fail with rc = H_FUNCTION when running
+		 * in XIVE mode which is expected, but not an error.
+		 */
+		if (rc && (rc != H_FUNCTION))
 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
 				val, rc);
 	}
@@ -3313,7 +3539,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
 	if (rc) {
 		if (rc == H_CLOSED) {
 			dev_warn(dev, "CRQ Queue closed\n");
-			if (adapter->resetting)
+			if (test_bit(0, &adapter->resetting))
 				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
 		}
 
@@ -4292,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
 	memset(&crq, 0, sizeof(crq));
 	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
 	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
-	init_completion(&adapter->fw_done);
+
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+	reinit_completion(&adapter->fw_done);
+
 	rc = ibmvnic_send_crq(adapter, &crq);
-	if (rc)
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
 		return rc;
-	wait_for_completion(&adapter->fw_done);
+	}
+
+	mutex_unlock(&adapter->fw_lock);
 	return adapter->fw_done_rc ? -EIO : 0;
 }
 
@@ -4305,13 +4544,14 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
 {
 	struct net_device *netdev = adapter->netdev;
 	int rc;
+	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
 
 	rc = crq->query_phys_parms_rsp.rc.code;
 	if (rc) {
 		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
 		return rc;
 	}
-	switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) {
+	switch (rspeed) {
 	case IBMVNIC_10MBPS:
 		adapter->speed = SPEED_10;
 		break;
@@ -4337,8 +4577,8 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
 		adapter->speed = SPEED_100000;
 		break;
 	default:
-		netdev_warn(netdev, "Unknown speed 0x%08x\n",
-			    cpu_to_be32(crq->query_phys_parms_rsp.speed));
+		if (netif_carrier_ok(netdev))
+			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
 		adapter->speed = SPEED_UNKNOWN;
 	}
 	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
@@ -4388,7 +4628,16 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
 	case IBMVNIC_CRQ_XPORT_EVENT:
 		netif_carrier_off(netdev);
 		adapter->crq.active = false;
-		if (adapter->resetting)
+		/* terminate any thread waiting for a response
+		 * from the device
+		 */
+		if (!completion_done(&adapter->fw_done)) {
+			adapter->fw_done_rc = -EIO;
+			complete(&adapter->fw_done);
+		}
+		if (!completion_done(&adapter->stats_done))
+			complete(&adapter->stats_done);
+		if (test_bit(0, &adapter->resetting))
 			adapter->force_reset_recovery = true;
 		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
 			dev_info(dev, "Migrated, re-enabling adapter\n");
@@ -4726,7 +4975,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
 		return -1;
 	}
 
-	if (adapter->resetting && !adapter->wait_for_reset &&
+	if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
 	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
 		if (adapter->req_rx_queues != old_num_rx_queues ||
 		    adapter->req_tx_queues != old_num_tx_queues) {
@@ -4838,10 +5087,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	spin_lock_init(&adapter->stats_lock);
 
 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
+			  __ibmvnic_delayed_reset);
 	INIT_LIST_HEAD(&adapter->rwi_list);
 	spin_lock_init(&adapter->rwi_lock);
+	mutex_init(&adapter->fw_lock);
 	init_completion(&adapter->init_done);
-	adapter->resetting = false;
+	init_completion(&adapter->fw_done);
+	init_completion(&adapter->reset_done);
+	init_completion(&adapter->stats_done);
+	clear_bit(0, &adapter->resetting);
 
 	do {
 		rc = init_crq_queue(adapter);
@@ -4898,6 +5153,7 @@ ibmvnic_stats_fail:
 ibmvnic_init_fail:
 	release_sub_crqs(adapter, 1);
 	release_crq_queue(adapter);
+	mutex_destroy(&adapter->fw_lock);
 	free_netdev(netdev);
 
 	return rc;
@@ -4922,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
 	adapter->state = VNIC_REMOVED;
 
 	rtnl_unlock();
+	mutex_destroy(&adapter->fw_lock);
 	device_remove_file(&dev->dev, &dev_attr_failover);
 	free_netdev(netdev);
 	dev_set_drvdata(&dev->dev, NULL);
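
The ibmvnic_wait_for_completion() helper added at the top of this diff is the pattern the rest of the patch leans on: the caller's timeout is divided into a fixed number of slices, and the CRQ's active flag is re-checked between slices so a waiter returns -ENODEV as soon as the device goes down instead of sleeping out the whole budget. Below is a minimal userspace sketch of the same bounded-wait idea using POSIX threads; the names (demo_ctx, demo_wait_bounded), the five-slice split, and the plain bool flags are illustrative assumptions, not driver code.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct demo_ctx {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t cond;	/* init with PTHREAD_COND_INITIALIZER */
	bool done;		/* set by the responder thread */
	bool active;		/* cleared when the peer goes away */
};

/* Returns 0 on completion, ENODEV if the peer died, ETIMEDOUT otherwise. */
static int demo_wait_bounded(struct demo_ctx *ctx, unsigned int timeout_ms)
{
	unsigned int retry = 5;		/* same slice count as the driver helper */
	unsigned int slice_ms = timeout_ms / retry;
	int rc = 0;

	pthread_mutex_lock(&ctx->lock);
	while (!ctx->done) {
		struct timespec deadline;

		if (!ctx->active) {	/* peer vanished: fail fast */
			rc = ENODEV;
			break;
		}
		if (!retry--) {		/* whole timeout budget consumed */
			rc = ETIMEDOUT;
			break;
		}
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += slice_ms / 1000;
		deadline.tv_nsec += (long)(slice_ms % 1000) * 1000000L;
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_sec++;
			deadline.tv_nsec -= 1000000000L;
		}
		/* Sleep for at most one slice, then re-check both flags. */
		pthread_cond_timedwait(&ctx->cond, &ctx->lock, &deadline);
	}
	pthread_mutex_unlock(&ctx->lock);
	return rc;
}

The slicing does not shorten the total wait (five slices still add up to roughly timeout_ms); its only purpose is that the liveness check runs periodically rather than once.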
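
A second pattern worth calling out is how __ibmvnic_reset() becomes single-flight in this diff: it claims bit 0 of adapter->resetting with test_and_set_bit_lock(), requeues itself via the new ibmvnic_delayed_reset delayed work if another instance already holds the bit, and releases it with clear_bit_unlock() once the RWI list is drained. A rough userspace analogue of that claim/requeue/release shape, using C11 atomics and hypothetical names (run_reset_worker, schedule_retry_later), could look like this:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag running = ATOMIC_FLAG_INIT;

static void schedule_retry_later(void)
{
	/* stand-in for schedule_delayed_work(..., IBMVNIC_RESET_DELAY) */
	puts("worker busy: retry later");
}

static void drain_pending_work(void)
{
	puts("draining queued reset work");
}

void run_reset_worker(void)
{
	/* Claim the "I am the running worker" flag; if it was already set,
	 * another instance is active, so back off and ask to be retried.
	 */
	if (atomic_flag_test_and_set_explicit(&running, memory_order_acquire)) {
		schedule_retry_later();
		return;
	}
	drain_pending_work();
	/* Release pairs with the acquire above, like clear_bit_unlock(). */
	atomic_flag_clear_explicit(&running, memory_order_release);
}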