Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c  |  19
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c   |   4
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c       |  34
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h       |   3
-rw-r--r--  drivers/net/ethernet/ibm/emac/emac.h       |   2
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.h        |   2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c         |   1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c         | 241
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h         |  11
9 files changed, 180 insertions, 137 deletions
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ba580bfae512..3baabdc89726 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -778,12 +778,11 @@ static void check_sqs(struct ehea_port *port)
{
struct ehea_swqe *swqe;
int swqe_index;
- int i, k;
+ int i;
for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int ret;
- k = 0;
swqe = ehea_get_swqe(pr->qp, &swqe_index);
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
@@ -921,17 +920,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
return rx;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
- struct ehea_port *port = netdev_priv(dev);
- int i;
-
- for (i = 0; i < port->num_def_qps; i++)
- napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -2038,7 +2026,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
dev_consume_skb_any(skb);
}
-static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_swqe *swqe;
@@ -2953,9 +2941,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_open = ehea_open,
.ndo_stop = ehea_stop,
.ndo_start_xmit = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ehea_netpoll,
-#endif
.ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index a0820f72b25c..5e4e37132bf2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -125,7 +125,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
struct ehea_cq *cq;
struct h_epa epa;
u64 *cq_handle_ref, hret, rpage;
- u32 act_nr_of_entries, act_pages, counter;
+ u32 counter;
int ret;
void *vpage;
@@ -140,8 +140,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq->adapter = adapter;
cq_handle_ref = &cq->fw_handle;
- act_nr_of_entries = 0;
- act_pages = 0;
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 354c0982847b..760b2ad8e295 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -423,7 +423,7 @@ static void emac_hash_mc(struct emac_instance *dev)
{
const int regs = EMAC_XAHT_REGS(dev);
u32 *gaht_base = emac_gaht_base(dev);
- u32 gaht_temp[regs];
+ u32 gaht_temp[EMAC_XAHT_MAX_REGS];
struct netdev_hw_addr *ha;
int i;
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
- case 8192:
- ret |= EMAC4_MR1_RFS_8K;
- break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_RFS_4K;
break;
@@ -1409,7 +1409,7 @@ static inline u16 emac_tx_csum(struct emac_instance *dev,
return 0;
}
-static inline int emac_xmit_finish(struct emac_instance *dev, int len)
+static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
{
struct emac_regs __iomem *p = dev->emacp;
struct net_device *ndev = dev->ndev;
@@ -1436,7 +1436,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
}
/* Tx lock BH */
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
unsigned int len = skb->len;
@@ -1494,7 +1494,8 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot,
}
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
-static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2677,12 +2678,17 @@ static int emac_init_phy(struct emac_instance *dev)
if (of_phy_is_fixed_link(np)) {
int res = emac_dt_mdio_probe(dev);
- if (!res) {
- res = of_phy_register_fixed_link(np);
- if (res)
- mdiobus_unregister(dev->mii_bus);
+ if (res)
+ return res;
+
+ res = of_phy_register_fixed_link(np);
+ dev->phy_dev = of_phy_find_device(np);
+ if (res || !dev->phy_dev) {
+ mdiobus_unregister(dev->mii_bus);
+ return res ? res : -EINVAL;
}
- return res;
+ emac_adjust_link(dev->ndev);
+ put_device(&dev->phy_dev->mdio.dev);
}
return 0;
}
@@ -2964,6 +2970,10 @@ static int emac_init_config(struct emac_instance *dev)
dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
}
+ /* This should never happen */
+ if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
+ return -ENXIO;
+
DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 369de2cfb15b..84caa4a3fc52 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -390,6 +390,9 @@ static inline int emac_has_feature(struct emac_instance *dev,
#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
+/* The largest span between slots and widths above is 3 */
+#define EMAC_XAHT_MAX_REGS (1 << 3)
+
#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index e2f80cca9bed..0d2de6f67676 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -231,7 +231,7 @@ struct emac_regs {
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
-#define EMAC_STACR_STAC_WRITE 0x00000800
+#define EMAC_STACR_STAC_WRITE 0x00002000
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index eeade2ea8334..e4c20f0024f6 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -136,7 +136,7 @@ static inline int mal_rx_size(int len)
static inline int mal_tx_chunks(int len)
{
- return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
+ return DIV_ROUND_UP(len, MAL_MAX_TX_SIZE);
}
#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 525d8b89187b..a4681780a55d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index dafdd4ade705..67cc6d9c8fd7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
for (j = 0; j < rx_pool->size; j++) {
if (rx_pool->rx_buff[j].skb) {
- dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
- rx_pool->rx_buff[i].skb = NULL;
+ dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+ rx_pool->rx_buff[j].skb = NULL;
}
}
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
return 0;
}
- mutex_lock(&adapter->reset_lock);
-
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
- if (rc) {
- mutex_unlock(&adapter->reset_lock);
+ if (rc)
return rc;
- }
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- mutex_unlock(&adapter->reset_lock);
return rc;
}
}
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
- mutex_unlock(&adapter->reset_lock);
-
return rc;
}
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
- mutex_lock(&adapter->reset_lock);
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
- mutex_unlock(&adapter->reset_lock);
return rc;
}
@@ -1428,7 +1419,7 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
return 0;
}
-static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
@@ -1452,7 +1443,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u64 *handle_array;
int index = 0;
u8 proto = 0;
- int ret = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
if (adapter->resetting) {
if (!netif_subqueue_stopped(netdev, skb))
@@ -1545,7 +1536,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
- if (adapter->vlan_header_insertion) {
+ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
}
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
u64 old_num_rx_queues, old_num_tx_queues;
+ u64 old_num_rx_slots, old_num_tx_slots;
struct net_device *netdev = adapter->netdev;
int i, rc;
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
+ old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
+ old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
ibmvnic_cleanup(netdev);
@@ -1819,15 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
} else if (adapter->req_rx_queues != old_num_rx_queues ||
- adapter->req_tx_queues != old_num_tx_queues) {
- adapter->map_id = 1;
+ adapter->req_tx_queues != old_num_tx_queues ||
+ adapter->req_rx_add_entries_per_subcrq !=
+ old_num_rx_slots ||
+ adapter->req_tx_entries_per_subcrq !=
+ old_num_tx_slots) {
release_rx_pools(adapter);
release_tx_pools(adapter);
- init_rx_pools(netdev);
- init_tx_pools(netdev);
-
release_napi(adapter);
- init_napi(adapter);
+ release_vpd_data(adapter);
+
+ rc = init_resources(adapter);
+ if (rc)
+ return rc;
+
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -1860,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
- netdev_notify_peers(netdev);
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
netif_carrier_on(netdev);
@@ -1911,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
adapter->state = VNIC_PROBED;
return 0;
}
- /* netif_set_real_num_xx_queues needs to take rtnl lock here
- * unless wait_for_reset is set, in which case the rtnl lock
- * has already been taken before initializing the reset
- */
- if (!adapter->wait_for_reset) {
- rtnl_lock();
- rc = init_resources(adapter);
- rtnl_unlock();
- } else {
- rc = init_resources(adapter);
- }
+
+ rc = init_resources(adapter);
if (rc)
return rc;
@@ -1949,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rwi *rwi;
+ unsigned long flags;
- mutex_lock(&adapter->rwi_lock);
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
if (!list_empty(&adapter->rwi_list)) {
rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1960,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
rwi = NULL;
}
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
return rwi;
}
@@ -1980,13 +1971,21 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
+ bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
netdev = adapter->netdev;
- mutex_lock(&adapter->reset_lock);
+ /* netif_set_real_num_xx_queues needs to take rtnl lock here
+ * unless wait_for_reset is set, in which case the rtnl lock
+ * has already been taken before initializing the reset
+ */
+ if (!adapter->wait_for_reset) {
+ rtnl_lock();
+ we_lock_rtnl = true;
+ }
reset_state = adapter->state;
rwi = get_next_rwi(adapter);
@@ -2014,12 +2013,11 @@ static void __ibmvnic_reset(struct work_struct *work)
if (rc) {
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
- mutex_unlock(&adapter->reset_lock);
- return;
}
adapter->resetting = false;
- mutex_unlock(&adapter->reset_lock);
+ if (we_lock_rtnl)
+ rtnl_unlock();
}
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
@@ -2028,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
struct list_head *entry, *tmp_entry;
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
int ret;
if (adapter->state == VNIC_REMOVING ||
@@ -2044,21 +2043,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- mutex_lock(&adapter->rwi_lock);
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
}
}
- rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+ rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ibmvnic_close(netdev);
ret = ENOMEM;
goto err;
@@ -2072,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
adapter->resetting = true;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
@@ -2201,19 +2200,6 @@ restart_poll:
return frames_processed;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int i;
-
- replenish_pools(netdev_priv(dev));
- for (i = 0; i < adapter->req_rx_queues; i++)
- ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
- adapter->rx_scrq[i]);
-}
-#endif
-
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
int rc, ret;
@@ -2286,9 +2272,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ibmvnic_netpoll_controller,
-#endif
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
};
@@ -2358,8 +2341,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+ if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+ } else {
+ ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+ ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+ }
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2372,21 +2360,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int ret;
- if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
- ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
- netdev_err(netdev, "Invalid request.\n");
- netdev_err(netdev, "Max tx buffers = %llu\n",
- adapter->max_rx_add_entries_per_subcrq);
- netdev_err(netdev, "Max rx buffers = %llu\n",
- adapter->max_tx_entries_per_subcrq);
- return -EINVAL;
- }
-
+ ret = 0;
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- return wait_for_reset(adapter);
+ ret = wait_for_reset(adapter);
+
+ if (!ret &&
+ (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+ adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+ netdev_info(netdev,
+ "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+ ring->rx_pending, ring->tx_pending,
+ adapter->req_rx_add_entries_per_subcrq,
+ adapter->req_tx_entries_per_subcrq);
+ return ret;
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2394,8 +2384,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
+ if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
+ } else {
+ channels->max_rx = IBMVNIC_MAX_QUEUES;
+ channels->max_tx = IBMVNIC_MAX_QUEUES;
+ }
+
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -2408,11 +2404,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int ret;
+ ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- return wait_for_reset(adapter);
+ ret = wait_for_reset(adapter);
+
+ if (!ret &&
+ (adapter->req_rx_queues != channels->rx_count ||
+ adapter->req_tx_queues != channels->tx_count))
+ netdev_info(netdev,
+ "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+ channels->rx_count, channels->tx_count,
+ adapter->req_rx_queues, adapter->req_tx_queues);
+ return ret;
+
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2420,32 +2428,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- if (stringset != ETH_SS_STATS)
- return;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+ i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN,
+ "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN,
+ ibmvnic_priv_flags[i]);
+ break;
+ default:
+ return;
}
}
@@ -2458,6 +2477,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -2508,6 +2529,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+ if (which_maxes)
+ adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+ else
+ adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+ return 0;
+}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -2521,6 +2561,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
+ .get_priv_flags = ibmvnic_get_priv_flags,
+ .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
@@ -4719,8 +4761,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
- mutex_init(&adapter->reset_lock);
- mutex_init(&adapter->rwi_lock);
+ spin_lock_init(&adapter->rwi_lock);
adapter->resetting = false;
adapter->mac_change_pending = false;
@@ -4791,8 +4832,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
adapter->state = VNIC_REMOVING;
- unregister_netdev(netdev);
- mutex_lock(&adapter->reset_lock);
+ rtnl_lock();
+ unregister_netdevice(netdev);
release_resources(adapter);
release_sub_crqs(adapter, 1);
@@ -4803,7 +4844,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
- mutex_unlock(&adapter->reset_lock);
+ rtnl_unlock();
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f06eec145ca6..f2018dbebfa5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,8 @@
#define IBMVNIC_RX_WEIGHT 16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_QUEUES 10
+#define IBMVNIC_MAX_QUEUES 16
+#define IBMVNIC_MAX_QUEUE_SZ 4096
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
@@ -48,6 +49,11 @@
#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
#define IBMVNIC_BUFFER_HLEN 500
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES 0x1
+ "use-server-maxes"
+};
+
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -969,6 +975,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
@@ -1068,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
- struct mutex reset_lock, rwi_lock;
+ spinlock_t rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;