Diffstat (limited to 'drivers/net')
27 files changed, 371 insertions, 180 deletions
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 448d1fafc827..f4d81765221e 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -325,6 +325,8 @@ struct nicvf {
 	struct tasklet_struct qs_err_task;
 	struct work_struct reset_task;
 	struct nicvf_work rx_mode_work;
+	/* spinlock to protect workqueue arguments from concurrent access */
+	spinlock_t rx_mode_wq_lock;
 	/* PTP timestamp */
 	struct cavium_ptp *ptp_clock;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7135db45927e..135766c4296b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1923,17 +1923,12 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 	}
 }
-static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
+				     struct nicvf *nic)
 {
-	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-						  work.work);
-	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
 	union nic_mbx mbx = {};
 	int idx;
-	if (!vf_work)
-		return;
-
 	/* From the inside of VM code flow we have only 128 bits memory
 	 * available to send message to host's PF, so send all mc addrs
 	 * one by one, starting from flush command in case if kernel
@@ -1944,7 +1939,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
 	nicvf_send_msg_to_pf(nic, &mbx);
-	if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+	if (mode & BGX_XCAST_MCAST_FILTER) {
 		/* once enabling filtering, we need to signal to PF to add
 		 * its' own LMAC to the filter to accept packets for it.
 		 */
@@ -1954,23 +1949,46 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 	}
 	/* check if we have any specific MACs to be added to PF DMAC filter */
-	if (vf_work->mc) {
+	if (mc_addrs) {
 		/* now go through kernel list of MACs and add them one by one */
-		for (idx = 0; idx < vf_work->mc->count; idx++) {
+		for (idx = 0; idx < mc_addrs->count; idx++) {
 			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-			mbx.xcast.data.mac = vf_work->mc->mc[idx];
+			mbx.xcast.data.mac = mc_addrs->mc[idx];
 			nicvf_send_msg_to_pf(nic, &mbx);
 		}
-		kfree(vf_work->mc);
+		kfree(mc_addrs);
 	}
 	/* and finally set rx mode for PF accordingly */
 	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-	mbx.xcast.data.mode = vf_work->mode;
+	mbx.xcast.data.mode = mode;
 	nicvf_send_msg_to_pf(nic, &mbx);
 }
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+						  work.work);
+	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+	u8 mode;
+	struct xcast_addr_list *mc;
+
+	if (!vf_work)
+		return;
+
+	/* Save message data locally to prevent them from
+	 * being overwritten by next ndo_set_rx_mode call().
+	 */
+	spin_lock(&nic->rx_mode_wq_lock);
+	mode = vf_work->mode;
+	mc = vf_work->mc;
+	vf_work->mc = NULL;
+	spin_unlock(&nic->rx_mode_wq_lock);
+
+	__nicvf_set_rx_mode_task(mode, mc, nic);
+}
+
 static void nicvf_set_rx_mode(struct net_device *netdev)
 {
 	struct nicvf *nic = netdev_priv(netdev);
@@ -2004,9 +2022,12 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
 			}
 		}
 	}
+	spin_lock(&nic->rx_mode_wq_lock);
+	kfree(nic->rx_mode_work.mc);
 	nic->rx_mode_work.mc = mc_list;
 	nic->rx_mode_work.mode = mode;
-	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
+	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+	spin_unlock(&nic->rx_mode_wq_lock);
 }
 static const struct net_device_ops nicvf_netdev_ops = {
@@ -2163,6 +2184,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_WORK(&nic->reset_task, nicvf_reset_task);
 	INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+	spin_lock_init(&nic->rx_mode_wq_lock);
 	err = register_netdev(netdev);
 	if (err) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2edfdbdaae48..7b795edd9d3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3362,10 +3362,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = sysfs_create_group(&adapter->port[0]->dev.kobj, &cxgb3_attr_group);
+	if (err) {
+		dev_err(&pdev->dev, "cannot create sysfs group\n");
+		goto out_close_led;
+	}
 	print_port_info(adapter, ai);
 	return 0;
+out_close_led:
+	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+
 out_free_dev:
 	iounmap(adapter->regs);
 	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index fc534e91c6b2..144d5fe6b944 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -760,9 +760,9 @@ struct ixgbe_adapter {
 #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
 	u32 *rss_key;
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
 	struct ixgbe_ipsec *ipsec;
-#endif /* CONFIG_XFRM */
+#endif /* CONFIG_XFRM_OFFLOAD */
 };
 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 344a1f213a5f..c116f459945d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -158,7 +158,16 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
 	reg |= IXGBE_SECRXCTRL_RX_DIS;
 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
-	IXGBE_WRITE_FLUSH(hw);
+	/* If both Tx and Rx are ready there are no packets
+	 * that we need to flush so the loopback configuration
+	 * below is not necessary.
+	 */
+	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+		IXGBE_SECTXSTAT_SECTX_RDY;
+	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+		IXGBE_SECRXSTAT_SECRX_RDY;
+	if (t_rdy && r_rdy)
+		return;
 	/* If the tx fifo doesn't have link, but still has data,
 	 * we can't clear the tx sec block. Set the MAC loopback
@@ -185,7 +194,7 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
 			IXGBE_SECTXSTAT_SECTX_RDY;
 		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & IXGBE_SECRXSTAT_SECRX_RDY;
-	} while (!t_rdy && !r_rdy && limit--);
+	} while (!(t_rdy && r_rdy) && limit--);
 	/* undo loopback if we played with it earlier */
 	if (!link) {
@@ -966,10 +975,22 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
 **/
 void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_ipsec *ipsec;
+	u32 t_dis, r_dis;
 	size_t size;
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	/* If there is no support for either Tx or Rx offload
+	 * we should not be advertising support for IPsec.
+	 */
+	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
+		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
+	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
+		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
+	if (t_dis || r_dis)
 		return;
 	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
@@ -1001,13 +1022,6 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
 	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
-#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
-			    NETIF_F_HW_ESP_TX_CSUM | \
-			    NETIF_F_GSO_ESP)
-
-	adapter->netdev->features |= IXGBE_ESP_FEATURES;
-	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;
-
 	return;
 err2:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 893a9206e718..d361f570ca37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -593,6 +593,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 	}
 #endif
+	/* To support macvlan offload we have to use num_tc to
+	 * restrict the queues that can be used by the device.
+	 * By doing this we can avoid reporting a false number of
+	 * queues.
+	 */
+	if (vmdq_i > 1)
+		netdev_set_num_tc(adapter->netdev, 1);
+
 	/* populate TC0 for use by pool 0 */
 	netdev_set_tc_queue(adapter->netdev, 0, adapter->num_rx_queues_per_pool, 0);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0b1ba3ae159c..3e87dbbc9024 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6117,6 +6117,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 #ifdef CONFIG_IXGBE_DCB
 	ixgbe_init_dcb(adapter);
 #endif
+	ixgbe_init_ipsec_offload(adapter);
 	/* default flow control settings */
 	hw->fc.requested_mode = ixgbe_fc_full;
@@ -8822,14 +8823,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	} else {
 		netdev_reset_tc(dev);
-		/* To support macvlan offload we have to use num_tc to
-		 * restrict the queues that can be used by the device.
-		 * By doing this we can avoid reporting a false number of
-		 * queues.
-		 */
-		if (!tc && adapter->num_rx_pools > 1)
-			netdev_set_num_tc(dev, 1);
-
 		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
@@ -9904,7 +9897,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 	 * the TSO, so it's the exception.
 	 */
 	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
 		if (!skb->sp)
 #endif
 			features &= ~NETIF_F_TSO;
@@ -10437,6 +10430,14 @@ skip_sriov:
 	if (hw->mac.type >= ixgbe_mac_82599EB)
 		netdev->features |= NETIF_F_SCTP_CRC;
+#ifdef CONFIG_XFRM_OFFLOAD
+#define IXGBE_ESP_FEATURES (NETIF_F_HW_ESP | \
+			    NETIF_F_HW_ESP_TX_CSUM | \
+			    NETIF_F_GSO_ESP)
+
+	if (adapter->ipsec)
+		netdev->features |= IXGBE_ESP_FEATURES;
+#endif
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features | NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -10499,8 +10500,6 @@ skip_sriov:
 			   NETIF_F_FCOE_MTU;
 	}
 #endif /* IXGBE_FCOE */
-	ixgbe_init_ipsec_offload(adapter);
-
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
 		netdev->hw_features |= NETIF_F_LRO;
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index e8ed37749ab1..44cfb2021145 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -599,13 +599,15 @@ struct ixgbe_nvm_version {
 #define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004
 #define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001
-#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002
+#define IXGBE_SECTXSTAT_SECTX_OFF_DIS 0x00000002
+#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000004
 #define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001
 #define IXGBE_SECRXCTRL_RX_DIS 0x00000002
 #define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001
-#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002
+#define IXGBE_SECRXSTAT_SECRX_OFF_DIS 0x00000002
+#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000004
 /* LinkSec (MacSec) Registers */
 #define IXGBE_LSECTXCAP 0x08A00
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 77b2adb29341..6aaaf3d9ba31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4756,12 +4756,6 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
 	kfree(mlxsw_sp_rt6);
 }
-static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
-{
-	/* RTF_CACHE routes are ignored */
-	return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
-}
-
 static struct fib6_info *
 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 {
@@ -4771,11 +4765,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
-				 const struct fib6_info *nrt, bool replace)
+				 const struct fib6_info *nrt, bool append)
 {
 	struct mlxsw_sp_fib6_entry *fib6_entry;
-	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
+	if (!append)
 		return NULL;
 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4790,8 +4784,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
 			break;
 		if (rt->fib6_metric < nrt->fib6_metric)
 			continue;
-		if (rt->fib6_metric == nrt->fib6_metric &&
-		    mlxsw_sp_fib6_rt_can_mp(rt))
+		if (rt->fib6_metric == nrt->fib6_metric)
 			return fib6_entry;
 		if (rt->fib6_metric > nrt->fib6_metric)
 			break;
@@ -5170,7 +5163,7 @@ static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
 			      const struct fib6_info *nrt, bool replace)
 {
-	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
+	struct mlxsw_sp_fib6_entry *fib6_entry;
 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5179,18 +5172,13 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
 			continue;
 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
 			break;
-		if (replace && rt->fib6_metric == nrt->fib6_metric) {
-			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
-			    mlxsw_sp_fib6_rt_can_mp(nrt))
-				return fib6_entry;
-			if (mlxsw_sp_fib6_rt_can_mp(nrt))
-				fallback = fallback ?: fib6_entry;
-		}
+		if (replace && rt->fib6_metric == nrt->fib6_metric)
+			return fib6_entry;
 		if (rt->fib6_metric > nrt->fib6_metric)
-			return fallback ?: fib6_entry;
+			return fib6_entry;
 	}
-	return fallback;
+	return NULL;
 }
 static int
@@ -5316,7 +5304,8 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
 }
 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
-				    struct fib6_info *rt, bool replace)
+				    struct fib6_info *rt, bool replace,
+				    bool append)
 {
 	struct mlxsw_sp_fib6_entry *fib6_entry;
 	struct mlxsw_sp_fib_node *fib_node;
@@ -5342,7 +5331,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 	/* Before creating a new entry, try to append route to an existing
 	 * multipath entry.
 	 */
-	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
+	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
 	if (fib6_entry) {
 		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
 		if (err)
@@ -5350,6 +5339,14 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 		return 0;
 	}
+	/* We received an append event, yet did not find any route to
+	 * append to.
+	 */
+	if (WARN_ON(append)) {
+		err = -EINVAL;
+		goto err_fib6_entry_append;
+	}
+
 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
 	if (IS_ERR(fib6_entry)) {
 		err = PTR_ERR(fib6_entry);
@@ -5367,6 +5364,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 err_fib6_node_entry_link:
 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
 err_fib6_entry_create:
+err_fib6_entry_append:
 err_fib6_entry_nexthop_add:
 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
 	return err;
@@ -5717,7 +5715,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
 	struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work);
 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-	bool replace;
+	bool replace, append;
 	int err;
 	rtnl_lock();
@@ -5728,8 +5726,10 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
 	case FIB_EVENT_ENTRY_ADD:
 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
 		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
-					       fib_work->fen6_info.rt, replace);
+					       fib_work->fen6_info.rt, replace,
+					       append);
 		if (err)
 			mlxsw_sp_router_fib_abort(mlxsw_sp);
 		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e97652c40d13..eea5666a86b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1018,8 +1018,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
 	int err;
 	/* No need to continue if only VLAN flags were changed */
-	if (mlxsw_sp_port_vlan->bridge_port)
+	if (mlxsw_sp_port_vlan->bridge_port) {
+		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
 		return 0;
+	}
 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
 	if (err)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 19cfa162ac65..1decf3a1cad3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -455,6 +455,7 @@ static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
 	eth_hw_addr_random(nn->dp.netdev);
 	netif_keep_dst(nn->dp.netdev);
+	nn->vnic_no_name = true;
 	return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index ec524d97869d..78afe75129ab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -381,6 +381,8 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 	err = PTR_ERR_OR_ZERO(rt);
 	if (err)
 		return NOTIFY_DONE;
+
+	ip_rt_put(rt);
 #else
 	return NOTIFY_DONE;
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 57cb035dcc6d..2a71a9ffd095 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -590,6 +590,8 @@ struct nfp_net_dp {
 * @vnic_list: Entry on device vNIC list
 * @pdev: Backpointer to PCI device
 * @app: APP handle if available
+ * @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
+ * -EOPNOTSUPP to keep backwards compatibility (set by app)
 * @port: Pointer to nfp_port structure if vNIC is a port
 * @app_priv: APP private data for this vNIC
 */
@@ -663,6 +665,8 @@ struct nfp_net {
 	struct pci_dev *pdev;
 	struct nfp_app *app;
+	bool vnic_no_name;
+
 	struct nfp_port *port;
 	void *app_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 75110c8d6a90..d4c27f849f9b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3121,7 +3121,7 @@ static void nfp_net_stat64(struct net_device *netdev,
 	struct nfp_net *nn = netdev_priv(netdev);
 	int r;
-	for (r = 0; r < nn->dp.num_r_vecs; r++) {
+	for (r = 0; r < nn->max_r_vecs; r++) {
 		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
 		u64 data[3];
 		unsigned int start;
@@ -3286,7 +3286,7 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
 	if (nn->port)
 		return nfp_port_get_phys_port_name(netdev, name, len);
-	if (nn->dp.is_vf)
+	if (nn->dp.is_vf || nn->vnic_no_name)
 		return -EOPNOTSUPP;
 	n = snprintf(name, len, "n%d", nn->id);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index 2dd89dba9311..d32af598da90 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -98,21 +98,18 @@ struct nfp_resource {
 static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
 {
-	char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
 	struct nfp_resource_entry entry;
 	u32 cpp_id, key;
 	int ret, i;
 	cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
-	strncpy(name_pad, res->name, sizeof(name_pad));
-
 	/* Search for a matching entry */
-	if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+	if (!strcmp(res->name, NFP_RESOURCE_TBL_NAME)) {
 		nfp_err(cpp, "Grabbing device lock not supported\n");
 		return -EOPNOTSUPP;
 	}
-	key = crc32_posix(name_pad, sizeof(name_pad));
+	key = crc32_posix(res->name, NFP_RESOURCE_ENTRY_NAME_SZ);
 	for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
 		u64 addr = NFP_RESOURCE_TBL_BASE +
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index e78e5db39458..c694e3428dfc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -384,6 +384,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
 	}
 	sgmii_pdev = of_find_device_by_node(np);
+	of_node_put(np);
 	if (!sgmii_pdev) {
 		dev_err(&pdev->dev, "invalid internal-phy property\n");
 		return -ENODEV;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 4ff231df7322..c5979569fd60 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -334,9 +334,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
 	dwmac->data = (const struct meson8b_dwmac_data *) of_device_get_match_data(&pdev->dev);
-	if (!dwmac->data)
-		return -EINVAL;
-
+	if (!dwmac->data) {
+		ret = -EINVAL;
+		goto err_remove_config_dt;
+	}
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(dwmac->regs)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 14770fc8865e..1f50e83cafb2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -252,13 +252,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv)
 			return ret;
 		}
-		/* Run quirks, if needed */
-		if (entry->quirks) {
-			ret = entry->quirks(priv);
-			if (ret)
-				return ret;
-		}
-
+		/* Save quirks, if needed for posterior use */
+		priv->hwif_quirks = entry->quirks;
 		return 0;
 	}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 025efbf6145c..76649adf8fb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -129,6 +129,7 @@ struct stmmac_priv {
 	struct net_device *dev;
 	struct device *device;
 	struct mac_device_info *hw;
+	int (*hwif_quirks)(struct stmmac_priv *priv);
 	struct mutex lock;
 	/* RX Queue */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 11fb7c777d89..e79b0d7b388a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3182,17 +3182,22 @@ dma_map_err:
 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 {
-	struct ethhdr *ehdr;
+	struct vlan_ethhdr *veth;
+	__be16 vlan_proto;
 	u16 vlanid;
-	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
-	    NETIF_F_HW_VLAN_CTAG_RX &&
-	    !__vlan_get_tag(skb, &vlanid)) {
+	veth = (struct vlan_ethhdr *)skb->data;
+	vlan_proto = veth->h_vlan_proto;
+
+	if ((vlan_proto == htons(ETH_P_8021Q) &&
+	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+	    (vlan_proto == htons(ETH_P_8021AD) &&
+	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
 		/* pop the vlan tag */
-		ehdr = (struct ethhdr *)skb->data;
-		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+		vlanid = ntohs(veth->h_vlan_TCI);
+		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
 		skb_pull(skb, VLAN_HLEN);
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
 	}
 }
@@ -4130,6 +4135,13 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	if (priv->dma_cap.tsoen)
 		dev_info(priv->device, "TSO supported\n");
+	/* Run HW quirks, if any */
+	if (priv->hwif_quirks) {
+		ret = priv->hwif_quirks(priv);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
@@ -4235,7 +4247,7 @@ int stmmac_dvr_probe(struct device *device,
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
 	/* Both mac100 and gmac support receive VLAN tag detection */
-	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
 #endif
 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 69e31ceccfae..2a0c06e0f730 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -123,7 +123,6 @@
 * @phy_node: pointer to the PHY device node
 * @mii_bus: pointer to the MII bus
 * @last_link: last link status
- * @has_mdio: indicates whether MDIO is included in the HW
 */
 struct net_local {
@@ -144,7 +143,6 @@ struct net_local {
 	struct mii_bus *mii_bus;
 	int last_link;
-	bool has_mdio;
 };
@@ -863,14 +861,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
 	bus->write = xemaclite_mdio_write;
 	bus->parent = dev;
-	lp->mii_bus = bus;
-
 	rc = of_mdiobus_register(bus, np);
 	if (rc) {
 		dev_err(dev, "Failed to register mdio bus.\n");
 		goto err_register;
 	}
+	lp->mii_bus = bus;
+
 	return 0;
 err_register:
@@ -1145,9 +1143,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 	xemaclite_update_address(lp, ndev->dev_addr);
 	lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
-	rc = xemaclite_mdio_setup(lp, &ofdev->dev);
-	if (rc)
-		dev_warn(&ofdev->dev, "error registering MDIO bus\n");
+	xemaclite_mdio_setup(lp, &ofdev->dev);
 	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
@@ -1191,7 +1187,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
 	struct net_local *lp = netdev_priv(ndev);
 	/* Un-register the mii_bus, if configured */
-	if (lp->has_mdio) {
+	if (lp->mii_bus) {
 		mdiobus_unregister(lp->mii_bus);
 		mdiobus_free(lp->mii_bus);
 		lp->mii_bus = NULL;
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index 23a2d145813a..0765d5f61714 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -2,6 +2,5 @@ config HYPERV_NET
 	tristate "Microsoft Hyper-V virtual network driver"
 	depends on HYPERV
 	select UCS2_STRING
-	select FAILOVER
 	help
 	  Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 23304aca25f9..1a924b867b07 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -901,6 +901,8 @@ struct net_device_context {
 	struct hv_device *device_ctx;
 	/* netvsc_device */
 	struct netvsc_device __rcu *nvdev;
+	/* list of netvsc net_devices */
+	struct list_head list;
 	/* reconfigure work */
 	struct delayed_work dwork;
 	/* last reconfig time */
@@ -931,8 +933,6 @@ struct net_device_context {
 	u32 vf_alloc;
 	/* Serial number of the VF to team with */
 	u32 vf_serial;
-
-	struct failover *failover;
 };
 /* Per channel data */
@@ -1277,17 +1277,17 @@ struct ndis_lsov2_offload {
 struct ndis_ipsecv2_offload {
 	u32 encap;
-	u16 ip6;
-	u16 ip4opt;
-	u16 ip6ext;
-	u16 ah;
-	u16 esp;
-	u16 ah_esp;
-	u16 xport;
-	u16 tun;
-	u16 xport_tun;
-	u16 lso;
-	u16 extseq;
+	u8 ip6;
+	u8 ip4opt;
+	u8 ip6ext;
+	u8 ah;
+	u8 esp;
+	u8 ah_esp;
+	u8 xport;
+	u8 tun;
+	u8 xport_tun;
+	u8 lso;
+	u8 extseq;
 	u32 udp_esp;
 	u32 auth;
 	u32 crypto;
@@ -1295,8 +1295,8 @@ struct ndis_ipsecv2_offload {
 };
 struct ndis_rsc_offload {
-	u16 ip4;
-	u16 ip6;
+	u8 ip4;
+	u8 ip6;
 };
 struct ndis_encap_offload {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7b18a8c267c2..fe2256bf1d13 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,7 +42,6 @@
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
-#include <net/failover.h>
 #include "hyperv_net.h"
@@ -68,6 +67,8 @@ static int debug = -1;
 module_param(debug, int, 0444);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+static LIST_HEAD(netvsc_dev_list);
+
 static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -1780,6 +1781,36 @@ out_unlock:
 	rtnl_unlock();
 }
+static struct net_device *get_netvsc_bymac(const u8 *mac)
+{
+	struct net_device_context *ndev_ctx;
+
+	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+		struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
+
+		if (ether_addr_equal(mac, dev->perm_addr))
+			return dev;
+	}
+
+	return NULL;
+}
+
+static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
+{
+	struct net_device_context *net_device_ctx;
+	struct net_device *dev;
+
+	dev = netdev_master_upper_dev_get(vf_netdev);
+	if (!dev || dev->netdev_ops != &device_ops)
+		return NULL;	/* not a netvsc device */
+
+	net_device_ctx = netdev_priv(dev);
+	if (!rtnl_dereference(net_device_ctx->nvdev))
+		return NULL;	/* device is removed */
+
+	return dev;
+}
+
 /* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
@@ -1802,6 +1833,46 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
 	return RX_HANDLER_ANOTHER;
 }
+static int netvsc_vf_join(struct net_device *vf_netdev,
+			  struct net_device *ndev)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
+	int ret;
+
+	ret = netdev_rx_handler_register(vf_netdev,
+					 netvsc_vf_handle_frame, ndev);
+	if (ret != 0) {
+		netdev_err(vf_netdev,
+			   "can not register netvsc VF receive handler (err = %d)\n",
+			   ret);
+		goto rx_handler_failed;
+	}
+
+	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
+					   NULL, NULL, NULL);
+	if (ret != 0) {
+		netdev_err(vf_netdev,
+			   "can not set master device %s (err = %d)\n",
+			   ndev->name, ret);
+		goto upper_link_failed;
+	}
+
+	/* set slave flag before open to prevent IPv6 addrconf */
+	vf_netdev->flags |= IFF_SLAVE;
+
+	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+
+	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+
+	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
+	return 0;
+
+upper_link_failed:
+	netdev_rx_handler_unregister(vf_netdev);
+rx_handler_failed:
+	return ret;
+}
+
 static void __netvsc_vf_setup(struct net_device *ndev,
 			      struct net_device *vf_netdev)
 {
@@ -1852,95 +1923,104 @@ static void netvsc_vf_setup(struct work_struct *w)
 	rtnl_unlock();
 }
-static int netvsc_pre_register_vf(struct net_device *vf_netdev,
-				  struct net_device *ndev)
+static int netvsc_register_vf(struct net_device *vf_netdev)
 {
+	struct net_device *ndev;
 	struct net_device_context *net_device_ctx;
 	struct netvsc_device *netvsc_dev;
+	int ret;
+
+	if (vf_netdev->addr_len != ETH_ALEN)
+		return NOTIFY_DONE;
+
+	/*
+	 * We will use the MAC address to locate the synthetic interface to
+	 * associate with the VF interface. If we don't find a matching
+	 * synthetic interface, move on.
+	 */
+	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+	if (!ndev)
+		return NOTIFY_DONE;
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
-		return -ENODEV;
-
-	return 0;
-}
+		return NOTIFY_DONE;
-static int netvsc_register_vf(struct net_device *vf_netdev,
-			      struct net_device *ndev)
-{
-	struct net_device_context *ndev_ctx = netdev_priv(ndev);
-
-	/* set slave flag before open to prevent IPv6 addrconf */
-	vf_netdev->flags |= IFF_SLAVE;
-
-	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+	/* if syntihetic interface is a different namespace,
+	 * then move the VF to that namespace; join will be
+	 * done again in that context.
+	 */
+	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
+		ret = dev_change_net_namespace(vf_netdev,
+					       dev_net(ndev), "eth%d");
+		if (ret)
+			netdev_err(vf_netdev,
+				   "could not move to same namespace as %s: %d\n",
+				   ndev->name, ret);
+		else
+			netdev_info(vf_netdev,
+				    "VF moved to namespace with: %s\n",
+				    ndev->name);
+		return NOTIFY_DONE;
+	}
-	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
+	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
-	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
+	if (netvsc_vf_join(vf_netdev, ndev) != 0)
+		return NOTIFY_DONE;
 	dev_hold(vf_netdev);
-	rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev);
-
-	return 0;
+	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+	return NOTIFY_OK;
 }
 /* VF up/down change detected, schedule to change data path */
-static int netvsc_vf_changed(struct net_device *vf_netdev,
-			     struct net_device *ndev)
+static int netvsc_vf_changed(struct net_device *vf_netdev)
 {
 	struct net_device_context *net_device_ctx;
 	struct netvsc_device *netvsc_dev;
+	struct net_device *ndev;
 	bool vf_is_up = netif_running(vf_netdev);
+	ndev = get_netvsc_byref(vf_netdev);
+	if (!ndev)
+		return NOTIFY_DONE;
+
 	net_device_ctx = netdev_priv(ndev);
 	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 	if (!netvsc_dev)
-		return -ENODEV;
+		return NOTIFY_DONE;
 	netvsc_switch_datapath(ndev, vf_is_up);
 	netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? "to" : "from", vf_netdev->name);
-	return 0;
+	return NOTIFY_OK;
 }
-static int netvsc_pre_unregister_vf(struct net_device *vf_netdev,
-				    struct net_device *ndev)
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
 {
+	struct net_device *ndev;
 	struct net_device_context *net_device_ctx;
-	net_device_ctx = netdev_priv(ndev);
-	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
-
-	return 0;
-}
-
-static int netvsc_unregister_vf(struct net_device *vf_netdev,
-				struct net_device *ndev)
-{
-	struct net_device_context *net_device_ctx;
+	ndev = get_netvsc_byref(vf_netdev);
+	if (!ndev)
+		return NOTIFY_DONE;
 	net_device_ctx = netdev_priv(ndev);
+	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
 	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+	netdev_rx_handler_unregister(vf_netdev);
+	netdev_upper_dev_unlink(vf_netdev, ndev);
 	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
 	dev_put(vf_netdev);
-	return 0;
+	return NOTIFY_OK;
 }
-static struct failover_ops netvsc_failover_ops = {
-	.slave_pre_register = netvsc_pre_register_vf,
-	.slave_register = netvsc_register_vf,
-	.slave_pre_unregister = netvsc_pre_unregister_vf,
-	.slave_unregister = netvsc_unregister_vf,
-	.slave_link_change = netvsc_vf_changed,
-	.slave_handle_frame = netvsc_vf_handle_frame,
-};
-
 static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id)
 {
@@ -2024,23 +2104,19 @@ static int netvsc_probe(struct hv_device *dev,
 	else
 		net->max_mtu = ETH_DATA_LEN;
-	ret = register_netdev(net);
+	rtnl_lock();
+	ret = register_netdevice(net);
 	if (ret != 0) {
 		pr_err("Unable to register netdev.\n");
 		goto register_failed;
 	}
-	net_device_ctx->failover = failover_register(net, &netvsc_failover_ops);
-	if (IS_ERR(net_device_ctx->failover)) {
-		ret = PTR_ERR(net_device_ctx->failover);
-		goto err_failover;
-	}
-
-	return ret;
+	list_add(&net_device_ctx->list, &netvsc_dev_list);
+	rtnl_unlock();
+	return 0;
-err_failover:
-	unregister_netdev(net);
 register_failed:
+	rtnl_unlock();
 	rndis_filter_device_remove(dev, nvdev);
 rndis_failed:
 	free_percpu(net_device_ctx->vf_stats);
@@ -2080,14 +2156,13 @@ static int netvsc_remove(struct hv_device *dev)
 	rtnl_lock();
 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
 	if (vf_netdev)
-		failover_slave_unregister(vf_netdev);
+		netvsc_unregister_vf(vf_netdev);
 	if (nvdev)
 		rndis_filter_device_remove(dev, nvdev);
 	unregister_netdevice(net);
-
-	failover_unregister(ndev_ctx->failover);
+	list_del(&ndev_ctx->list);
 	rtnl_unlock();
 	rcu_read_unlock();
@@ -2115,8 +2190,54 @@ static struct hv_driver netvsc_drv = {
 	.remove = netvsc_remove,
 };
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+			       unsigned long event, void *ptr)
+{
+	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+	/* Skip our own events */
+	if (event_dev->netdev_ops == &device_ops)
+		return NOTIFY_DONE;
+
+	/* Avoid non-Ethernet type devices */
+	if (event_dev->type != ARPHRD_ETHER)
+		return NOTIFY_DONE;
+
+	/* Avoid Vlan dev with same MAC registering as VF */
+	if (is_vlan_dev(event_dev))
+		return NOTIFY_DONE;
+
+	/* Avoid Bonding master dev with same MAC registering as VF */
+	if ((event_dev->priv_flags & IFF_BONDING) &&
+	    (event_dev->flags & IFF_MASTER))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		return netvsc_register_vf(event_dev);
+	case NETDEV_UNREGISTER:
+		return netvsc_unregister_vf(event_dev);
+	case NETDEV_UP:
+	case NETDEV_DOWN:
+		return netvsc_vf_changed(event_dev);
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+	.notifier_call = netvsc_netdev_event,
+};
+
 static void __exit netvsc_drv_exit(void)
 {
+	unregister_netdevice_notifier(&netvsc_netdev_notifier);
 	vmbus_driver_unregister(&netvsc_drv);
 }
@@ -2135,6 +2256,7 @@ static int __init netvsc_drv_init(void)
 	if (ret)
 		return ret;
+	register_netdevice_notifier(&netvsc_netdev_notifier);
 	return 0;
 }
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 4e4c8daf44c3..33265747bf39 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -26,10 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/mdio-bitbang.h>
 #include <linux/mdio-gpio.h>
-#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
-
-#include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
 struct mdio_gpio_info {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9825bfd42abc..18e819d964f1 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3572,11 +3572,14 @@ static int __init init_mac80211_hwsim(void)
 	hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
 	if (!hwsim_wq)
 		return -ENOMEM;
-	rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
+
+	err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
+	if (err)
+		goto out_free_wq;
 	err = register_pernet_device(&hwsim_net_ops);
 	if (err)
-		return err;
+		goto out_free_rht;
 	err = platform_driver_register(&mac80211_hwsim_driver);
 	if (err)
@@ -3701,6 +3704,10 @@ out_unregister_driver:
 	platform_driver_unregister(&mac80211_hwsim_driver);
 out_unregister_pernet:
 	unregister_pernet_device(&hwsim_net_ops);
+out_free_rht:
+	rhashtable_destroy(&hwsim_radios_rht);
+out_free_wq:
+	destroy_workqueue(hwsim_wq);
 	return err;
 }
 module_init(init_mac80211_hwsim);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 679da1abd73c..922ce0abf5cf 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -239,7 +239,7 @@ static void rx_refill_timeout(struct timer_list *t)
 static int netfront_tx_slot_available(struct netfront_queue *queue)
 {
 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
-		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
+		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 }
 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -790,7 +790,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	int slots = 1;
 	int err = 0;
 	unsigned long ret;