Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 7
-rw-r--r--  drivers/net/bonding/bond_main.c | 15
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 46
-rw-r--r--  drivers/net/bonding/bonding.h | 7
-rw-r--r--  drivers/net/caif/caif_hsi.c | 427
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/mscan/mscan.c | 11
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000.h | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 2
-rw-r--r--  drivers/net/can/slcan.c | 2
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 6
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 6
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 8
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 8
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 10
-rw-r--r--  drivers/net/ethernet/apple/Kconfig | 22
-rw-r--r--  drivers/net/ethernet/apple/Makefile | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 6
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 41
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 49
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 94
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 22
-rw-r--r--  drivers/net/ethernet/cirrus/Makefile | 1
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c (renamed from drivers/net/ethernet/apple/cs89x0.c) | 0
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.h (renamed from drivers/net/ethernet/apple/cs89x0.h) | 0
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 12
-rw-r--r--  drivers/net/ethernet/dec/tulip/21142.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/eeprom.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/media.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic2.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/timer.c | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip.h | 3
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 66
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 10
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 370
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea.h | 20
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_hw.h | 25
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 478
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 63
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 312
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 84
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 2
-rw-r--r--  drivers/net/ethernet/jme.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 15
-rw-r--r--  drivers/net/ethernet/micrel/ks8695net.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 17
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 4
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 12
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 12
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 18
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 94
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 8
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 137
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 43
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 126
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 126
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 220
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 8
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 6
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 4
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 6
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 4
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 6
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 6
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 31
-rw-r--r--  drivers/net/irda/Kconfig | 2
-rw-r--r--  drivers/net/macvlan.c | 3
-rw-r--r--  drivers/net/macvtap.c | 171
-rw-r--r--  drivers/net/netconsole.c | 5
-rw-r--r--  drivers/net/phy/broadcom.c | 2
-rw-r--r--  drivers/net/phy/dp83640.c | 11
-rw-r--r--  drivers/net/phy/icplus.c | 13
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 2
-rw-r--r--  drivers/net/phy/vitesse.c | 34
-rw-r--r--  drivers/net/ppp/pptp.c | 22
-rw-r--r--  drivers/net/usb/asix.c | 3
-rw-r--r--  drivers/net/usb/usbnet.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 28
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 12
-rw-r--r--  drivers/net/wimax/i2400m/usb.c | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 4
-rw-r--r--  drivers/net/wireless/ipw2x00/libipw_tx.c | 2
-rw-r--r--  drivers/net/wireless/libertas_tf/deb_defs.h | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 4
150 files changed, 2399 insertions(+), 1666 deletions(-)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 47b928ed08f8..b33c099d65a4 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1135,13 +1135,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
__record_pdu(lacpdu, port);
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
- // verify that if the aggregator is enabled, the port is enabled too.
- //(because if the link goes down for a short time, the 802.3ad will not
- // catch it, and the port will continue to be disabled)
- if (port->aggregator
- && port->aggregator->is_active
- && !__port_is_enabled(port))
- __enable_port(port);
break;
default: //to silence the compiler
break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6191e6337284..c5944f1a4f9d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -395,7 +395,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *slave_dev)
{
skb->dev = slave_dev;
- skb->priority = 1;
skb->queue_mapping = bond_queue_mapping(skb);
@@ -1432,6 +1431,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
+ void (*recv_probe)(struct sk_buff *, struct bonding *,
+ struct slave *);
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
@@ -1445,11 +1446,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
- if (bond->recv_probe) {
+ recv_probe = ACCESS_ONCE(bond->recv_probe);
+ if (recv_probe) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) {
- bond->recv_probe(nskb, bond, slave);
+ recv_probe(nskb, bond, slave);
dev_kfree_skb(nskb);
}
}
@@ -4888,6 +4890,7 @@ static int __net_init bond_net_init(struct net *net)
INIT_LIST_HEAD(&bn->dev_list);
bond_create_proc_dir(bn);
+ bond_create_sysfs(bn);
return 0;
}
@@ -4896,6 +4899,7 @@ static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);
}
@@ -4933,10 +4937,6 @@ static int __init bonding_init(void)
goto err;
}
- res = bond_create_sysfs();
- if (res)
- goto err;
-
register_netdevice_notifier(&bond_netdev_notifier);
register_inetaddr_notifier(&bond_inetaddr_notifier);
out:
@@ -4954,7 +4954,6 @@ static void __exit bonding_exit(void)
unregister_netdevice_notifier(&bond_netdev_notifier);
unregister_inetaddr_notifier(&bond_inetaddr_notifier);
- bond_destroy_sysfs();
bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops);
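
The bond_main.c hunk above replaces two separate reads of bond->recv_probe with a single ACCESS_ONCE() snapshot, so the NULL check and the indirect call cannot observe different values if another CPU clears the pointer in between. A minimal userspace sketch of that lockless-read pattern follows; the handler names and the volatile-cast macro are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static void probe_handler(int pkt)
{
        printf("probe saw packet %d\n", pkt);
}

/* In the real driver this may be set to NULL concurrently. */
static void (*recv_probe)(int) = probe_handler;

static void handle_frame(int pkt)
{
        /* One volatile load: the check and the call share one snapshot. */
        void (*probe)(int) = ACCESS_ONCE(recv_probe);

        if (probe)
                probe(pkt);
}

int main(void)
{
        handle_frame(1);
        return 0;
}
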
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 2dfb4bf90087..5a20804fdece 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -55,8 +55,8 @@ static ssize_t bonding_show_bonds(struct class *cls,
struct class_attribute *attr,
char *buf)
{
- struct net *net = current->nsproxy->net_ns;
- struct bond_net *bn = net_generic(net, bond_net_id);
+ struct bond_net *bn =
+ container_of(attr, struct bond_net, class_attr_bonding_masters);
int res = 0;
struct bonding *bond;
@@ -79,9 +79,8 @@ static ssize_t bonding_show_bonds(struct class *cls,
return res;
}
-static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
+static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifname)
{
- struct bond_net *bn = net_generic(net, bond_net_id);
struct bonding *bond;
list_for_each_entry(bond, &bn->dev_list, bond_list) {
@@ -103,7 +102,8 @@ static ssize_t bonding_store_bonds(struct class *cls,
struct class_attribute *attr,
const char *buffer, size_t count)
{
- struct net *net = current->nsproxy->net_ns;
+ struct bond_net *bn =
+ container_of(attr, struct bond_net, class_attr_bonding_masters);
char command[IFNAMSIZ + 1] = {0, };
char *ifname;
int rv, res = count;
@@ -116,7 +116,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
if (command[0] == '+') {
pr_info("%s is being created...\n", ifname);
- rv = bond_create(net, ifname);
+ rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
pr_info("%s already exists.\n", ifname);
@@ -128,7 +128,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
struct net_device *bond_dev;
rtnl_lock();
- bond_dev = bond_get_by_name(net, ifname);
+ bond_dev = bond_get_by_name(bn, ifname);
if (bond_dev) {
pr_info("%s is being deleted...\n", ifname);
unregister_netdevice(bond_dev);
@@ -150,9 +150,24 @@ err_no_cmd:
return -EPERM;
}
+static const void *bonding_namespace(struct class *cls,
+ const struct class_attribute *attr)
+{
+ const struct bond_net *bn =
+ container_of(attr, struct bond_net, class_attr_bonding_masters);
+ return bn->net;
+}
+
/* class attribute for bond_masters file. This ends up in /sys/class/net */
-static CLASS_ATTR(bonding_masters, S_IWUSR | S_IRUGO,
- bonding_show_bonds, bonding_store_bonds);
+static const struct class_attribute class_attr_bonding_masters = {
+ .attr = {
+ .name = "bonding_masters",
+ .mode = S_IWUSR | S_IRUGO,
+ },
+ .show = bonding_show_bonds,
+ .store = bonding_store_bonds,
+ .namespace = bonding_namespace,
+};
int bond_create_slave_symlinks(struct net_device *master,
struct net_device *slave)
@@ -1655,11 +1670,14 @@ static struct attribute_group bonding_group = {
* Initialize sysfs. This sets up the bonding_masters file in
* /sys/class/net.
*/
-int bond_create_sysfs(void)
+int bond_create_sysfs(struct bond_net *bn)
{
int ret;
- ret = netdev_class_create_file(&class_attr_bonding_masters);
+ bn->class_attr_bonding_masters = class_attr_bonding_masters;
+ sysfs_attr_init(&bn->class_attr_bonding_masters.attr);
+
+ ret = netdev_class_create_file(&bn->class_attr_bonding_masters);
/*
* Permit multiple loads of the module by ignoring failures to
* create the bonding_masters sysfs file. Bonding devices
@@ -1673,7 +1691,7 @@ int bond_create_sysfs(void)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
- if (__dev_get_by_name(&init_net,
+ if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs",
class_attr_bonding_masters.attr.name);
@@ -1687,9 +1705,9 @@ int bond_create_sysfs(void)
/*
* Remove /sys/class/net/bonding_masters.
*/
-void bond_destroy_sysfs(void)
+void bond_destroy_sysfs(struct bond_net *bn)
{
- netdev_class_remove_file(&class_attr_bonding_masters);
+ netdev_class_remove_file(&bn->class_attr_bonding_masters);
}
/*
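
The sysfs rework above hinges on embedding the bonding_masters class attribute inside each struct bond_net, which lets container_of() recover the owning namespace from nothing but the attribute pointer passed to show/store. A standalone sketch of that embed-and-recover pattern (the struct names below are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct bond_net_like {                  /* stand-in for struct bond_net */
        int net_id;                     /* stand-in for its struct net * */
        struct attribute class_attr;    /* embedded: one copy per namespace */
};

static void show(struct attribute *attr)
{
        /* Recover the per-namespace container from the embedded member. */
        struct bond_net_like *bn =
                container_of(attr, struct bond_net_like, class_attr);

        printf("'%s' belongs to namespace %d\n", attr->name, bn->net_id);
}

int main(void)
{
        struct bond_net_like bn = {
                .net_id = 42,
                .class_attr = { .name = "bonding_masters" },
        };

        show(&bn.class_attr);
        return 0;
}
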
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index e82336615600..82fec5fc75d7 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -379,11 +379,13 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
+struct bond_net;
+
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
int bond_create(struct net *net, const char *name);
-int bond_create_sysfs(void);
-void bond_destroy_sysfs(void);
+int bond_create_sysfs(struct bond_net *net);
+void bond_destroy_sysfs(struct bond_net *net);
void bond_prepare_sysfs_group(struct bonding *bond);
int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
@@ -409,6 +411,7 @@ struct bond_net {
#ifdef CONFIG_PROC_FS
struct proc_dir_entry * proc_dir;
#endif
+ struct class_attribute class_attr_bonding_masters;
};
#ifdef CONFIG_PROC_FS
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 2fcabba56087..073352517adc 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
+#include <linux/rtnetlink.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>
@@ -29,6 +30,10 @@ MODULE_DESCRIPTION("CAIF HSI driver");
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
(((pow)-((x)&((pow)-1)))))
+static int inactivity_timeout = 1000;
+module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
+
/*
* HSI padding options.
* Warning: must be a power of 2 (& operation used) and cannot be zero!
@@ -98,7 +103,8 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
}
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+ mod_timer(&cfhsi->timer,
+ jiffies + cfhsi->inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
}
@@ -145,7 +151,7 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
}
ret = 5 * HZ;
- wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
+ ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
if (ret < 0) {
@@ -178,6 +184,9 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
if (!skb)
return 0;
+ /* Clear offset. */
+ desc->offset = 0;
+
/* Check if we can embed a CAIF frame. */
if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
struct caif_payload_info *info;
@@ -206,9 +215,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
consume_skb(skb);
skb = NULL;
}
- } else
- /* Clear offset. */
- desc->offset = 0;
+ }
/* Create payload CAIF frames. */
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
@@ -271,16 +278,13 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
return CFHSI_DESC_SZ + pld_len;
}
-static void cfhsi_tx_done_work(struct work_struct *work)
+static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
- struct cfhsi *cfhsi = NULL;
struct cfhsi_desc *desc = NULL;
int len = 0;
int res;
- cfhsi = container_of(work, struct cfhsi, tx_done_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
- __func__);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -303,14 +307,22 @@ static void cfhsi_tx_done_work(struct work_struct *work)
spin_unlock_bh(&cfhsi->lock);
/* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- if (!len) {
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- /* Start inactivity timer. */
- mod_timer(&cfhsi->timer,
- jiffies + CFHSI_INACTIVITY_TOUT);
- break;
- }
+ do {
+ len = cfhsi_tx_frm(desc, cfhsi);
+ if (!len) {
+ spin_lock_bh(&cfhsi->lock);
+ if (unlikely(skb_peek(&cfhsi->qhead))) {
+ spin_unlock_bh(&cfhsi->lock);
+ continue;
+ }
+ cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+ /* Start inactivity timer. */
+ mod_timer(&cfhsi->timer,
+ jiffies + cfhsi->inactivity_timeout);
+ spin_unlock_bh(&cfhsi->lock);
+ goto done;
+ }
+ } while (!len);
/* Set up new transfer. */
res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
@@ -319,6 +331,9 @@ static void cfhsi_tx_done_work(struct work_struct *work)
__func__, res);
}
} while (res < 0);
+
+done:
+ return;
}
static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
@@ -331,8 +346,7 @@ static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
-
- queue_work(cfhsi->wq, &cfhsi->tx_done_work);
+ cfhsi_tx_done(cfhsi);
}
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
@@ -346,14 +360,14 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
__func__);
- return 0;
+ return -EPROTO;
}
/* Check for embedded CAIF frame. */
if (desc->offset) {
struct sk_buff *skb;
u8 *dst = NULL;
- int len = 0, retries = 0;
+ int len = 0;
pfrm = ((u8 *)desc) + desc->offset;
/* Remove offset padding. */
@@ -364,26 +378,19 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
len |= ((*(pfrm+1)) << 8) & 0xFF00;
len += 2; /* Add FCS fields. */
+ /* Sanity check length of CAIF frame. */
+ if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ __func__);
+ return -EPROTO;
+ }
/* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_KERNEL);
- while (!skb) {
- retries++;
- schedule_timeout(1);
- skb = alloc_skb(len + 1, GFP_KERNEL);
- if (skb) {
- printk(KERN_WARNING "%s: slept for %u "
- "before getting memory\n",
- __func__, retries);
- break;
- }
- if (retries > HZ) {
- printk(KERN_ERR "%s: slept for 1HZ and "
- "did not get memory\n",
- __func__);
- cfhsi->ndev->stats.rx_dropped++;
- goto drop_frame;
- }
+ skb = alloc_skb(len + 1, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ __func__);
+ return -ENOMEM;
}
caif_assert(skb != NULL);
@@ -409,7 +416,6 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
cfhsi->ndev->stats.rx_bytes += len;
}
-drop_frame:
/* Calculate transfer length. */
plen = desc->cffrm_len;
while (nfrms < CFHSI_MAX_PKTS && *plen) {
@@ -422,13 +428,12 @@ drop_frame:
if (desc->header & CFHSI_PIGGY_DESC)
xfer_sz += CFHSI_DESC_SZ;
- if (xfer_sz % 4) {
+ if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
dev_err(&cfhsi->ndev->dev,
"%s: Invalid payload len: %d, ignored.\n",
__func__, xfer_sz);
- xfer_sz = 0;
+ return -EPROTO;
}
-
return xfer_sz;
}
@@ -444,23 +449,27 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
__func__);
- return -EINVAL;
+ return -EPROTO;
}
/* Set frame pointer to start of payload. */
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
plen = desc->cffrm_len;
+
+ /* Skip already processed frames. */
+ while (nfrms < cfhsi->rx_state.nfrms) {
+ pfrm += *plen;
+ rx_sz += *plen;
+ plen++;
+ nfrms++;
+ }
+
+ /* Parse payload. */
while (nfrms < CFHSI_MAX_PKTS && *plen) {
struct sk_buff *skb;
u8 *dst = NULL;
u8 *pcffrm = NULL;
- int len = 0, retries = 0;
-
- if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
- __func__);
- return -EINVAL;
- }
+ int len = 0;
/* CAIF frame starts after head padding. */
pcffrm = pfrm + *pfrm + 1;
@@ -470,25 +479,20 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
len += 2; /* Add FCS fields. */
+ /* Sanity check length of CAIF frames. */
+ if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ __func__);
+ return -EPROTO;
+ }
+
/* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_KERNEL);
- while (!skb) {
- retries++;
- schedule_timeout(1);
- skb = alloc_skb(len + 1, GFP_KERNEL);
- if (skb) {
- printk(KERN_WARNING "%s: slept for %u "
- "before getting memory\n",
- __func__, retries);
- break;
- }
- if (retries > HZ) {
- printk(KERN_ERR "%s: slept for 1HZ "
- "and did not get memory\n",
- __func__);
- cfhsi->ndev->stats.rx_dropped++;
- goto drop_frame;
- }
+ skb = alloc_skb(len + 1, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ __func__);
+ cfhsi->rx_state.nfrms = nfrms;
+ return -ENOMEM;
}
caif_assert(skb != NULL);
@@ -512,7 +516,6 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
cfhsi->ndev->stats.rx_packets++;
cfhsi->ndev->stats.rx_bytes += len;
-drop_frame:
pfrm += *plen;
rx_sz += *plen;
plen++;
@@ -522,40 +525,56 @@ drop_frame:
return rx_sz;
}
-static void cfhsi_rx_done_work(struct work_struct *work)
+static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
int res;
int desc_pld_len = 0;
- struct cfhsi *cfhsi = NULL;
struct cfhsi_desc *desc = NULL;
- cfhsi = container_of(work, struct cfhsi, rx_done_work);
desc = (struct cfhsi_desc *)cfhsi->rx_buf;
- dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
- __func__);
+ dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
/* Update inactivity timer if pending. */
- mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+ spin_lock_bh(&cfhsi->lock);
+ mod_timer_pending(&cfhsi->timer,
+ jiffies + cfhsi->inactivity_timeout);
+ spin_unlock_bh(&cfhsi->lock);
- if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
+ if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
+ if (desc_pld_len == -ENOMEM)
+ goto restart;
+ if (desc_pld_len == -EPROTO)
+ goto out_of_sync;
} else {
int pld_len;
- pld_len = cfhsi_rx_pld(desc, cfhsi);
+ if (!cfhsi->rx_state.piggy_desc) {
+ pld_len = cfhsi_rx_pld(desc, cfhsi);
+ if (pld_len == -ENOMEM)
+ goto restart;
+ if (pld_len == -EPROTO)
+ goto out_of_sync;
+ cfhsi->rx_state.pld_len = pld_len;
+ } else {
+ pld_len = cfhsi->rx_state.pld_len;
+ }
if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
struct cfhsi_desc *piggy_desc;
piggy_desc = (struct cfhsi_desc *)
(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
pld_len);
+ cfhsi->rx_state.piggy_desc = true;
/* Extract piggy-backed descriptor. */
desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
+ if (desc_pld_len == -ENOMEM)
+ goto restart;
/*
* Copy needed information from the piggy-backed
@@ -563,19 +582,22 @@ static void cfhsi_rx_done_work(struct work_struct *work)
*/
memcpy((u8 *)desc, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
+
+ if (desc_pld_len == -EPROTO)
+ goto out_of_sync;
}
}
+ memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
if (desc_pld_len) {
- cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
+ cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
cfhsi->rx_len = desc_pld_len;
} else {
- cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+ cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
cfhsi->rx_ptr = cfhsi->rx_buf;
cfhsi->rx_len = CFHSI_DESC_SZ;
}
- clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
/* Set up new transfer. */
@@ -590,6 +612,33 @@ static void cfhsi_rx_done_work(struct work_struct *work)
cfhsi->ndev->stats.rx_dropped++;
}
}
+ return;
+
+restart:
+ if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
+ dev_err(&cfhsi->ndev->dev, "%s: No memory available "
+ "in %d iterations.\n",
+ __func__, CFHSI_MAX_RX_RETRIES);
+ BUG();
+ }
+ mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
+ return;
+
+out_of_sync:
+ dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
+ print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
+ cfhsi->rx_buf, CFHSI_DESC_SZ);
+ schedule_work(&cfhsi->out_of_sync_work);
+}
+
+static void cfhsi_rx_slowpath(unsigned long arg)
+{
+ struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ cfhsi_rx_done(cfhsi);
}
static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
@@ -603,12 +652,10 @@ static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
- set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
-
if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
wake_up_interruptible(&cfhsi->flush_fifo_wait);
else
- queue_work(cfhsi->wq, &cfhsi->rx_done_work);
+ cfhsi_rx_done(cfhsi);
}
static void cfhsi_wake_up(struct work_struct *work)
@@ -627,6 +674,7 @@ static void cfhsi_wake_up(struct work_struct *work)
/* It happens when wakeup is requested by
* both ends at the same time. */
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
return;
}
@@ -637,25 +685,53 @@ static void cfhsi_wake_up(struct work_struct *work)
__func__);
/* Wait for acknowledge. */
- ret = CFHSI_WAKEUP_TOUT;
- wait_event_interruptible_timeout(cfhsi->wake_up_wait,
- test_bit(CFHSI_WAKE_UP_ACK,
+ ret = CFHSI_WAKE_TOUT;
+ ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
+ test_and_clear_bit(CFHSI_WAKE_UP_ACK,
&cfhsi->bits), ret);
if (unlikely(ret < 0)) {
/* Interrupted by signal. */
- dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
__func__, ret);
+
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
return;
} else if (!ret) {
+ bool ca_wake = false;
+ size_t fifo_occupancy = 0;
+
/* Wakeup timeout */
dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
__func__);
+
+ /* Check FIFO to check if modem has sent something. */
+ WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy));
+
+ dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+ __func__, (unsigned) fifo_occupancy);
+
+ /* Check if we missed the interrupt. */
+ WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ &ca_wake));
+
+ if (ca_wake) {
+ dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ __func__);
+
+ /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
+ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+
+ /* Continue execution. */
+ goto wake_ack;
+ }
+
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
return;
}
+wake_ack:
dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
__func__);
@@ -664,16 +740,11 @@ static void cfhsi_wake_up(struct work_struct *work)
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
/* Resume read operation. */
- if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
- __func__);
- res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
- cfhsi->rx_len, cfhsi->dev);
- if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
- __func__, res);
- }
- }
+ dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
+ res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
+
+ if (WARN_ON(res < 0))
+ dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
/* Clear power up acknowledgment. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -686,7 +757,7 @@ static void cfhsi_wake_up(struct work_struct *work)
__func__);
/* Start inactivity timer. */
mod_timer(&cfhsi->timer,
- jiffies + CFHSI_INACTIVITY_TOUT);
+ jiffies + cfhsi->inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
return;
}
@@ -712,79 +783,81 @@ static void cfhsi_wake_up(struct work_struct *work)
"%s: Failed to create HSI frame: %d.\n",
__func__, len);
}
-
}
static void cfhsi_wake_down(struct work_struct *work)
{
long ret;
struct cfhsi *cfhsi = NULL;
- size_t fifo_occupancy;
+ size_t fifo_occupancy = 0;
+ int retry = CFHSI_WAKE_TOUT;
cfhsi = container_of(work, struct cfhsi, wake_down_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
- __func__);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
- /* Check if there is something in FIFO. */
- if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
- &fifo_occupancy)))
- fifo_occupancy = 0;
-
- if (fifo_occupancy) {
- dev_dbg(&cfhsi->ndev->dev,
- "%s: %u words in RX FIFO, restart timer.\n",
- __func__, (unsigned) fifo_occupancy);
- spin_lock_bh(&cfhsi->lock);
- mod_timer(&cfhsi->timer,
- jiffies + CFHSI_INACTIVITY_TOUT);
- spin_unlock_bh(&cfhsi->lock);
- return;
- }
-
- /* Cancel pending RX requests */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
-
/* Deactivate wake line. */
cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
/* Wait for acknowledge. */
- ret = CFHSI_WAKEUP_TOUT;
+ ret = CFHSI_WAKE_TOUT;
ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
- test_bit(CFHSI_WAKE_DOWN_ACK,
- &cfhsi->bits),
- ret);
+ test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
+ &cfhsi->bits), ret);
if (ret < 0) {
/* Interrupted by signal. */
- dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
__func__, ret);
return;
} else if (!ret) {
+ bool ca_wake = true;
+
/* Timeout */
- dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
- __func__);
+ dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
+
+ /* Check if we missed the interrupt. */
+ WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ &ca_wake));
+ if (!ca_wake)
+ dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ __func__);
}
- /* Clear power down acknowledment. */
- clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+ /* Check FIFO occupancy. */
+ while (retry) {
+ WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy));
+
+ if (!fifo_occupancy)
+ break;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ retry--;
+ }
+
+ if (!retry)
+ dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+
+ /* Clear AWAKE condition. */
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
- /* Check if there is something in FIFO. */
- if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
- &fifo_occupancy)))
- fifo_occupancy = 0;
+ /* Cancel pending RX requests. */
+ cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
- if (fifo_occupancy) {
- dev_dbg(&cfhsi->ndev->dev,
- "%s: %u words in RX FIFO, wakeup forced.\n",
- __func__, (unsigned) fifo_occupancy);
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
- } else
- dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
- __func__);
+}
+
+static void cfhsi_out_of_sync(struct work_struct *work)
+{
+ struct cfhsi *cfhsi = NULL;
+
+ cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
+
+ rtnl_lock();
+ dev_close(cfhsi->ndev);
+ rtnl_unlock();
}
static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
@@ -854,17 +927,15 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
start_xfer = 1;
}
- spin_unlock_bh(&cfhsi->lock);
-
- if (!start_xfer)
+ if (!start_xfer) {
+ spin_unlock_bh(&cfhsi->lock);
return 0;
+ }
/* Delete inactivity timer if started. */
-#ifdef CONFIG_SMP
timer_active = del_timer_sync(&cfhsi->timer);
-#else
- timer_active = del_timer(&cfhsi->timer);
-#endif /* CONFIG_SMP */
+
+ spin_unlock_bh(&cfhsi->lock);
if (timer_active) {
struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
@@ -946,7 +1017,7 @@ int cfhsi_probe(struct platform_device *pdev)
/* Initialize state variables. */
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+ cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
/* Set flow info */
cfhsi->flow_off_sent = 0;
@@ -980,7 +1051,19 @@ int cfhsi_probe(struct platform_device *pdev)
goto err_alloc_rx;
}
- /* Initialize receive variables. */
+ /* Pre-calculate inactivity timeout. */
+ if (inactivity_timeout != -1) {
+ cfhsi->inactivity_timeout =
+ inactivity_timeout * HZ / 1000;
+ if (!cfhsi->inactivity_timeout)
+ cfhsi->inactivity_timeout = 1;
+ else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+ cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ } else {
+ cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ }
+
+ /* Initialize receive variables. */
cfhsi->rx_ptr = cfhsi->rx_buf;
cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -990,19 +1073,19 @@ int cfhsi_probe(struct platform_device *pdev)
/* Set up the driver. */
cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
+ cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
+ cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
/* Initialize the work queues. */
INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
- INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
- INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
+ INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
/* Clear all bit fields. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
- clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
/* Create work thread. */
cfhsi->wq = create_singlethread_workqueue(pdev->name);
@@ -1022,6 +1105,10 @@ int cfhsi_probe(struct platform_device *pdev)
init_timer(&cfhsi->timer);
cfhsi->timer.data = (unsigned long)cfhsi;
cfhsi->timer.function = cfhsi_inactivity_tout;
+ /* Setup the slowpath RX timer. */
+ init_timer(&cfhsi->rx_slowpath_timer);
+ cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
+ cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
/* Add CAIF HSI device to list. */
spin_lock(&cfhsi_list_lock);
@@ -1045,9 +1132,6 @@ int cfhsi_probe(struct platform_device *pdev)
goto err_net_reg;
}
- cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
-
/* Register network device. */
res = register_netdev(ndev);
if (res) {
@@ -1074,7 +1158,7 @@ int cfhsi_probe(struct platform_device *pdev)
return res;
}
-static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
+static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
u8 *tx_buf, *rx_buf;
@@ -1084,28 +1168,17 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
/* going to shutdown driver */
set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
- if (remove_platform_dev) {
- /* Flush workqueue */
- flush_workqueue(cfhsi->wq);
-
- /* Notify device. */
- platform_device_unregister(cfhsi->pdev);
- }
-
/* Flush workqueue */
flush_workqueue(cfhsi->wq);
- /* Delete timer if pending */
-#ifdef CONFIG_SMP
+ /* Delete timers if pending */
del_timer_sync(&cfhsi->timer);
-#else
- del_timer(&cfhsi->timer);
-#endif /* CONFIG_SMP */
+ del_timer_sync(&cfhsi->rx_slowpath_timer);
/* Cancel pending RX request (if any) */
cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
- /* Flush again and destroy workqueue */
+ /* Destroy workqueue */
destroy_workqueue(cfhsi->wq);
/* Store bufferes: will be freed later. */
@@ -1144,7 +1217,7 @@ int cfhsi_remove(struct platform_device *pdev)
spin_unlock(&cfhsi_list_lock);
/* Shutdown driver. */
- cfhsi_shutdown(cfhsi, false);
+ cfhsi_shutdown(cfhsi);
return 0;
}
@@ -1177,7 +1250,7 @@ static void __exit cfhsi_exit_module(void)
spin_unlock(&cfhsi_list_lock);
/* Shutdown driver. */
- cfhsi_shutdown(cfhsi, true);
+ cfhsi_shutdown(cfhsi);
spin_lock(&cfhsi_list_lock);
}
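
One behavioural detail in the caif_hsi.c changes deserves a note: the fixed CFHSI_INACTIVITY_TOUT becomes a module parameter given in milliseconds, and probe converts it once to timer ticks, rounding sub-tick values up to one tick and clamping at NEXT_TIMER_MAX_DELTA, with -1 meaning "effectively never". A hedged standalone sketch of that conversion (HZ and the clamp value below are stand-ins, not the kernel's):

#include <stdio.h>

#define HZ 250                                  /* stand-in tick rate */
#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)  /* stand-in clamp */

static unsigned long inactivity_ms_to_ticks(long ms)
{
        unsigned long ticks;

        if (ms == -1)                   /* -1: disable the timeout */
                return NEXT_TIMER_MAX_DELTA;

        ticks = (unsigned long)ms * HZ / 1000;
        if (!ticks)                     /* round sub-tick values up to 1 */
                ticks = 1;
        else if (ticks > NEXT_TIMER_MAX_DELTA)
                ticks = NEXT_TIMER_MAX_DELTA;
        return ticks;
}

int main(void)
{
        printf("1000 ms -> %lu ticks\n", inactivity_ms_to_ticks(1000));
        printf("1 ms    -> %lu ticks\n", inactivity_ms_to_ticks(1));
        return 0;
}
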
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 121ede663e20..044ea0647b04 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,8 +8,6 @@
* Public License ("GPL") version 2 as distributed in the 'COPYING'
* file from the main directory of the linux kernel source.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*
* Your platform definition file should specify something like:
*
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index ac42f5da91b5..ec4a3119e2c9 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *data = &regs->tx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- /* It is safe to write into dsr[dlc+1] */
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+ for (i = 0; i < frame->can_dlc / 2; i++) {
out_be16(data, *payload++);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
+ /* write remaining byte if necessary */
+ if (frame->can_dlc & 1)
+ out_8(data, frame->data[frame->can_dlc - 1]);
}
out_8(&regs->tx.dlr, frame->can_dlc);
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
void __iomem *data = &regs->rx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
+ for (i = 0; i < frame->can_dlc / 2; i++) {
*payload++ = in_be16(data);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
+ /* read remaining byte if necessary */
+ if (frame->can_dlc & 1)
+ frame->data[frame->can_dlc - 1] = in_8(data);
}
out_8(&regs->canrflg, MSCAN_RXF);
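
The mscan hunks fix an off-by-one: looping (can_dlc + 1) / 2 times transfers dlc + 1 bytes when dlc is odd, one byte beyond the payload. The fix transfers dlc / 2 whole 16-bit words and handles the trailing byte separately, so exactly can_dlc bytes move on both the TX and RX paths. A small sketch of the corrected copy, using plain buffers instead of the MSCAN register window:

#include <stdio.h>
#include <string.h>

/* Copy len bytes as whole 16-bit words plus an optional trailing byte,
 * so an odd len never touches dst[len]. */
static void copy_words_then_tail(unsigned char *dst,
                                 const unsigned char *src, int len)
{
        int i;

        for (i = 0; i + 1 < len; i += 2)
                memcpy(dst + i, src + i, 2);    /* whole words only */
        if (len & 1)
                dst[len - 1] = src[len - 1];    /* remaining byte */
}

int main(void)
{
        unsigned char src[5] = { 1, 2, 3, 4, 5 }, dst[6] = { 0 };

        copy_words_then_tail(dst, src, 5);
        printf("%d %d, guard byte %d\n", dst[0], dst[4], dst[5]); /* 1 5 0 */
        return 0;
}
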
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f501bba1fc6f..04a3f1b756a8 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -40,8 +40,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 78bd4ecac140..23fff06875f5 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -40,8 +40,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef SJA1000_DEV_H
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index cee6ba2b8b58..c3dd9d09be57 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -29,7 +29,7 @@
* nxp,external-clock-frequency = <16000000>;
* };
*
- * See "Documentation/powerpc/dts-bindings/can/sja1000.txt" for further
+ * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
* information.
*/
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 4b70b7e8bdeb..a979b006f459 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -35,8 +35,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index a30b8f480f61..f93e2d6fc88c 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 9ca45dcba755..b42c06baba89 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2182,12 +2182,12 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(pci_map_single(
VORTEX_PCI(vp),
(void *)skb_frag_address(frag),
- frag->size, PCI_DMA_TODEVICE));
+ skb_frag_size(frag), PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
- vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
else
- vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
}
}
#else
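
Most of the Ethernet-driver churn from here on is one mechanical conversion: direct frag->size accesses become skb_frag_size(), skb_frag_size_sub() and friends, so the skb_frag_t layout can change later without touching every driver again. A sketch of that accessor pattern with a stand-in struct (not the real skbuff.h definitions):

#include <stdio.h>

typedef struct { unsigned int size; } frag_t;   /* stand-in for skb_frag_t */

/* Accessors in the style of skb_frag_size()/skb_frag_size_sub():
 * callers never dereference ->size directly. */
static inline unsigned int frag_size(const frag_t *frag)
{
        return frag->size;
}

static inline void frag_size_sub(frag_t *frag, int delta)
{
        frag->size -= delta;
}

int main(void)
{
        frag_t frag = { .size = 1500 };

        frag_size_sub(&frag, 100);      /* e.g. trimming a tail, as in bnx2 */
        printf("%u\n", frag_size(&frag));
        return 0;
}
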
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 11f8858c786d..20ea07508ac7 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -810,15 +810,15 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
txd->frag.addrHi = 0;
first_txd->numDesc++;
- for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
txd = (struct tx_desc *) (txRing->ringBase +
txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
- len = frag->size;
+ len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d6b015598569..6d9f6911000f 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1256,12 +1256,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
} else {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
- status |= this_frag->size;
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
+ status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
pci_map_single(np->pci_dev,
skb_frag_address(this_frag),
- this_frag->size,
+ skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
@@ -1378,7 +1378,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_single(np->pci_dev,
np->tx_info[entry].mapping,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
np->dirty_tx++;
entry++;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 6715bf54f04e..442fefa4f2ca 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -198,7 +198,7 @@ static void greth_clean_rings(struct greth_private *greth)
dma_unmap_page(greth->dev,
greth_read_bd(&tx_bdp->addr),
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
@@ -517,7 +517,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
status = GRETH_BD_EN;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
- status |= frag->size & GRETH_BD_LEN;
+ status |= skb_frag_size(frag) & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (curr_tx == GRETH_TXBD_NUM_MASK)
@@ -531,7 +531,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
greth_write_bd(&bdp->stat, status);
- dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
+ dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
@@ -713,7 +713,7 @@ static void greth_clean_tx_gbit(struct net_device *dev)
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index b1a4e8204437..f872748ab4e6 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2478,18 +2478,18 @@ restart:
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
- len += frag->size;
+ len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
- flagsize = (frag->size << 16);
+ flagsize = skb_frag_size(frag) << 16;
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2508,7 +2508,7 @@ restart:
info->skb = NULL;
}
dma_unmap_addr_set(info, mapping, mapping);
- dma_unmap_len_set(info, maplen, frag->size);
+ dma_unmap_len_set(info, maplen, skb_frag_size(frag));
ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
}
}
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index 59d5c2630acb..a759d5483ab9 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -5,8 +5,7 @@
config NET_VENDOR_APPLE
bool "Apple devices"
default y
- depends on (PPC_PMAC && PPC32) || MAC || ISA || EISA || MACH_IXDP2351 \
- || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
+ depends on (PPC_PMAC && PPC32) || MAC
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -75,23 +74,4 @@ config MACMACE
say Y and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
-config CS89x0
- tristate "CS89x0 support"
- depends on (ISA || EISA || MACH_IXDP2351 \
- || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
- ---help---
- Support for CS89x0 chipset based Ethernet cards. If you have a
- network (Ethernet) card of this type, say Y and read the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
- <file:Documentation/networking/cs89x0.txt>.
-
- To compile this driver as a module, choose M here. The module
- will be called cs89x0.
-
-config CS89x0_NONISA_IRQ
- def_bool y
- depends on CS89x0 != n
- depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
-
endif # NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile
index 9d300864461f..0d3a5919c95b 100644
--- a/drivers/net/ethernet/apple/Makefile
+++ b/drivers/net/ethernet/apple/Makefile
@@ -5,5 +5,4 @@
obj-$(CONFIG_MACE) += mace.o
obj-$(CONFIG_BMAC) += bmac.o
obj-$(CONFIG_MAC89x0) += mac89x0.o
-obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACMACE) += macmace.o
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 12a0b30319db..02c7ed8d9eca 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2179,7 +2179,7 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
- buffer_info->length = frag->size;
+ buffer_info->length = skb_frag_size(frag);
buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
frag, 0,
buffer_info->length,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 97c45a4b855a..95483bcac1d0 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1593,7 +1593,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
u16 proto_hdr_len = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- fg_size = skb_shinfo(skb)->frags[i].size;
+ fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
}
@@ -1744,12 +1744,12 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u16 i;
u16 seg_num;
frag = &skb_shinfo(skb)->frags[f];
- buf_len = frag->size;
+ buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
for (i = 0; i < seg_num; i++) {
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7381a49fefb4..33a4e35f5ee8 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -858,7 +858,7 @@ static s32 atl1_init_hw(struct atl1_hw *hw)
atl1_init_flash_opcode(hw);
if (!hw->phy_configured) {
- /* enable GPHY LinkChange Interrrupt */
+ /* enable GPHY LinkChange Interrupt */
ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
if (ret_val)
return ret_val;
@@ -2267,11 +2267,11 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u16 i, nseg;
frag = &skb_shinfo(skb)->frags[f];
- buf_len = frag->size;
+ buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
ATL1_MAX_TX_BUF_LEN;
@@ -2356,7 +2356,6 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
int count = 1;
int ret_val;
struct tx_packet_desc *ptpd;
- u16 frag_size;
u16 vlan_tag;
unsigned int nr_frags = 0;
unsigned int mss = 0;
@@ -2372,10 +2371,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++) {
- frag_size = skb_shinfo(skb)->frags[f].size;
- if (frag_size)
- count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
- ATL1_MAX_TX_BUF_LEN;
+ unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+ count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
}
mss = skb_shinfo(skb)->gso_size;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6ff7636e73a2..965c7235804d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2871,7 +2871,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
dma_unmap_addr(
&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -3049,7 +3049,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
} else {
skb_frag_t *frag =
&skb_shinfo(skb)->frags[i - 1];
- frag->size -= tail;
+ skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
return 0;
@@ -5395,7 +5395,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[k].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[k]),
PCI_DMA_TODEVICE);
}
dev_kfree_skb(skb);
@@ -6530,13 +6530,13 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->is_gso = skb_is_gso(skb);
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX_BD(prod);
ring_prod = TX_RING_IDX(prod);
txbd = &txr->tx_desc_ring[ring_prod];
- len = frag->size;
+ len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping))
@@ -6594,7 +6594,7 @@ dma_error:
ring_prod = TX_RING_IDX(prod);
tx_buf = &txr->tx_buf_ring[ring_prod];
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index fc50d4267df8..99d31a7d6aaa 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -5617,7 +5617,7 @@ struct l2_fhdr {
#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_TXP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_TXP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_TXP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_TXP_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -5712,7 +5712,7 @@ struct l2_fhdr {
#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_TPAT_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_TPAT_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_TPAT_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_TPAT_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -5807,7 +5807,7 @@ struct l2_fhdr {
#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_RXP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_RXP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_RXP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_RXP_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -5953,7 +5953,7 @@ struct l2_fhdr {
#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_COM_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_COM_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_COM_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_COM_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_COM_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -6119,7 +6119,7 @@ struct l2_fhdr {
#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_CP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_CP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_CP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_CP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_CP_CPU_STATE_BLOCKED_READ (1L<<31)
@@ -6291,7 +6291,7 @@ struct l2_fhdr {
#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED (1L<<8)
#define BNX2_MCP_CPU_STATE_SOFT_HALTED (1L<<10)
#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW (1L<<11)
-#define BNX2_MCP_CPU_STATE_INTERRRUPT (1L<<12)
+#define BNX2_MCP_CPU_STATE_INTERRUPT (1L<<12)
#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL (1L<<14)
#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL (1L<<15)
#define BNX2_MCP_CPU_STATE_BLOCKED_READ (1L<<31)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2f92487724c6..627a5807836d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -234,13 +234,19 @@ do { \
* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
*
*/
-/* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
-#define BNX2X_ISCSI_ETH_CID 49
+enum {
+ BNX2X_ISCSI_ETH_CL_ID_IDX,
+ BNX2X_FCOE_ETH_CL_ID_IDX,
+ BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
+};
-/* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID_IDX 2
-#define BNX2X_FCOE_ETH_CID 50
+#define BNX2X_CNIC_START_ETH_CID 48
+enum {
+ /* iSCSI L2 */
+ BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+ /* FCoE L2 */
+ BNX2X_FCOE_ETH_CID,
+};
/** Additional rings budgeting */
#ifdef BCM_CNIC
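
The bnx2x.h change swaps hand-numbered #defines for enums, so the iSCSI/FCoE client-ID indices renumber themselves and a trailing sentinel always equals the entry count (which the bnx2x_cmn.h hunk below then uses in place of NON_ETH_CONTEXT_USE). A toy version of that pattern, with names shortened but values as in the hunk:

#include <stdio.h>

enum {
        ISCSI_ETH_CL_ID_IDX,            /* 0 */
        FCOE_ETH_CL_ID_IDX,             /* 1 */
        MAX_CNIC_ETH_CL_ID_IDX,         /* 2: always the number of entries */
};

#define CNIC_START_ETH_CID 48
enum {
        ISCSI_ETH_CID = CNIC_START_ETH_CID,     /* 48 */
        FCOE_ETH_CID,                           /* 49 */
};

int main(void)
{
        printf("%d CNIC clients, CIDs %d..%d\n",
               MAX_CNIC_ETH_CL_ID_IDX, ISCSI_ETH_CID, FCOE_ETH_CID);
        return 0;
}
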
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e575e89c7d46..580b44edb066 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -454,7 +454,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
skb->data_len += frag_len;
- skb->truesize += frag_len;
+ skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
skb->len += frag_len;
frag_size -= frag_len;
@@ -2363,7 +2363,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Calculate the first sum - it's special */
for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
wnd_sum +=
- skb_shinfo(skb)->frags[frag_idx].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
/* If there was data on linear skb data - check it */
if (first_bd_sz > 0) {
@@ -2379,14 +2379,14 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
check all windows */
for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
wnd_sum +=
- skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
if (unlikely(wnd_sum < lso_mss)) {
to_copy = 1;
break;
}
wnd_sum -=
- skb_shinfo(skb)->frags[wnd_idx].size;
+ skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
}
} else {
/* in non-LSO too fragmented packet should always
@@ -2796,8 +2796,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size,
- DMA_TO_DEVICE);
+ mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
@@ -2821,8 +2821,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- tx_data_bd->nbytes = cpu_to_le16(frag->size);
- le16_add_cpu(&pkt_size, frag->size);
+ tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
+ le16_add_cpu(&pkt_size, skb_frag_size(frag));
nbd++;
DP(NETIF_MSG_TX_QUEUED,
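
Note: two distinct fixes ride in this bnx2x_cmn.c diff. The truesize accounting now charges the full SGE_PAGE_SIZE * PAGES_PER_SGE buffer that was consumed rather than just the bytes used (under-reporting truesize lets a socket hold more memory than it is billed for), and every direct frags[i].size access becomes skb_frag_size(), part of a tree-wide series that hides the frag size field behind accessors so its representation can change later. A standalone sketch of the accessor idea with stand-in types; the real helpers seen throughout this patch are skb_frag_size(), skb_frag_size_set() and skb_frag_size_add():

#include <stdio.h>

struct frag {
	unsigned int size;	/* private: touch only via the helpers */
};

static inline unsigned int frag_size(const struct frag *f)
{
	return f->size;		/* one place to change if the field moves */
}

static inline void frag_size_set(struct frag *f, unsigned int size)
{
	f->size = size;
}

static inline void frag_size_add(struct frag *f, int delta)
{
	f->size += delta;
}

int main(void)
{
	struct frag f;

	frag_size_set(&f, 1024);
	frag_size_add(&f, 476);
	printf("frag size: %u\n", frag_size(&f));
	return 0;
}
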
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 5b1f9b5ec499..283d663da180 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
return bp->cnic_base_cl_id + cl_idx +
- (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index fe712f955110..161cbbb4814a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5356,7 +5356,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pci_unmap_page(tp->pdev,
dma_unmap_addr(ri, mapping),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
while (ri->fragmented) {
@@ -6510,14 +6510,14 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
}
for (i = 0; i < last; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);
txb = &tnapi->tx_buffers[entry];
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
while (txb->fragmented) {
txb->fragmented = false;
@@ -6529,12 +6529,12 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
- struct sk_buff *skb,
+ struct sk_buff **pskb,
u32 *entry, u32 *budget,
u32 base_flags, u32 mss, u32 vlan)
{
struct tg3 *tp = tnapi->tp;
- struct sk_buff *new_skb;
+ struct sk_buff *new_skb, *skb = *pskb;
dma_addr_t new_addr = 0;
int ret = 0;
@@ -6576,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
}
dev_kfree_skb(skb);
-
+ *pskb = new_skb;
return ret;
}
@@ -6671,10 +6671,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 tcp_opt_len, hdr_len;
if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- dev_kfree_skb(skb);
- goto out_unlock;
- }
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto drop;
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
@@ -6746,10 +6744,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb);
mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
- goto out_unlock;
- }
+ if (pci_dma_mapping_error(tp->pdev, mapping))
+ goto drop;
+
tnapi->tx_buffers[entry].skb = skb;
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
@@ -6777,7 +6774,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
len, DMA_TO_DEVICE);
@@ -6803,9 +6800,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
entry = tnapi->tx_prod;
budget = tg3_tx_avail(tnapi);
- if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
base_flags, mss, vlan))
- goto out_unlock;
+ goto drop_nofree;
}
skb_tx_timestamp(skb);
@@ -6827,15 +6824,16 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
-out_unlock:
mmiowb();
-
return NETDEV_TX_OK;
dma_error:
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
- dev_kfree_skb(skb);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+drop:
+ dev_kfree_skb(skb);
+drop_nofree:
+ tp->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -10009,6 +10007,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
get_stat64(&hw_stats->rx_discards);
stats->rx_dropped = tp->rx_dropped;
+ stats->tx_dropped = tp->tx_dropped;
return stats;
}
@@ -15668,7 +15667,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->reset_task);
- if (!tg3_flag(tp, USE_PHYLIB)) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
tg3_mdio_fini(tp);
}
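
Note: besides the skb_frag_size() conversions, the tg3.c diff reworks buffer ownership around the 4GB/40-bit DMA workaround. tigon3_dma_hwbug_workaround() now takes struct sk_buff **pskb because it may free the original skb and substitute a linearized copy, and the caller must keep using the updated pointer; that is why the exit labels split into drop (free the skb we still own) and drop_nofree (the helper has already released it), both bumping the new tx_dropped counter that tg3_get_stats64() reports. The remove path also fixes an inverted USE_PHYLIB test so phy/mdio teardown runs only when phylib was actually in use. A standalone sketch of the double-pointer hand-back, with illustrative types:

#include <stdlib.h>

struct buf { size_t len; };

/* May free *pbuf and replace it; on success the caller's pointer is
 * updated, on failure the caller still owns the original object. */
static int replace_buf(struct buf **pbuf)
{
	struct buf *new = malloc(sizeof(*new));

	if (!new)
		return -1;
	new->len = (*pbuf)->len;
	free(*pbuf);		/* old object is gone ... */
	*pbuf = new;		/* ... caller now sees the new one */
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->len = 64;
	replace_buf(&b);	/* on failure b is untouched and still ours */
	free(b);		/* valid either way */
	return 0;
}
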
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d2976f39b2fc..f32f288134c7 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2990,6 +2990,7 @@ struct tg3 {
/* begin "everything else" cacheline(s) section */
unsigned long rx_dropped;
+ unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 2f4ced66612a..5d7872ecff52 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -116,7 +116,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
for (j = 0; j < frag; j++) {
dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
- skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE);
+ skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
dma_unmap_addr_set(&array[index], dma_addr, 0);
BNA_QE_INDX_ADD(index, 1, depth);
}
@@ -2741,8 +2741,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
wis_used = 1;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- u16 size = frag->size;
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ u16 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
unmap_prod = unmap_q->producer_index;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 1b0ba8c819f7..56624d303487 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -35,7 +35,7 @@
#include <asm/mach-types.h>
#include <mach/at91rm9200_emac.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/board.h>
#include "at91_ether.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 0a511c4a0472..f9b602300040 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1135,8 +1135,8 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
len -= SGE_TX_DESC_MAX_PLEN;
}
for (i = 0; nfrags--; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(frag);
while (len > SGE_TX_DESC_MAX_PLEN) {
count++;
len -= SGE_TX_DESC_MAX_PLEN;
@@ -1278,9 +1278,9 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
}
mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
desc_mapping = mapping;
- desc_len = frag->size;
+ desc_len = skb_frag_size(frag);
pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
&desc_mapping, &desc_len,
@@ -1290,7 +1290,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
nfrags == 0);
ce->skb = NULL;
dma_unmap_addr_set(ce, dma_addr, mapping);
- dma_unmap_len_set(ce, dma_len, frag->size);
+ dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
}
ce->skb = skb;
wmb();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 2f46b37e5d16..cfb60e1f51da 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -254,7 +254,7 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
while (frag_idx < nfrags && curflit < WR_FLITS) {
pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
- skb_shinfo(skb)->frags[frag_idx].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
PCI_DMA_TODEVICE);
j ^= 1;
if (j == 0) {
@@ -977,11 +977,11 @@ static inline unsigned int make_sgl(const struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
- sgp->len[j] = cpu_to_be32(frag->size);
+ sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
@@ -1544,7 +1544,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++)
- pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
+ pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
PCI_DMA_TODEVICE);
}
@@ -2118,7 +2118,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
rx_frag->page_offset = sd->pg_chunk.offset + offset;
- rx_frag->size = len;
+ skb_frag_size_set(rx_frag, len);
skb->len += len;
skb->data_len += len;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 223a7f72343b..0fe18850c838 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -326,7 +326,7 @@ struct sge_fl { /* SGE free-buffer queue state */
/* A packet gather list */
struct pkt_gl {
- skb_frag_t frags[MAX_SKB_FRAGS];
+ struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 56adf448b9fe..ddc16985d0f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -215,8 +215,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++) {
- *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
- DMA_TO_DEVICE);
+ *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto unwind;
}
@@ -224,7 +224,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
unwind:
while (fp-- > si->frags)
- dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
@@ -243,7 +243,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++)
- dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
+ dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
/**
@@ -717,7 +717,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
sgl->addr0 = cpu_to_be64(addr[0] + start);
nfrags++;
} else {
- sgl->len0 = htonl(si->frags[0].size);
+ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
sgl->addr0 = cpu_to_be64(addr[1]);
}
@@ -732,13 +732,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to->addr[0] = cpu_to_be64(addr[i]);
to->addr[1] = cpu_to_be64(addr[++i]);
}
if (nfrags) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]);
}
@@ -1409,22 +1409,23 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL(cxgb4_ofld_send);
-static inline void copy_frags(struct skb_shared_info *ssi,
+static inline void copy_frags(struct sk_buff *skb,
const struct pkt_gl *gl, unsigned int offset)
{
- unsigned int n;
+ int i;
/* usually there's just one frag */
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
- ssi->frags[0].size = gl->frags[0].size - offset;
- ssi->nr_frags = gl->nfrags;
- n = gl->nfrags - 1;
- if (n)
- memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+ __skb_fill_page_desc(skb, 0, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ skb_shinfo(skb)->nr_frags = gl->nfrags;
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, i, gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
/* get a reference to the last page, we don't own it */
- get_page(gl->frags[n].page);
+ get_page(gl->frags[gl->nfrags - 1].page);
}
/**
@@ -1459,7 +1460,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
__skb_put(skb, pull_len);
skb_copy_to_linear_data(skb, gl->va, pull_len);
- copy_frags(skb_shinfo(skb), gl, pull_len);
+ copy_frags(skb, gl, pull_len);
skb->len = gl->tot_len;
skb->data_len = skb->len - pull_len;
skb->truesize += skb->data_len;
@@ -1478,7 +1479,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
static void t4_pktgl_free(const struct pkt_gl *gl)
{
int n;
- const skb_frag_t *p;
+ const struct page_frag *p;
for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
put_page(p->page);
@@ -1522,7 +1523,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
return;
}
- copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
+ copy_frags(skb, gl, RX_PKT_PAD);
skb->len = gl->tot_len - RX_PKT_PAD;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
@@ -1698,7 +1699,7 @@ static int process_responses(struct sge_rspq *q, int budget)
rmb();
rsp_type = RSPD_TYPE(rc->type_gen);
if (likely(rsp_type == RSP_TYPE_FLBUF)) {
- skb_frag_t *fp;
+ struct page_frag *fp;
struct pkt_gl si;
const struct rx_sw_desc *rsd;
u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
@@ -1717,7 +1718,7 @@ static int process_responses(struct sge_rspq *q, int budget)
rsd = &rxq->fl.sdesc[rxq->fl.cidx];
bufsz = get_buf_size(rsd);
fp->page = rsd->page;
- fp->page_offset = q->offset;
+ fp->offset = q->offset;
fp->size = min(bufsz, len);
len -= fp->size;
if (!len)
@@ -1734,7 +1735,7 @@ static int process_responses(struct sge_rspq *q, int budget)
fp->size, DMA_FROM_DEVICE);
si.va = page_address(si.frags[0].page) +
- si.frags[0].page_offset;
+ si.frags[0].offset;
prefetch(si.va);
si.nfrags = frags + 1;
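
Note: the cxgb4 change is a type split. The driver's internal gather list (struct pkt_gl) stops borrowing skb_frag_t for its {page, offset, size} triples and uses struct page_frag, and copy_frags() now takes the skb itself so the frag array is populated only through __skb_fill_page_desc() instead of by memcpy()ing between the two layouts; that keeps the driver correct if skb frag internals change. A toy model with stand-in types (none of these are the kernel's):

struct page;				/* opaque, as in the kernel */
struct page_frag { struct page *page; unsigned int offset, size; };
struct skb_frag { struct page *page; unsigned int off, len; };
struct toy_skb { struct skb_frag frags[17]; unsigned int nr_frags; };

/* analogue of __skb_fill_page_desc(): the only writer of skb frags */
static void fill_desc(struct toy_skb *skb, unsigned int i, struct page *p,
		      unsigned int off, unsigned int size)
{
	skb->frags[i].page = p;
	skb->frags[i].off = off;
	skb->frags[i].len = size;
}

static void copy_frags(struct toy_skb *skb, const struct page_frag *gl,
		       unsigned int nfrags, unsigned int offset)
{
	unsigned int i;

	/* first frag: skip the bytes already pulled into the linear area */
	fill_desc(skb, 0, gl[0].page, gl[0].offset + offset,
		  gl[0].size - offset);
	for (i = 1; i < nfrags; i++)
		fill_desc(skb, i, gl[i].page, gl[i].offset, gl[i].size);
	skb->nr_frags = nfrags;
}
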
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 594334d5c711..611396c4b381 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -144,7 +144,7 @@ struct sge_fl {
* An ingress packet gather list.
*/
struct pkt_gl {
- skb_frag_t frags[MAX_SKB_FRAGS];
+ struct page_frag frags[MAX_SKB_FRAGS];
void *va; /* virtual address of first byte */
unsigned int nfrags; /* # of fragments */
unsigned int tot_len; /* total length of fragments */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index cffb328c46c3..8d5d55ad102d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -296,8 +296,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
si = skb_shinfo(skb);
end = &si->frags[si->nr_frags];
for (fp = si->frags; fp < end; fp++) {
- *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
- DMA_TO_DEVICE);
+ *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, *addr))
goto unwind;
}
@@ -305,7 +305,7 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
unwind:
while (fp-- > si->frags)
- dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
@@ -899,7 +899,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
sgl->addr0 = cpu_to_be64(addr[0] + start);
nfrags++;
} else {
- sgl->len0 = htonl(si->frags[0].size);
+ sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
sgl->addr0 = cpu_to_be64(addr[1]);
}
@@ -915,13 +915,13 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
- to->len[1] = cpu_to_be32(si->frags[++i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
+ to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
to->addr[0] = cpu_to_be64(addr[i]);
to->addr[1] = cpu_to_be64(addr[++i]);
}
if (nfrags) {
- to->len[0] = cpu_to_be32(si->frags[i].size);
+ to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]);
}
@@ -1357,6 +1357,35 @@ out_free:
}
/**
+ * copy_frags - copy fragments from gather list into skb_shared_info
+ * @skb: destination skb
+ * @gl: source internal packet gather list
+ * @offset: packet start offset in first page
+ *
+ * Copy an internal packet gather list into a Linux skb_shared_info
+ * structure.
+ */
+static inline void copy_frags(struct sk_buff *skb,
+ const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ int i;
+
+ /* usually there's just one frag */
+ __skb_fill_page_desc(skb, 0, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ skb_shinfo(skb)->nr_frags = gl->nfrags;
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, i, gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+/**
* t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
* @gl: the gather list
* @skb_len: size of sk_buff main body if it carries fragments
@@ -1369,7 +1398,6 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len)
{
struct sk_buff *skb;
- struct skb_shared_info *ssi;
/*
* If the ingress packet is small enough, allocate an skb large enough
@@ -1396,21 +1424,10 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
__skb_put(skb, pull_len);
skb_copy_to_linear_data(skb, gl->va, pull_len);
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
- ssi->frags[0].size = gl->frags[0].size - pull_len;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
-
+ copy_frags(skb, gl, pull_len);
skb->len = gl->tot_len;
skb->data_len = skb->len - pull_len;
skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
}
out:
@@ -1434,35 +1451,6 @@ void t4vf_pktgl_free(const struct pkt_gl *gl)
}
/**
- * copy_frags - copy fragments from gather list into skb_shared_info
- * @si: destination skb shared info structure
- * @gl: source internal packet gather list
- * @offset: packet start offset in first page
- *
- * Copy an internal packet gather list into a Linux skb_shared_info
- * structure.
- */
-static inline void copy_frags(struct skb_shared_info *si,
- const struct pkt_gl *gl,
- unsigned int offset)
-{
- unsigned int n;
-
- /* usually there's just one frag */
- si->frags[0].page = gl->frags[0].page;
- si->frags[0].page_offset = gl->frags[0].page_offset + offset;
- si->frags[0].size = gl->frags[0].size - offset;
- si->nr_frags = gl->nfrags;
-
- n = gl->nfrags - 1;
- if (n)
- memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
-
- /* get a reference to the last page, we don't own it */
- get_page(gl->frags[n].page);
-}
-
-/**
* do_gro - perform Generic Receive Offload ingress packet processing
* @rxq: ingress RX Ethernet Queue
* @gl: gather list for ingress packet
@@ -1484,7 +1472,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
return;
}
- copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
+ copy_frags(skb, gl, PKTSHIFT);
skb->len = gl->tot_len - PKTSHIFT;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
@@ -1667,7 +1655,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
rmb();
rsp_type = RSPD_TYPE(rc->type_gen);
if (likely(rsp_type == RSP_TYPE_FLBUF)) {
- skb_frag_t *fp;
+ struct page_frag *fp;
struct pkt_gl gl;
const struct rx_sw_desc *sdesc;
u32 bufsz, frag;
@@ -1701,7 +1689,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
bufsz = get_buf_size(sdesc);
fp->page = sdesc->page;
- fp->page_offset = rspq->offset;
+ fp->offset = rspq->offset;
fp->size = min(bufsz, len);
len -= fp->size;
if (!len)
@@ -1719,7 +1707,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
get_buf_addr(sdesc),
fp->size, DMA_FROM_DEVICE);
gl.va = (page_address(gl.frags[0].page) +
- gl.frags[0].page_offset);
+ gl.frags[0].offset);
prefetch(gl.va);
/*
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index e9386ef524aa..6cbb81ccc02e 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -5,7 +5,8 @@
config NET_VENDOR_CIRRUS
bool "Cirrus devices"
default y
- depends on ARM && ARCH_EP93XX
+ depends on ISA || EISA || MACH_IXDP2351 || ARCH_IXDP2X01 \
+ || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,6 +19,25 @@ config NET_VENDOR_CIRRUS
if NET_VENDOR_CIRRUS
+config CS89x0
+ tristate "CS89x0 support"
+ depends on (ISA || EISA || MACH_IXDP2351 \
+ || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
+ ---help---
+ Support for CS89x0 chipset based Ethernet cards. If you have a
+ network (Ethernet) card of this type, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto> as well as
+ <file:Documentation/networking/cs89x0.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called cs89x0.
+
+config CS89x0_NONISA_IRQ
+ def_bool y
+ depends on CS89x0 != n
+ depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
+
config EP93XX_ETH
tristate "EP93xx Ethernet support"
depends on ARM && ARCH_EP93XX
diff --git a/drivers/net/ethernet/cirrus/Makefile b/drivers/net/ethernet/cirrus/Makefile
index 9905ea20f9ff..14bd77e0cb57 100644
--- a/drivers/net/ethernet/cirrus/Makefile
+++ b/drivers/net/ethernet/cirrus/Makefile
@@ -2,4 +2,5 @@
# Makefile for the Cirrus network device drivers.
#
+obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
diff --git a/drivers/net/ethernet/apple/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index f328da24c8fa..f328da24c8fa 100644
--- a/drivers/net/ethernet/apple/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
diff --git a/drivers/net/ethernet/apple/cs89x0.h b/drivers/net/ethernet/cirrus/cs89x0.h
index 91423b70bb45..91423b70bb45 100644
--- a/drivers/net/ethernet/apple/cs89x0.h
+++ b/drivers/net/ethernet/cirrus/cs89x0.h
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 1bc908f595de..c3786fda11db 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -599,16 +599,16 @@ static inline void enic_queue_wq_skb_cont(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb,
unsigned int len_left, int loopback)
{
- skb_frag_t *frag;
+ const skb_frag_t *frag;
/* Queue additional data fragments */
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- len_left -= frag->size;
+ len_left -= skb_frag_size(frag);
enic_queue_wq_desc_cont(wq, skb,
skb_frag_dma_map(&enic->pdev->dev,
- frag, 0, frag->size,
+ frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE),
- frag->size,
+ skb_frag_size(frag),
(len_left == 0), /* EOP? */
loopback);
}
@@ -717,8 +717,8 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
* for additional data fragments
*/
for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
- len_left -= frag->size;
- frag_len_left = frag->size;
+ len_left -= skb_frag_size(frag);
+ frag_len_left = skb_frag_size(frag);
offset = 0;
while (frag_len_left) {
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
index 092c3faa882a..25b8deedbef8 100644
--- a/drivers/net/ethernet/dec/tulip/21142.c
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
Hardware Reference Manual" is currently available at :
http://developer.intel.com/design/network/manuals/278074.htm
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index fa5eee925f25..14d5b611783d 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -7,8 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
Please submit bug reports to http://bugzilla.kernel.org/.
*/
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 5350d753e0ff..4fb8c8c0a420 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -7,10 +7,7 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
Please submit bugs to http://bugzilla.kernel.org/ .
-
*/
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index 4bd13922875d..beeb17b52ad4 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 52d898bdbeb4..9c16e4ad02a6 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 93358ee4d830..04a7e477eaff 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -8,9 +8,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 2017faf2d0e6..19078d28ffb9 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 9db528967da9..fb3887c18dc6 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -7,9 +7,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 011f67c7ca47..9656dd0647d9 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -6,9 +6,6 @@
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
- Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
- for more information on this driver.
-
Please submit bugs to http://bugzilla.kernel.org/ .
*/
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e7b5218c784..e0ff96193c49 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -615,7 +615,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
return status;
}
-/* Uses mbox */
+/* Uses MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle)
{
@@ -623,10 +623,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -643,13 +646,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req->permanent = 0;
}
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1111,20 +1115,22 @@ err:
}
/* Create an rx filtering policy configuration on an i/f
- * Uses mbox
+ * Uses MCCQ
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
- u32 domain)
+ u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -1136,23 +1142,25 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
- req->pmac_invalid = pmac_invalid;
- if (!pmac_invalid)
+ if (mac)
memcpy(req->mac_addr, mac, ETH_ALEN);
+ else
+ req->pmac_invalid = true;
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- if (!pmac_invalid)
+ if (mac)
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
-/* Uses mbox */
+/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
@@ -1162,10 +1170,16 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
if (adapter->eeh_err)
return -EIO;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ if (!interface_id)
+ return 0;
- wrb = wrb_from_mbox(adapter);
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -1177,10 +1191,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
-
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1301,7 +1314,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
*link_speed = le16_to_cpu(resp->link_speed);
- *mac_speed = resp->mac_speed;
+ if (mac_speed)
+ *mac_speed = resp->mac_speed;
}
}
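
Note: the be_cmds.c hunks move be_cmd_mac_addr_query(), be_cmd_if_create() and be_cmd_if_destroy() off the bootstrap mailbox (a sleeping mutex, one command at a time) onto the MCC queue, which is shared with bottom-half context: the locking becomes spin_lock_bh(), and a full ring is reported as -EBUSY instead of blocking. Every converted command follows the same template, condensed here from the hunks above (kernel types as used in the driver, not standalone code):

static int issue_cmd(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;	/* ring full: fail fast, never sleep */
		goto err;
	}

	/* ... fill the embedded request payload ... */

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

The if_create change also folds the pmac_invalid flag into the mac argument: a NULL mac now means "no pmac", so callers pass NULL instead of carrying a separate boolean.
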
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index abaa90cbfea2..a35cd03fac4e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1046,6 +1046,12 @@ struct be_cmd_resp_modify_eq_delay {
/******************** Get FW Config *******************/
#define BE_FUNCTION_CAPS_RSS 0x2
+/* The HW can come up in any of the following multi-channel modes
+ * based on the skew/IPL.
+ */
+#define FLEX10_MODE 0x400
+#define VNIC_MODE 0x20000
+#define UMC_ENABLED 0x1000000
struct be_cmd_req_query_fw_cfg {
struct be_cmd_req_hdr hdr;
u32 rsvd[31];
@@ -1413,8 +1419,8 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
u32 pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, bool pmac_invalid,
- u32 *if_handle, u32 *pmac_id, u32 domain);
+ u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
+ u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 679b8041e43a..d6a232a300ad 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -114,6 +114,13 @@ static const char * const ue_status_hi_desc[] = {
"Unknown"
};
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter) {
+ return (adapter->function_mode & FLEX10_MODE ||
+ adapter->function_mode & VNIC_MODE ||
+ adapter->function_mode & UMC_ENABLED);
+}
+
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -636,17 +643,17 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag =
+ const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
- wrb_fill(wrb, busaddr, frag->size);
+ wrb_fill(wrb, busaddr, skb_frag_size(frag));
be_dws_cpu_to_le(wrb, sizeof(*wrb));
queue_head_inc(txq);
- copied += frag->size;
+ copied += skb_frag_size(frag);
}
if (dummy_wrb) {
@@ -796,7 +803,7 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
be_vid_config(adapter, false, 0);
}
-static void be_set_multicast_list(struct net_device *netdev)
+static void be_set_rx_mode(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1069,7 +1076,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1095,13 +1102,13 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
put_page(page_info->page);
}
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
skb->truesize += rx_frag_size;
@@ -1176,11 +1183,11 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
skb_frag_set_page(skb, j, page_info->page);
skb_shinfo(skb)->frags[j].page_offset =
page_info->page_offset;
- skb_shinfo(skb)->frags[j].size = 0;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
}
- skb_shinfo(skb)->frags[j].size += curr_frag_len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
skb->truesize += rx_frag_size;
remaining -= curr_frag_len;
index_inc(&rxcp->rxq_idx, rxq->len);
@@ -1289,7 +1296,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
if (rxcp->vlanf) {
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
- if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
rxcp->vlanf = 0;
if (!lancer_chip(adapter))
@@ -1633,6 +1640,17 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_queue_free(adapter, q);
}
+static int be_num_txqs_want(struct be_adapter *adapter)
+{
+ if ((num_vfs && adapter->sriov_enabled) ||
+ be_is_mc(adapter) ||
+ lancer_chip(adapter) || !be_physfn(adapter) ||
+ adapter->generation == BE_GEN2)
+ return 1;
+ else
+ return MAX_TX_QS;
+}
+
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
@@ -1640,6 +1658,11 @@ static int be_tx_queues_create(struct be_adapter *adapter)
struct be_tx_obj *txo;
u8 i;
+ adapter->num_tx_qs = be_num_txqs_want(adapter);
+ if (adapter->num_tx_qs != MAX_TX_QS)
+ netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_qs);
+
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
adapter->tx_eq.cur_eqd = 96;
@@ -1702,7 +1725,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+ !adapter->sriov_enabled && be_physfn(adapter) &&
+ !be_is_mc(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
@@ -2069,7 +2093,7 @@ done:
return;
}
-static void be_sriov_enable(struct be_adapter *adapter)
+static int be_sriov_enable(struct be_adapter *adapter)
{
be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
@@ -2091,8 +2115,17 @@ static void be_sriov_enable(struct be_adapter *adapter)
status = pci_enable_sriov(adapter->pdev, num_vfs);
adapter->sriov_enabled = status ? false : true;
+
+ if (adapter->sriov_enabled) {
+ adapter->vf_cfg = kcalloc(num_vfs,
+ sizeof(struct be_vf_cfg),
+ GFP_KERNEL);
+ if (!adapter->vf_cfg)
+ return -ENOMEM;
+ }
}
#endif
+ return 0;
}
static void be_sriov_disable(struct be_adapter *adapter)
@@ -2100,6 +2133,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
#ifdef CONFIG_PCI_IOV
if (adapter->sriov_enabled) {
pci_disable_sriov(adapter->pdev);
+ kfree(adapter->vf_cfg);
adapter->sriov_enabled = false;
}
#endif
@@ -2352,17 +2386,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
- if (be_physfn(adapter)) {
- status = be_vid_config(adapter, false, 0);
- if (status)
- goto err;
-
- status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
- if (status)
- goto err;
- }
-
return 0;
err:
be_close(adapter->netdev);
@@ -2416,7 +2439,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
*/
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
- u32 vf = 0;
+ u32 vf;
int status = 0;
u8 mac[ETH_ALEN];
@@ -2438,7 +2461,7 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
return status;
}
-static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
+static void be_vf_clear(struct be_adapter *adapter)
{
u32 vf;
@@ -2448,135 +2471,159 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
adapter->vf_cfg[vf].vf_if_handle,
adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
}
+
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle, vf + 1);
}
-static int be_setup(struct be_adapter *adapter)
+static int be_clear(struct be_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- u32 cap_flags, en_flags, vf = 0;
- int status;
- u8 mac[ETH_ALEN];
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ be_vf_clear(adapter);
- be_cmd_req_native_mode(adapter);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ be_mcc_queues_destroy(adapter);
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
- if (be_physfn(adapter)) {
- cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_PASS_L3L4_ERRORS;
- en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
-
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
- cap_flags |= BE_IF_FLAGS_RSS;
- en_flags |= BE_IF_FLAGS_RSS;
- }
- }
+ adapter->be3_native = false;
+ adapter->promiscuous = false;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, false/* pmac_invalid */,
- &adapter->if_handle, &adapter->pmac_id, 0);
- if (status != 0)
- goto do_none;
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+ return 0;
+}
- if (be_physfn(adapter)) {
- if (adapter->sriov_enabled) {
- while (vf < num_vfs) {
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST;
- status = be_cmd_if_create(adapter, cap_flags,
- en_flags, mac, true,
+static int be_vf_setup(struct be_adapter *adapter)
+{
+ u32 cap_flags, en_flags, vf;
+ u16 lnk_speed;
+ int status;
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
&adapter->vf_cfg[vf].vf_if_handle,
NULL, vf+1);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Interface Create failed for VF %d\n",
- vf);
- goto if_destroy;
- }
- adapter->vf_cfg[vf].vf_pmac_id =
- BE_INVALID_PMAC_ID;
- vf++;
- }
- }
- } else {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ if (status)
+ goto err;
+ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
}
+ if (!lancer_chip(adapter)) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
+ }
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
+ vf + 1);
+ if (status)
+ goto err;
+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ }
+ return 0;
+err:
+ return status;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 cap_flags, en_flags;
+ u32 tx_fc, rx_fc;
+ int status;
+ u8 mac[ETH_ALEN];
+
+ /* Allow all priorities by default. A GRP5 evt may modify this */
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->link_speed = -1;
+
+ be_cmd_req_native_mode(adapter);
+
status = be_tx_queues_create(adapter);
if (status != 0)
- goto if_destroy;
+ goto err;
status = be_rx_queues_create(adapter);
if (status != 0)
- goto tx_qs_destroy;
-
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
+ goto err;
status = be_mcc_queues_create(adapter);
if (status != 0)
- goto rx_qs_destroy;
-
- adapter->link_speed = -1;
+ goto err;
- be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
+ memset(mac, 0, ETH_ALEN);
+ status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
+ true /*permanent */, 0);
+ if (status)
+ return status;
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- pcie_set_readrq(adapter->pdev, 4096);
- return 0;
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
+ BE_IF_FLAGS_PROMISCUOUS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
+ cap_flags |= BE_IF_FLAGS_RSS;
+ en_flags |= BE_IF_FLAGS_RSS;
+ }
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ netdev->dev_addr, &adapter->if_handle,
+ &adapter->pmac_id, 0);
+ if (status != 0)
+ goto err;
-rx_qs_destroy:
- be_rx_queues_destroy(adapter);
-tx_qs_destroy:
- be_tx_queues_destroy(adapter);
-if_destroy:
- if (be_physfn(adapter) && adapter->sriov_enabled)
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- vf + 1);
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
-do_none:
- return status;
-}
+ /* For BEx, the VF's permanent mac queried from card is incorrect.
+ * Query the mac configured by the PF using if_handle
+ */
+ if (!be_physfn(adapter) && !lancer_chip(adapter)) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ if (!status) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ }
+ }
-static int be_clear(struct be_adapter *adapter)
-{
- int vf;
+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- if (be_physfn(adapter) && adapter->sriov_enabled)
- be_vf_eth_addr_rem(adapter);
+ status = be_vid_config(adapter, false, 0);
+ if (status)
+ goto err;
- be_mcc_queues_destroy(adapter);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
+ be_set_rx_mode(adapter->netdev);
- if (be_physfn(adapter) && adapter->sriov_enabled)
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- vf + 1);
+ status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
+ if (status)
+ goto err;
+ if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
+ status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
+ adapter->rx_fc);
+ if (status)
+ goto err;
+ }
- be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ pcie_set_readrq(adapter->pdev, 4096);
- adapter->be3_native = 0;
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ status = be_vf_setup(adapter);
+ if (status)
+ goto err;
+ }
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
return 0;
+err:
+ be_clear(adapter);
+ return status;
}
-
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -2915,7 +2962,7 @@ static struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
- .ndo_set_rx_mode = be_set_multicast_list,
+ .ndo_set_rx_mode = be_set_rx_mode,
.ndo_set_mac_address = be_mac_addr_set,
.ndo_change_mtu = be_change_mtu,
.ndo_get_stats64 = be_get_stats64,
@@ -2948,10 +2995,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- /* Default settings for Rx and Tx flow control */
- adapter->rx_fc = true;
- adapter->tx_fc = true;
-
netif_set_gso_max_size(netdev, 65535);
BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
@@ -3132,7 +3175,6 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
- kfree(adapter->vf_cfg);
be_sriov_disable(adapter);
be_msix_disable(adapter);
@@ -3147,31 +3189,13 @@ static void __devexit be_remove(struct pci_dev *pdev)
static int be_get_config(struct be_adapter *adapter)
{
int status;
- u8 mac[ETH_ALEN];
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
&adapter->function_mode, &adapter->function_caps);
if (status)
return status;
- memset(mac, 0, ETH_ALEN);
-
- /* A default permanent address is given to each VF for Lancer*/
- if (be_physfn(adapter) || lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
-
- if (status)
- return status;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
-
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
-
- if (adapter->function_mode & 0x400)
+ if (adapter->function_mode & FLEX10_MODE)
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
else
adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
@@ -3180,16 +3204,6 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- if ((num_vfs && adapter->sriov_enabled) ||
- (adapter->function_mode & 0x400) ||
- lancer_chip(adapter) || !be_physfn(adapter)) {
- adapter->num_tx_qs = 1;
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- } else {
- adapter->num_tx_qs = MAX_TX_QS;
- }
-
return 0;
}
@@ -3319,18 +3333,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
}
- be_sriov_enable(adapter);
- if (adapter->sriov_enabled) {
- adapter->vf_cfg = kcalloc(num_vfs,
- sizeof(struct be_vf_cfg), GFP_KERNEL);
-
- if (!adapter->vf_cfg)
- goto free_netdev;
- }
+ status = be_sriov_enable(adapter);
+ if (status)
+ goto free_netdev;
status = be_ctrl_init(adapter);
if (status)
- goto free_vf_cfg;
+ goto disable_sriov;
if (lancer_chip(adapter)) {
status = lancer_test_and_set_rdy_state(adapter);
@@ -3373,6 +3382,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_msix_enable(adapter);
INIT_DELAYED_WORK(&adapter->work, be_worker);
+ adapter->rx_fc = adapter->tx_fc = true;
status = be_setup(adapter);
if (status)
@@ -3383,33 +3393,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status != 0)
goto unsetup;
- if (be_physfn(adapter) && adapter->sriov_enabled) {
- u8 mac_speed;
- u16 vf, lnk_speed;
-
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto unreg_netdev;
- }
-
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_link_status_query(adapter, &mac_speed,
- &lnk_speed, vf + 1);
- if (!status)
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
- else
- goto unreg_netdev;
- }
- }
-
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
-unreg_netdev:
- unregister_netdev(netdev);
unsetup:
be_clear(adapter);
msix_disable:
@@ -3418,10 +3406,9 @@ stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
be_ctrl_cleanup(adapter);
-free_vf_cfg:
- kfree(adapter->vf_cfg);
-free_netdev:
+disable_sriov:
be_sriov_disable(adapter);
+free_netdev:
free_netdev(netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
@@ -3448,7 +3435,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
be_close(netdev);
rtnl_unlock();
}
- be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
be_clear(adapter);
be_msix_disable(adapter);
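
Note: the bulk of the be_main.c diff is a setup/teardown refactor. MAC query, interface creation, VLAN/RX-mode/flow-control programming and VF setup all move into a linear be_setup(), and instead of a ladder of partial-unwind labels (rx_qs_destroy/tx_qs_destroy/if_destroy/do_none) every failure point jumps to one err label that calls an idempotent be_clear(). A standalone sketch of that error-handling shape:

#include <stdio.h>

static int a_up, b_up;

static int init_a(void) { a_up = 1; return 0; }
static int init_b(void) { return -1; /* simulate a failure */ }

/* teardown must tolerate partially-initialized state */
static void clear_all(void)
{
	if (b_up)
		b_up = 0;
	if (a_up)
		a_up = 0;
}

static int setup(void)
{
	int status;

	status = init_a();
	if (status)
		goto err;
	status = init_b();
	if (status)
		goto err;
	return 0;
err:
	clear_all();	/* single undo path for every failure point */
	return status;
}

int main(void)
{
	printf("setup: %d (a_up=%d b_up=%d)\n", setup(), a_up, b_up);
	return 0;
}

The same commit moves the vf_cfg allocation inside be_sriov_enable() (which now returns a status) and frees it in be_sriov_disable(), so the array's lifetime matches SR-IOV enablement rather than the netdev's.
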
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 0b8e6a97a980..410d6a1984ed 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -33,7 +33,6 @@
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
#include <asm/ibmebus.h>
#include <asm/abs_addr.h>
@@ -58,8 +57,6 @@
#define EHEA_MIN_ENTRIES_QP 127
#define EHEA_SMALL_QUEUES
-#define EHEA_NUM_TX_QP 1
-#define EHEA_LRO_MAX_AGGR 64
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
@@ -83,18 +80,16 @@
#define EHEA_SG_RQ3 0
#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
-#define EHEA_RQ2_PKT_SIZE 1522
+#define EHEA_RQ2_PKT_SIZE 2048
#define EHEA_L_PKT_SIZE 256 /* low latency */
-#define MAX_LRO_DESCRIPTORS 8
-
/* Send completion signaling */
/* Protection Domain Identifier */
#define EHEA_PD_ID 0xaabcdeff
#define EHEA_RQ2_THRESHOLD 1
-#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
+#define EHEA_RQ3_THRESHOLD 4 /* use RQ3 threshold of 2048 bytes */
#define EHEA_SPEED_10G 10000
#define EHEA_SPEED_1G 1000
@@ -363,7 +358,6 @@ struct ehea_port_res {
struct port_stats p_stats;
struct ehea_mr send_mr; /* send memory region */
struct ehea_mr recv_mr; /* receive memory region */
- spinlock_t xmit_lock;
struct ehea_port *port;
char int_recv_name[EHEA_IRQ_NAME_SIZE];
char int_send_name[EHEA_IRQ_NAME_SIZE];
@@ -376,8 +370,6 @@ struct ehea_port_res {
struct ehea_q_skb_arr rq3_skba;
struct ehea_q_skb_arr sq_skba;
int sq_skba_size;
- spinlock_t netif_queue;
- int queue_stopped;
int swqe_refill_th;
atomic_t swqe_avail;
int swqe_ll_count;
@@ -386,9 +378,6 @@ struct ehea_port_res {
u64 tx_bytes;
u64 rx_packets;
u64 rx_bytes;
- u32 poll_counter;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
int sq_restart_flag;
};
@@ -453,7 +442,7 @@ struct ehea_bcmc_reg_array {
struct ehea_port {
struct ehea_adapter *adapter; /* adapter that owns this port */
struct net_device *netdev;
- struct net_device_stats stats;
+ struct rtnl_link_stats64 stats;
struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
struct platform_device ofdev; /* Open Firmware Device */
struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
@@ -464,8 +453,6 @@ struct ehea_port {
char int_aff_name[EHEA_IRQ_NAME_SIZE];
int allmulti; /* Indicates IFF_ALLMULTI state */
int promisc; /* Indicates IFF_PROMISC state */
- int num_tx_qps;
- int num_add_tx_qps;
int num_mcs;
int resets;
unsigned long flags;
@@ -475,7 +462,6 @@ struct ehea_port {
u32 msg_enable;
u32 sig_comp_iv;
u32 state;
- u32 lro_max_aggr;
u8 phy_link;
u8 full_duplex;
u8 autoneg;
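Replacing the embedded net_device_stats with rtnl_link_stats64 pairs with the ndo_get_stats64 handler in the ehea_main.c hunks below, which fills the 64-bit struct supplied by the stack. A hedged sketch of that aggregation, using a hypothetical per-ring counter struct rather than ehea's ehea_port_res:

#include <linux/netdevice.h>

struct example_ring { u64 rx_packets, rx_bytes, tx_packets, tx_bytes; };
struct example_priv { struct example_ring ring[8]; int num_rings; };

/* Sum per-ring counters into the 64-bit stats struct the stack hands us. */
static struct rtnl_link_stats64 *example_get_stats64(struct net_device *dev,
						     struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->num_rings; i++) {
		stats->rx_packets += priv->ring[i].rx_packets;
		stats->rx_bytes   += priv->ring[i].rx_bytes;
		stats->tx_packets += priv->ring[i].tx_packets;
		stats->tx_bytes   += priv->ring[i].tx_bytes;
	}
	return stats;
}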
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 7f642aef5e82..05b7359bde8d 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -180,7 +180,7 @@ static void ehea_set_msglevel(struct net_device *dev, u32 value)
port->msg_enable = value;
}
-static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
+static const char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"sig_comp_iv"},
{"swqe_refill_th"},
{"port resets"},
@@ -189,7 +189,6 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"IP cksum errors"},
{"Frame cksum errors"},
{"num SQ stopped"},
- {"SQ stopped"},
{"PR0 free_swqes"},
{"PR1 free_swqes"},
{"PR2 free_swqes"},
@@ -198,9 +197,14 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"PR5 free_swqes"},
{"PR6 free_swqes"},
{"PR7 free_swqes"},
- {"LRO aggregated"},
- {"LRO flushed"},
- {"LRO no_desc"},
+ {"PR8 free_swqes"},
+ {"PR9 free_swqes"},
+ {"PR10 free_swqes"},
+ {"PR11 free_swqes"},
+ {"PR12 free_swqes"},
+ {"PR13 free_swqes"},
+ {"PR14 free_swqes"},
+ {"PR15 free_swqes"},
};
static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -255,25 +259,8 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
tmp += port->port_res[k].p_stats.queue_stopped;
data[i++] = tmp;
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].queue_stopped;
- data[i++] = tmp;
-
- for (k = 0; k < 8; k++)
+ for (k = 0; k < 16; k++)
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.aggregated;
- data[i++] = tmp;
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.flushed;
- data[i++] = tmp;
-
- for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
- tmp |= port->port_res[k].lro_mgr.stats.no_desc;
- data[i++] = tmp;
-
}
const struct ethtool_ops ehea_ethtool_ops = {
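The string table and ehea_get_ethtool_stats() must stay in lockstep: the PR8..PR15 keys added above only line up because the value loop now emits 16 swqe_avail readings in the same order. A minimal sketch of how ethtool pairs keys with values, with illustrative counters rather than the ehea ones:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Keys and values must be emitted in the same order and count. */
static const char example_stat_keys[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

static int example_get_sset_count(struct net_device *dev, int sset)
{
	return sset == ETH_SS_STATS ? ARRAY_SIZE(example_stat_keys) : -EOPNOTSUPP;
}

static void example_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		memcpy(data, example_stat_keys, sizeof(example_stat_keys));
}

static void example_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *st, u64 *data)
{
	int i = 0;

	data[i++] = dev->stats.rx_packets;	/* matches "rx_packets" */
	data[i++] = dev->stats.tx_packets;	/* matches "tx_packets" */
}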
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
index 567981b4b2cc..1a2fe4dc3eb3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_hw.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -210,36 +210,11 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}
-#define epa_store_eq(epa, offset, value)\
- epa_store(epa, EQTEMM_OFFSET(offset), value)
-#define epa_load_eq(epa, offset)\
- epa_load(epa, EQTEMM_OFFSET(offset))
-
#define epa_store_cq(epa, offset, value)\
epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
epa_load(epa, CQTEMM_OFFSET(offset))
-#define epa_store_qp(epa, offset, value)\
- epa_store(epa, QPTEMM_OFFSET(offset), value)
-#define epa_load_qp(epa, offset)\
- epa_load(epa, QPTEMM_OFFSET(offset))
-
-#define epa_store_qped(epa, offset, value)\
- epa_store(epa, QPEDMM_OFFSET(offset), value)
-#define epa_load_qped(epa, offset)\
- epa_load(epa, QPEDMM_OFFSET(offset))
-
-#define epa_store_mrmw(epa, offset, value)\
- epa_store(epa, MRMWMM_OFFSET(offset), value)
-#define epa_load_mrmw(epa, offset)\
- epa_load(epa, MRMWMM_OFFSET(offset))
-
-#define epa_store_base(epa, offset, value)\
- epa_store(epa, HCAGR_OFFSET(offset), value)
-#define epa_load_base(epa, offset)\
- epa_load(epa, HCAGR_OFFSET(offset))
-
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
struct h_epa epa = qp->epas.kernel;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index dfefe809c485..37b70f7052b6 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -61,10 +61,7 @@ static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs;
-static int use_lro;
-static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
-static int num_tx_qps = EHEA_NUM_TX_QP;
+static int use_mcs = 1;
static int prop_carrier_state;
module_param(msg_level, int, 0);
@@ -74,11 +71,7 @@ module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
-module_param(use_lro, int, 0);
-module_param(lro_max_aggr, int, 0);
-module_param(num_tx_qps, int, 0);
-MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
@@ -94,12 +87,8 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
"[2^x - 1], x = [6..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
-
-MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
- __MODULE_STRING(EHEA_LRO_MAX_AGGR));
-MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
- "Default = 0");
+MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
+ "Default = 1");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
@@ -173,7 +162,7 @@ static void ehea_update_firmware_handles(void)
continue;
num_ports++;
- num_portres += port->num_def_qps + port->num_add_tx_qps;
+ num_portres += port->num_def_qps;
}
}
@@ -199,9 +188,7 @@ static void ehea_update_firmware_handles(void)
(num_ports == 0))
continue;
- for (l = 0;
- l < port->num_def_qps + port->num_add_tx_qps;
- l++) {
+ for (l = 0; l < port->num_def_qps; l++) {
struct ehea_port_res *pr = &port->port_res[l];
arr[i].adh = adapter->handle;
@@ -327,10 +314,10 @@ out:
spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
-static struct net_device_stats *ehea_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct ehea_port *port = netdev_priv(dev);
- struct net_device_stats *stats = &port->stats;
u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
int i;
@@ -339,7 +326,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
rx_bytes += port->port_res[i].rx_bytes;
}
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
tx_bytes += port->port_res[i].tx_bytes;
}
@@ -357,7 +344,7 @@ static void ehea_update_stats(struct work_struct *work)
struct ehea_port *port =
container_of(work, struct ehea_port, stats_work.work);
struct net_device *dev = port->netdev;
- struct net_device_stats *stats = &port->stats;
+ struct rtnl_link_stats64 *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
u64 hret;
@@ -551,7 +538,8 @@ static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
}
static inline void ehea_fill_skb(struct net_device *dev,
- struct sk_buff *skb, struct ehea_cqe *cqe)
+ struct sk_buff *skb, struct ehea_cqe *cqe,
+ struct ehea_port_res *pr)
{
int length = cqe->num_bytes_transfered - 4; /*remove CRC */
@@ -565,6 +553,8 @@ static inline void ehea_fill_skb(struct net_device *dev,
skb->csum = csum_unfold(~cqe->inet_checksum_value);
} else
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
@@ -657,49 +647,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
return 0;
}
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- struct ehea_cqe *cqe = priv;
- unsigned int ip_len;
- struct iphdr *iph;
-
- /* non tcp/udp packets */
- if (!cqe->header_length)
- return -1;
-
- /* non tcp packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if ip header and tcp header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
-static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
- struct sk_buff *skb)
-{
- if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
- __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
-
- if (skb->dev->features & NETIF_F_LRO)
- lro_receive_skb(&pr->lro_mgr, skb, cqe);
- else
- netif_receive_skb(skb);
-}
-
static int ehea_proc_rwqes(struct net_device *dev,
struct ehea_port_res *pr,
int budget)
@@ -750,7 +697,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
} else if (rq == 2) {
/* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
@@ -760,7 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
"rq2: skb=NULL\n");
break;
}
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
processed_rq2++;
} else {
/* RQ3 */
@@ -771,12 +718,16 @@ static int ehea_proc_rwqes(struct net_device *dev,
"rq3: skb=NULL\n");
break;
}
- ehea_fill_skb(dev, skb, cqe);
+ ehea_fill_skb(dev, skb, cqe, pr);
processed_rq3++;
}
processed_bytes += skb->len;
- ehea_proc_skb(pr, cqe, skb);
+
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+
+ napi_gro_receive(&pr->napi, skb);
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -787,8 +738,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (dev->features & NETIF_F_LRO)
- lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
pr->rx_bytes += processed_bytes;
@@ -806,7 +755,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
pr->sq_restart_flag = 0;
}
@@ -819,7 +768,7 @@ static void check_sqs(struct ehea_port *port)
int swqe_index;
int i, k;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int ret;
k = 0;
@@ -857,7 +806,8 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
int cqe_counter = 0;
int swqe_av = 0;
int index;
- unsigned long flags;
+ struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
+ pr - &pr->port->port_res[0]);
cqe = ehea_poll_cq(send_cq);
while (cqe && (quota > 0)) {
@@ -907,20 +857,20 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
ehea_update_feca(send_cq, cqe_counter);
atomic_add(swqe_av, &pr->swqe_avail);
- spin_lock_irqsave(&pr->netif_queue, flags);
-
- if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
- >= pr->swqe_refill_th)) {
- netif_wake_queue(pr->port->netdev);
- pr->queue_stopped = 0;
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+
wake_up(&pr->port->swqe_avail_wq);
return cqe;
}
-#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
static int ehea_poll(struct napi_struct *napi, int budget)
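The wake path above is double-checked: test the queue state locklessly, then retest under __netif_tx_lock() so a concurrent xmit cannot race the wake. A minimal sketch of the same pattern, assuming a driver-private ring with an atomic free-descriptor count and a wake threshold (names are illustrative):

#include <linux/smp.h>
#include <linux/netdevice.h>

struct example_ring { atomic_t free_descs; int wake_thresh; };

/* Called from the TX completion path after descriptors are reclaimed. */
static void example_maybe_wake(struct net_device *dev,
			       struct example_ring *ring, int qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     atomic_read(&ring->free_descs) >= ring->wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Recheck under the lock: xmit may have run meanwhile. */
		if (netif_tx_queue_stopped(txq) &&
		    atomic_read(&ring->free_descs) >= ring->wake_thresh)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}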
@@ -930,18 +880,13 @@ static int ehea_poll(struct napi_struct *napi, int budget)
struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL;
- int force_irq, wqe_index;
+ int wqe_index;
int rx = 0;
- force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
+ rx += ehea_proc_rwqes(dev, pr, budget - rx);
- if (!force_irq)
- rx += ehea_proc_rwqes(dev, pr, budget - rx);
-
- while ((rx != budget) || force_irq) {
- pr->poll_counter = 0;
- force_irq = 0;
+ while (rx != budget) {
napi_complete(napi);
ehea_reset_cq_ep(pr->recv_cq);
ehea_reset_cq_ep(pr->send_cq);
@@ -961,7 +906,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
rx += ehea_proc_rwqes(dev, pr, budget - rx);
}
- pr->poll_counter++;
return rx;
}
@@ -1113,13 +1057,6 @@ int ehea_sense_port_attr(struct ehea_port *port)
goto out_free;
}
- port->num_tx_qps = num_tx_qps;
-
- if (port->num_def_qps >= port->num_tx_qps)
- port->num_add_tx_qps = 0;
- else
- port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
-
ret = 0;
out_free:
if (ret || netif_msg_probe(port))
@@ -1251,7 +1188,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
netif_info(port, link, dev,
"Logical port down\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
@@ -1282,7 +1219,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
case EHEA_EC_PORT_MALFUNC:
netdev_info(dev, "Port malfunction\n");
netif_carrier_off(dev);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
break;
default:
netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
@@ -1360,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
port->qp_eq->attr.ist1);
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
pr = &port->port_res[i];
snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
"%s-queue%d", dev->name, i);
@@ -1403,7 +1340,7 @@ static void ehea_free_interrupts(struct net_device *dev)
/* send */
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
pr = &port->port_res[i];
ibmebus_free_irq(pr->eq->attr.ist1, pr);
netif_info(port, intr, dev,
@@ -1534,8 +1471,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
pr->rx_packets = rx_packets;
pr->port = port;
- spin_lock_init(&pr->xmit_lock);
- spin_lock_init(&pr->netif_queue);
pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
if (!pr->eq) {
@@ -1626,15 +1561,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
- pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
- pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- pr->lro_mgr.lro_arr = pr->lro_desc;
- pr->lro_mgr.get_skb_header = get_skb_hdr;
- pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- pr->lro_mgr.dev = port->netdev;
- pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
ret = 0;
goto out;
@@ -1691,96 +1617,35 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
return ret;
}
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->ip_start = skb_network_offset(skb);
- swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
- const struct sk_buff *skb)
-{
- swqe->tcp_offset =
- (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
- swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
-static void write_swqe2_TSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
-{
- struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
- u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
- int skb_data_size = skb_headlen(skb);
- int headersize;
-
- /* Packet is TCP with TSO enabled */
- swqe->tx_control |= EHEA_SWQE_TSO;
- swqe->mss = skb_shinfo(skb)->gso_size;
- /* copy only eth/ip/tcp headers to immediate data and
- * the rest of skb->data to sg1entry
- */
- headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-
- skb_data_size = skb_headlen(skb);
-
- if (skb_data_size >= headersize) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, headersize);
- swqe->immediate_data_length = headersize;
-
- if (skb_data_size > headersize) {
- /* set sg1entry data */
- sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - headersize;
- sg1entry->vaddr =
- ehea_map_vaddr(skb->data + headersize);
- swqe->descriptors++;
- }
- } else
- pr_err("cannot handle fragmented headers\n");
-}
-
-static void write_swqe2_nonTSO(struct sk_buff *skb,
- struct ehea_swqe *swqe, u32 lkey)
+static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
+ u32 lkey)
{
int skb_data_size = skb_headlen(skb);
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
+ unsigned int immediate_len = SWQE2_MAX_IMM;
+
+ swqe->descriptors = 0;
- /* Packet is any nonTSO type
- *
- * Copy as much as possible skb->data to immediate data and
- * the rest to sg1entry
- */
- if (skb_data_size >= SWQE2_MAX_IMM) {
- /* copy immediate data */
- skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
+ if (skb_is_gso(skb)) {
+ swqe->tx_control |= EHEA_SWQE_TSO;
+ swqe->mss = skb_shinfo(skb)->gso_size;
+ /*
+ * For TSO packets we only copy the headers into the
+ * immediate area.
+ */
+ immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ }
- swqe->immediate_data_length = SWQE2_MAX_IMM;
+ if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
+ skb_copy_from_linear_data(skb, imm_data, immediate_len);
+ swqe->immediate_data_length = immediate_len;
- if (skb_data_size > SWQE2_MAX_IMM) {
- /* copy sg1entry data */
+ if (skb_data_size > immediate_len) {
sg1entry->l_key = lkey;
- sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
+ sg1entry->len = skb_data_size - immediate_len;
sg1entry->vaddr =
- ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
+ ehea_map_vaddr(skb->data + immediate_len);
swqe->descriptors++;
}
} else {
@@ -1799,13 +1664,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
- swqe->descriptors = 0;
sg1entry_contains_frag_data = 0;
- if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
- write_swqe2_TSO(skb, swqe, lkey);
- else
- write_swqe2_nonTSO(skb, swqe, lkey);
+ write_swqe2_immediate(skb, swqe, lkey);
/* write descriptors */
if (nfrags > 0) {
@@ -1815,7 +1676,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
/* copy sg1entry data */
sg1entry->l_key = lkey;
- sg1entry->len = frag->size;
+ sg1entry->len = skb_frag_size(frag);
sg1entry->vaddr =
ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
@@ -1828,7 +1689,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sgentry = &sg_list[i - sg1entry_contains_frag_data];
sgentry->l_key = lkey;
- sgentry->len = frag->size;
+ sgentry->len = skb_frag_size(frag);
sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
swqe->descriptors++;
}
@@ -2120,41 +1981,44 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
- struct ehea_swqe *swqe, u32 lkey)
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
+ swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
- /* IPv4 */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
- write_ip_start_end(swqe, skb);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
- if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
- else
- write_udp_offset_end(swqe, skb);
- } else if (iph->protocol == IPPROTO_TCP) {
- write_tcp_offset_end(swqe, skb);
- }
+ swqe->ip_start = skb_network_offset(skb);
+ swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
- /* icmp (big data) and ip segmentation packets (all other ip
- packets) do not require any special handling */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT
- | EHEA_SWQE_DESCRIPTORS_PRESENT;
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct udphdr, check);
+ break;
+
+ case IPPROTO_TCP:
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+ swqe->tcp_offset = swqe->ip_end + 1 +
+ offsetof(struct tcphdr, check);
+ break;
}
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+ struct ehea_swqe *swqe, u32 lkey)
+{
+ swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+ xmit_common(skb, swqe);
write_swqe2_data(skb, dev, swqe, lkey);
}
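xmit_common() above only sets the checksum-offload bits when skb->ip_summed == CHECKSUM_PARTIAL, i.e. when the stack actually left a checksum for the hardware to fill. A sketch of that gate in isolation; the EX_DESC_* flag bits are hypothetical, not ehea's EHEA_SWQE_* values:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_ether.h>

#define EX_DESC_IP_CSUM  0x1	/* hypothetical descriptor flag bits */
#define EX_DESC_L4_CSUM  0x2

/* Only ask hardware to fill checksums the stack left unfinished. */
static u32 example_csum_flags(const struct sk_buff *skb)
{
	u32 flags = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		flags |= EX_DESC_IP_CSUM;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP ||
		    ip_hdr(skb)->protocol == IPPROTO_UDP)
			flags |= EX_DESC_L4_CSUM;
	}
	return flags;
}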
@@ -2162,105 +2026,30 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
struct ehea_swqe *swqe)
{
- int nfrags = skb_shinfo(skb)->nr_frags;
u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
- skb_frag_t *frag;
- int i;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- const struct iphdr *iph = ip_hdr(skb);
-
- /* IPv4 */
- write_ip_start_end(swqe, skb);
-
- if (iph->protocol == IPPROTO_TCP) {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- write_tcp_offset_end(swqe, skb);
+ xmit_common(skb, swqe);
- } else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
- /* IP fragment, so don't change cs */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IMM_DATA_PRESENT;
- else {
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_TCP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
-
- write_udp_offset_end(swqe, skb);
- }
- } else {
- /* icmp (big data) and
- ip segmentation packets (all other ip packets) */
- swqe->tx_control |= EHEA_SWQE_CRC
- | EHEA_SWQE_IP_CHECKSUM
- | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- } else {
- /* Other Ethernet Protocol */
- swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
- }
- /* copy (immediate) data */
- if (nfrags == 0) {
- /* data is in a single piece */
+ if (!skb->data_len)
skb_copy_from_linear_data(skb, imm_data, skb->len);
- } else {
- /* first copy data from the skb->data buffer ... */
- skb_copy_from_linear_data(skb, imm_data,
- skb_headlen(skb));
- imm_data += skb_headlen(skb);
+ else
+ skb_copy_bits(skb, 0, imm_data, skb->len);
- /* ... then copy data from the fragments */
- for (i = 0; i < nfrags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- memcpy(imm_data, skb_frag_address(frag), frag->size);
- imm_data += frag->size;
- }
- }
swqe->immediate_data_length = skb->len;
dev_kfree_skb(skb);
}
-static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
-{
- struct tcphdr *tcp;
- u32 tmp;
-
- if ((skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr *)(skb_network_header(skb) +
- (ip_hdr(skb)->ihl * 4));
- tmp = (tcp->source + (tcp->dest << 16)) % 31;
- tmp += ip_hdr(skb)->daddr % 31;
- return tmp % num_qps;
- } else
- return 0;
-}
-
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_swqe *swqe;
- unsigned long flags;
u32 lkey;
int swqe_index;
struct ehea_port_res *pr;
+ struct netdev_queue *txq;
- pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
-
- if (!spin_trylock(&pr->xmit_lock))
- return NETDEV_TX_BUSY;
-
- if (pr->queue_stopped) {
- spin_unlock(&pr->xmit_lock);
- return NETDEV_TX_BUSY;
- }
+ pr = &port->port_res[skb_get_queue_mapping(skb)];
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
swqe = ehea_get_swqe(pr->qp, &swqe_index);
memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -2310,23 +2099,16 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
ehea_dump(swqe, 512, "swqe");
if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
swqe->tx_control |= EHEA_SWQE_PURGE;
}
ehea_post_swqe(pr->qp, swqe);
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- spin_lock_irqsave(&pr->netif_queue, flags);
- if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
- pr->p_stats.queue_stopped++;
- netif_stop_queue(dev);
- pr->queue_stopped = 1;
- }
- spin_unlock_irqrestore(&pr->netif_queue, flags);
+ pr->p_stats.queue_stopped++;
+ netif_tx_stop_queue(txq);
}
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
- spin_unlock(&pr->xmit_lock);
return NETDEV_TX_OK;
}
@@ -2471,8 +2253,7 @@ out:
return ret;
}
-static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
- int add_tx_qps)
+static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
{
int ret, i;
struct port_res_cfg pr_cfg, pr_cfg_small_rx;
@@ -2505,7 +2286,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
if (ret)
goto out_clean_pr;
}
- for (i = def_qps; i < def_qps + add_tx_qps; i++) {
+ for (i = def_qps; i < def_qps; i++) {
ret = ehea_init_port_res(port, &port->port_res[i],
&pr_cfg_small_rx, i);
if (ret)
@@ -2528,7 +2309,7 @@ static int ehea_clean_all_portres(struct ehea_port *port)
int ret = 0;
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
ret |= ehea_clean_portres(port, &port->port_res[i]);
ret |= ehea_destroy_eq(port->qp_eq);
@@ -2560,8 +2341,7 @@ static int ehea_up(struct net_device *dev)
if (port->state == EHEA_PORT_UP)
return 0;
- ret = ehea_port_res_setup(port, port->num_def_qps,
- port->num_add_tx_qps);
+ ret = ehea_port_res_setup(port, port->num_def_qps);
if (ret) {
netdev_err(dev, "port_res_failed\n");
goto out;
@@ -2580,7 +2360,7 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
netdev_err(dev, "activate_qp failed\n");
@@ -2626,7 +2406,7 @@ static void port_napi_disable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
napi_disable(&port->port_res[i].napi);
}
@@ -2634,7 +2414,7 @@ static void port_napi_enable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++)
napi_enable(&port->port_res[i].napi);
}
@@ -2650,7 +2430,7 @@ static int ehea_open(struct net_device *dev)
ret = ehea_up(dev);
if (!ret) {
port_napi_enable(port);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
}
mutex_unlock(&port->port_lock);
@@ -2696,7 +2476,7 @@ static int ehea_stop(struct net_device *dev)
cancel_work_sync(&port->reset_task);
cancel_delayed_work_sync(&port->stats_work);
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
port_napi_disable(port);
ret = ehea_down(dev);
mutex_unlock(&port->port_lock);
@@ -2722,7 +2502,7 @@ static void ehea_flush_sq(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
int ret;
@@ -2756,7 +2536,7 @@ int ehea_stop_qps(struct net_device *dev)
goto out;
}
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
struct ehea_qp *qp = pr->qp;
@@ -2858,7 +2638,7 @@ int ehea_restart_qps(struct net_device *dev)
goto out;
}
- for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+ for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
struct ehea_qp *qp = pr->qp;
@@ -2920,7 +2700,7 @@ static void ehea_reset_port(struct work_struct *work)
mutex_lock(&dlpar_mem_lock);
port->resets++;
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
port_napi_disable(port);
@@ -2936,7 +2716,7 @@ static void ehea_reset_port(struct work_struct *work)
port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
out:
mutex_unlock(&port->port_lock);
mutex_unlock(&dlpar_mem_lock);
@@ -2963,7 +2743,7 @@ static void ehea_rereg_mrs(void)
if (dev->flags & IFF_UP) {
mutex_lock(&port->port_lock);
- netif_stop_queue(dev);
+ netif_tx_disable(dev);
ehea_flush_sq(port);
ret = ehea_stop_qps(dev);
if (ret) {
@@ -3008,7 +2788,7 @@ static void ehea_rereg_mrs(void)
if (!ret) {
check_sqs(port);
port_napi_enable(port);
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
} else {
netdev_err(dev, "Unable to restart QPS\n");
}
@@ -3163,7 +2943,7 @@ static const struct net_device_ops ehea_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ehea_netpoll,
#endif
- .ndo_get_stats = ehea_get_stats,
+ .ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = ehea_set_multicast_list,
@@ -3184,7 +2964,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
int jumbo;
/* allocate memory for the port structures */
- dev = alloc_etherdev(sizeof(struct ehea_port));
+ dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
if (!dev) {
pr_err("no mem for net_device\n");
@@ -3216,6 +2996,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
if (ret)
goto out_free_mc_list;
+ netif_set_real_num_rx_queues(dev, port->num_def_qps);
+ netif_set_real_num_tx_queues(dev, port->num_def_qps);
+
port_dev = ehea_register_port(port, dn);
if (!port_dev)
goto out_free_mc_list;
@@ -3228,17 +3011,16 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO
| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
- | NETIF_F_LLTX | NETIF_F_RXCSUM;
+ | NETIF_F_RXCSUM;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
- if (use_lro)
- dev->features |= NETIF_F_LRO;
-
INIT_WORK(&port->reset_task, ehea_reset_port);
INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
@@ -3252,8 +3034,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
goto out_unreg_port;
}
- port->lro_max_aggr = lro_max_aggr;
-
ret = ehea_get_jumboframe_status(port, &jumbo);
if (ret)
netdev_err(dev, "failed determining jumbo frame status\n");
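With inet_lro gone, the ehea receive path above hands every skb to napi_gro_receive() and lets core GRO do the aggregation that lro_receive_skb()/lro_flush_all() used to; no driver-side flush is needed at the end of the poll loop. A minimal sketch of the per-packet step (helper name and arguments are illustrative):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Deliver one received skb through GRO; the core handles aggregation
 * and flushing, so there is no per-driver flush after the poll loop. */
static void example_rx_one(struct napi_struct *napi, struct sk_buff *skb,
			   bool vlan_present, u16 vlan_tag)
{
	if (vlan_present)
		__vlan_hwaccel_put_tag(skb, vlan_tag);

	napi_gro_receive(napi, skb);
}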
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
index fddff8ec8cfd..337a47ecf4aa 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -107,7 +107,7 @@ struct ehea_swqe {
u8 immediate_data_length;
u8 tcp_offset;
u8 reserved2;
- u16 tcp_end;
+ u16 reserved2b;
u8 wrap_tag;
u8 descriptors; /* number of valid descriptors in WQE */
u16 reserved3;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3a033d9de5..ed79b2d3ad3e 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1453,7 +1453,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
/* skb fragments */
for (i = 0; i < nr_frags; ++i) {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
goto undo_frame;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4da972eaabb4..b1cd41b9c61c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1014,15 +1014,15 @@ retry_bounce:
/* Map the frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
goto map_failed_frags;
- descs[i+1].fields.flags_len = desc_flags | frag->size;
+ descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
descs[i+1].fields.address = dma_addr;
}
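The frag->size reads across emac, ibmveth and the Intel drivers all become skb_frag_size() calls, so the drivers keep working if the skb_frag_t layout changes. A sketch of the common mapping loop using the accessors (function name hypothetical):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Walk the paged fragments using accessors instead of poking
 * at skb_frag_t fields directly. */
static int example_map_frags(struct device *dev, struct sk_buff *skb,
			     dma_addr_t *addrs)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i] = skb_frag_dma_map(dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			return -ENOMEM;
	}
	return 0;
}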
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7b54d7246150..cf480b554622 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2894,10 +2894,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
offset = 0;
while (len) {
@@ -3183,7 +3183,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
if (adapter->pcix_82544)
count += nr_frags;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 035ce73c388e..a855db1ad249 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -47,7 +47,7 @@
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
@@ -4673,10 +4673,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
offset = 0;
while (len) {
@@ -4943,7 +4943,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
max_txd_pwr);
if (adapter->hw.mac.tx_pkt_filtering)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 6580cea796c5..7881fb95a25b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1051,7 +1051,10 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
/* Disabling VLAN filtering */
hw_dbg("Initializing the IEEE VLAN\n");
- igb_clear_vfta(hw);
+ if (hw->mac.type == e1000_i350)
+ igb_clear_vfta_i350(hw);
+ else
+ igb_clear_vfta(hw);
/* Setup the receive address */
igb_init_rx_addrs(hw, rar_count);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 872119d91afd..73aac082c44d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -117,6 +117,50 @@ static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
wrfl();
}
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while queries from the BMC or DMA are in progress, the VFTA in some
+ * cases won't be written.
+ */

+
+/**
+ * igb_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igb_clear_vfta_i350(struct e1000_hw *hw)
+{
+ u32 offset;
+ int i;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ for (i = 0; i < 10; i++)
+ array_wr32(E1000_VFTA, offset, 0);
+
+ wrfl();
+ }
+}
+
+/**
+ * igb_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ int i;
+
+ for (i = 0; i < 10; i++)
+ array_wr32(E1000_VFTA, offset, value);
+
+ wrfl();
+}
+
/**
* igb_init_rx_addrs - Initialize receive addresses
* @hw: pointer to the HW structure
@@ -155,9 +199,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
- u32 vfta = array_rd32(E1000_VFTA, index);
+ u32 vfta;
+ struct igb_adapter *adapter = hw->back;
s32 ret_val = 0;
+ vfta = adapter->shadow_vfta[index];
+
/* bit was set/cleared before we started */
if ((!!(vfta & mask)) == add) {
ret_val = -E1000_ERR_CONFIG;
@@ -167,8 +214,11 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
else
vfta &= ~mask;
}
-
- igb_write_vfta(hw, index, vfta);
+ if (hw->mac.type == e1000_i350)
+ igb_write_vfta_i350(hw, index, vfta);
+ else
+ igb_write_vfta(hw, index, vfta);
+ adapter->shadow_vfta[index] = vfta;
return ret_val;
}
@@ -191,6 +241,13 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
+ /*
+ * Alternate MAC address is handled by the option ROM for 82580
+ * and newer. SW support not required.
+ */
+ if (hw->mac.type >= e1000_82580)
+ goto out;
+
ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 4927f61fbbc8..e45996b4ea34 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -60,6 +60,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
+void igb_clear_vfta_i350(struct e1000_hw *hw);
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
void igb_config_collision_dist(struct e1000_hw *hw);
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
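Because the i350 errata means a VFTA read may not reflect the last write, igb now keeps a software shadow and read-modify-writes that instead of the register, pushing the result out through the repeated-write helper. A self-contained sketch of the idea, with a plain array standing in for the E1000_VFTA MMIO window (all names hypothetical):

#include <linux/types.h>

#define EX_VFTA_ENTRIES 128

/* Software shadow of a table the hardware may not latch reliably:
 * always read-modify-write the shadow, never the register. */
struct ex_hw { u32 shadow_vfta[EX_VFTA_ENTRIES]; };

static u32 ex_regs[EX_VFTA_ENTRIES];	/* stand-in for the VFTA MMIO array */

static void ex_write_vfta(struct ex_hw *hw, u32 index, u32 value)
{
	int i;

	/* Errata workaround: repeat the write so at least one sticks. */
	for (i = 0; i < 10; i++)
		ex_regs[index] = value;	/* array_wr32() in the real driver */

	hw->shadow_vfta[index] = value;
}

static void ex_vfta_set(struct ex_hw *hw, u32 vid, bool add)
{
	u32 index = vid >> 5;
	u32 mask = 1u << (vid & 0x1f);
	u32 vfta = hw->shadow_vfta[index];	/* trust the shadow, not HW */

	if (add)
		vfta |= mask;
	else
		vfta &= ~mask;
	ex_write_vfta(hw, index, vfta);
}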
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 74f2f11ac290..469d95eaa154 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -34,7 +34,7 @@
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 4c500a76972e..c69feebf2653 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -72,6 +72,8 @@ struct igb_adapter;
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
@@ -83,6 +85,7 @@ struct vf_data_storage {
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ struct pci_dev *vfdev;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -360,6 +363,7 @@ struct igb_adapter {
u32 rss_queues;
u32 wvbr;
int node;
+ u32 *shadow_vfta;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c10cc716fdec..ced544499f1b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -163,6 +163,12 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
+#ifdef CONFIG_PCI_IOV
+static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_find_enabled_vfs(struct igb_adapter *adapter);
+static int igb_check_vf_assignment(struct igb_adapter *adapter);
+#endif
+
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
@@ -198,6 +204,7 @@ static struct pci_error_handlers igb_err_handler = {
.resume = igb_io_resume,
};
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
static struct pci_driver igb_driver = {
.name = igb_driver_name,
@@ -1722,63 +1729,8 @@ void igb_reset(struct igb_adapter *adapter)
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
- if (hw->mac.type > e1000_82580) {
- if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
- /*
- * DMA Coalescing high water mark needs to be higher
- * than * the * Rx threshold. The Rx threshold is
- * currently * pba - 6, so we * should use a high water
- * mark of pba * - 4. */
- hwm = (pba - 4) << 10;
-
- reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
- & E1000_DMACR_DMACTHR_MASK);
-
- /* transition to L0x or L1 if available..*/
- reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
-
- /* watchdog timer= +-1000 usec in 32usec intervals */
- reg |= (1000 >> 5);
- wr32(E1000_DMACR, reg);
-
- /* no lower threshold to disable coalescing(smart fifb)
- * -UTRESH=0*/
- wr32(E1000_DMCRTRH, 0);
- /* set hwm to PBA - 2 * max frame size */
- wr32(E1000_FCRTC, hwm);
-
- /*
- * This sets the time to wait before requesting tran-
- * sition to * low power state to number of usecs needed
- * to receive 1 512 * byte frame at gigabit line rate
- */
- reg = rd32(E1000_DMCTLX);
- reg |= IGB_DMCTLX_DCFLUSH_DIS;
-
- /* Delay 255 usec before entering Lx state. */
- reg |= 0xFF;
- wr32(E1000_DMCTLX, reg);
-
- /* free space in Tx packet buffer to wake from DMAC */
- wr32(E1000_DMCTXTH,
- (IGB_MIN_TXPBSIZE -
- (IGB_TX_BUF_4096 + adapter->max_frame_size))
- >> 6);
-
- /* make low power state decision controlled by DMAC */
- reg = rd32(E1000_PCIEMISC);
- reg |= E1000_PCIEMISC_LX_DECISION;
- wr32(E1000_PCIEMISC, reg);
- } /* end if IGB_FLAG_DMAC set */
- }
- if (hw->mac.type == e1000_82580) {
- u32 reg = rd32(E1000_PCIEMISC);
- wr32(E1000_PCIEMISC,
- reg & ~E1000_PCIEMISC_LX_DECISION);
- }
+ igb_init_dmac(adapter, pba);
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -2232,8 +2184,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
/* disable iov and allow time for transactions to clear */
- pci_disable_sriov(pdev);
- msleep(500);
+ if (!igb_check_vf_assignment(adapter)) {
+ pci_disable_sriov(pdev);
+ msleep(500);
+ } else {
+ dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
+ }
kfree(adapter->vf_data);
adapter->vf_data = NULL;
@@ -2250,6 +2206,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+ kfree(adapter->shadow_vfta);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
@@ -2270,42 +2227,49 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
+ int old_vfs = igb_find_enabled_vfs(adapter);
+ int i;
- if (adapter->vfs_allocated_count) {
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage),
- GFP_KERNEL);
- /* if allocation failed then we do not support SR-IOV */
- if (!adapter->vf_data) {
- adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev, "Unable to allocate memory for VF "
- "Data Storage\n");
- }
+ if (old_vfs) {
+ dev_info(&pdev->dev, "%d pre-allocated VFs found - overriding "
+ "max_vfs setting of %d\n", old_vfs, max_vfs);
+ adapter->vfs_allocated_count = old_vfs;
}
- if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
-#endif /* CONFIG_PCI_IOV */
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ /* if allocation failed then we do not support SR-IOV */
+ if (!adapter->vf_data) {
adapter->vfs_allocated_count = 0;
-#ifdef CONFIG_PCI_IOV
- } else {
- unsigned char mac_addr[ETH_ALEN];
- int i;
- dev_info(&pdev->dev, "%d vfs allocated\n",
- adapter->vfs_allocated_count);
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- random_ether_addr(mac_addr);
- igb_set_vf_mac(adapter, i, mac_addr);
- }
- /* DMA Coalescing is not supported in IOV mode. */
- if (adapter->flags & IGB_FLAG_DMAC)
- adapter->flags &= ~IGB_FLAG_DMAC;
+ dev_err(&pdev->dev, "Unable to allocate memory for VF "
+ "Data Storage\n");
+ goto out;
}
+
+ if (!old_vfs) {
+ if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
+ goto err_out;
+ }
+ dev_info(&pdev->dev, "%d VFs allocated\n",
+ adapter->vfs_allocated_count);
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ goto out;
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+out:
+ return;
#endif /* CONFIG_PCI_IOV */
}
-
/**
* igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
* @adapter: board private structure to initialize
@@ -2475,6 +2439,11 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ /* Setup and initialize a copy of the hw vlan table array */
+ adapter->shadow_vfta = kzalloc(sizeof(u32) *
+ E1000_VLAN_FILTER_TBL_SIZE,
+ GFP_ATOMIC);
+
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -4254,7 +4223,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
i = 0;
}
- size = frag->size;
+ size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
@@ -4917,6 +4886,109 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
}
#endif /* CONFIG_IGB_DCA */
+#ifdef CONFIG_PCI_IOV
+static int igb_vf_configure(struct igb_adapter *adapter, int vf)
+{
+ unsigned char mac_addr[ETH_ALEN];
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pvfdev;
+ unsigned int device_id;
+ u16 thisvf_devfn;
+
+ random_ether_addr(mac_addr);
+ igb_set_vf_mac(adapter, vf, mac_addr);
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ device_id = IGB_82576_VF_DEV_ID;
+ /* VF Stride for 82576 is 2 */
+ thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
+ (pdev->devfn & 1);
+ break;
+ case e1000_i350:
+ device_id = IGB_I350_VF_DEV_ID;
+ /* VF Stride for I350 is 4 */
+ thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
+ (pdev->devfn & 3);
+ break;
+ default:
+ device_id = 0;
+ thisvf_devfn = 0;
+ break;
+ }
+
+ pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == thisvf_devfn)
+ break;
+ pvfdev = pci_get_device(hw->vendor_id,
+ device_id, pvfdev);
+ }
+
+ if (pvfdev)
+ adapter->vf_data[vf].vfdev = pvfdev;
+ else
+ dev_err(&pdev->dev,
+ "Couldn't find pci dev ptr for VF %4.4x\n",
+ thisvf_devfn);
+ return pvfdev != NULL;
+}
+
+static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *pvfdev;
+ u16 vf_devfn = 0;
+ u16 vf_stride;
+ unsigned int device_id;
+ int vfs_found = 0;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ device_id = IGB_82576_VF_DEV_ID;
+ /* VF Stride for 82576 is 2 */
+ vf_stride = 2;
+ break;
+ case e1000_i350:
+ device_id = IGB_I350_VF_DEV_ID;
+ /* VF Stride for I350 is 4 */
+ vf_stride = 4;
+ break;
+ default:
+ device_id = 0;
+ vf_stride = 0;
+ break;
+ }
+
+ vf_devfn = pdev->devfn + 0x80;
+ pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
+ while (pvfdev) {
+ if (pvfdev->devfn == vf_devfn)
+ vfs_found++;
+ vf_devfn += vf_stride;
+ pvfdev = pci_get_device(hw->vendor_id,
+ device_id, pvfdev);
+ }
+
+ return vfs_found;
+}
+
+static int igb_check_vf_assignment(struct igb_adapter *adapter)
+{
+ int i;
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ if (adapter->vf_data[i].vfdev) {
+ if (adapter->vf_data[i].vfdev->dev_flags &
+ PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+ }
+ return false;
+}
+
+#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -6978,4 +7050,70 @@ static void igb_vmm_control(struct igb_adapter *adapter)
}
}
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dmac_thr;
+ u16 hwm;
+
+ if (hw->mac.type > e1000_82580) {
+ if (adapter->flags & IGB_FLAG_DMAC) {
+ u32 reg;
+
+ /* force threshold to 0. */
+ wr32(E1000_DMCTXTH, 0);
+
+ /*
+ * DMA Coalescing high water mark needs to be higher
+ * than the RX threshold. set hwm to PBA - 2 * max
+ * frame size
+ */
+ hwm = pba - (2 * adapter->max_frame_size);
+ reg = rd32(E1000_DMACR);
+ reg &= ~E1000_DMACR_DMACTHR_MASK;
+ dmac_thr = pba - 4;
+
+ reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
+ & E1000_DMACR_DMACTHR_MASK);
+
+ /* transition to L0x or L1 if available */
+ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+ /* watchdog timer= +-1000 usec in 32usec intervals */
+ reg |= (1000 >> 5);
+ wr32(E1000_DMACR, reg);
+
+ /*
+ * no lower threshold to disable
+ * coalescing (smart fifo) - UTRESH=0
+ */
+ wr32(E1000_DMCRTRH, 0);
+ wr32(E1000_FCRTC, hwm);
+
+ reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
+
+ wr32(E1000_DMCTLX, reg);
+
+ /*
+ * free space in tx packet buffer to wake from
+ * DMA coal
+ */
+ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+
+ /*
+ * make low power state decision controlled
+ * by DMA coal
+ */
+ reg = rd32(E1000_PCIEMISC);
+ reg &= ~E1000_PCIEMISC_LX_DECISION;
+ wr32(E1000_PCIEMISC, reg);
+ } /* endif adapter->dmac is not disabled */
+ } else if (hw->mac.type == e1000_82580) {
+ u32 reg = rd32(E1000_PCIEMISC);
+ wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
+ wr32(E1000_DMACR, 0);
+ }
+}
+
/* igb_main.c */
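igb_find_enabled_vfs() above discovers already-enabled VFs by walking the PCI device list and matching the devfn sequence the PF's VFs occupy (PF devfn + 0x80, stepping by the per-MAC stride). A compact sketch of that pci_get_device() walk (helper name and parameters are illustrative):

#include <linux/pci.h>

/* Count VFs already enabled (e.g. left over from a previous driver
 * instance) by matching their expected devfn sequence. */
static int example_count_vfs(struct pci_dev *pf, u16 vendor, u16 vf_dev_id,
			     u16 stride)
{
	struct pci_dev *vf = NULL;
	u16 devfn = pf->devfn + 0x80;
	int found = 0;

	/* pci_get_device() drops the previous reference and takes a new
	 * one, so exiting the loop on NULL leaves no refcount leaks. */
	while ((vf = pci_get_device(vendor, vf_dev_id, vf))) {
		if (vf->devfn == devfn) {
			found++;
			devfn += stride;
		}
	}
	return found;
}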
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 3d6f4cc3998a..048aae248d06 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -288,7 +288,7 @@ out_no_write:
* @msg: The message buffer
* @size: Length of buffer
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
{
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 32b3044fa45c..cca78124be31 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -45,13 +45,13 @@
#include "igbvf.h"
-#define DRV_VERSION "2.0.0-k"
+#define DRV_VERSION "2.0.1-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
- "Intel(R) Virtual Function Network Driver";
+ "Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
- "Copyright (c) 2009 - 2010 Intel Corporation.";
+ "Copyright (c) 2009 - 2011 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
@@ -102,8 +102,8 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
{
if (status & E1000_RXD_STAT_VP) {
u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
-
- __vlan_hwaccel_put_tag(skb, vid);
+ if (test_bit(vid, adapter->active_vlans))
+ __vlan_hwaccel_put_tag(skb, vid);
}
netif_receive_skb(skb);
}
@@ -312,7 +312,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
skb->len += length;
skb->data_len += length;
- skb->truesize += length;
+ skb->truesize += PAGE_SIZE / 2;
}
send_up:
i++;
@@ -2045,7 +2045,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
count++;
i++;
@@ -2053,7 +2053,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
i = 0;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
@@ -2525,9 +2525,11 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
+ if (hw->mac.type == e1000_vfadapt_i350)
+ dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
+ else
+ dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
- dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
static int igbvf_set_features(struct net_device *netdev, u32 features)
@@ -2864,7 +2866,7 @@ module_exit(igbvf_exit_module);
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
-MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver");
+MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
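The truesize change above charges what was actually allocated (half a page per rx fragment) rather than the bytes the NIC wrote, so socket memory accounting is not understated when small packets land in large buffers. A sketch under that assumption of half-page rx buffers:

#include <linux/skbuff.h>

/* Attach one half-page rx buffer to the skb; charge truesize for the
 * full allocation, not just the bytes the NIC wrote. */
static void example_add_rx_frag(struct sk_buff *skb, struct page *page,
				unsigned int used_len)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, used_len);
	skb->len += used_len;
	skb->data_len += used_len;
	skb->truesize += PAGE_SIZE / 2;	/* size of the buffer, not the data */
}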
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 88558b1aac07..e21148f8b160 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1383,10 +1383,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
offset = 0;
while (len) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 6c4d693be08d..a8368d5cf686 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -497,7 +497,8 @@ struct ixgbe_adapter {
u64 rsc_total_count;
u64 rsc_total_flush;
u32 wol;
- u16 eeprom_version;
+ u16 eeprom_verh;
+ u16 eeprom_verl;
u16 eeprom_cap;
int node;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index e02e911057de..ef2afefb0cd4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1305,6 +1305,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
+ .write = &ixgbe_write_eeprom_generic,
+ .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
.read_buffer = &ixgbe_read_eerd_buffer_generic,
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 35fa444556b3..834f044be4c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3341,7 +3341,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length)
{
u32 hicr, i;
@@ -3374,7 +3374,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, *((u32 *)buffer + i));
+ i, cpu_to_le32(buffer[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3398,9 +3398,10 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
- for (i = 0; i < dword_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ le32_to_cpus(&buffer[i]);
+ }
/* If there is any thing in data position pull it in */
buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
@@ -3418,8 +3419,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
/* Pull in the rest of the buffer (i is where we left off)*/
for (; i < buf_len; i++)
- *((u32 *)buffer + i) =
- IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+ buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
out:
return ret_val;
@@ -3465,7 +3465,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
sizeof(fw_cmd));
if (ret_val != 0)
continue;
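
Switching ixgbe_host_interface_command() from a byte pointer to a u32 pointer makes the dword granularity of the FLEX_MNG window explicit and moves the endianness handling to the register boundary: words are converted to little-endian on the way out and back to CPU order on the way in, so big-endian hosts present the same byte stream as little-endian ones. A sketch of the round-trip, with names as in the hunk and surrounding context assumed:

/* Outbound: each CPU-order word is stored little-endian in FLEX_MNG. */
for (i = 0; i < dword_len; i++)
	IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, i, cpu_to_le32(buffer[i]));

/* Inbound: read the raw little-endian word, then swap in place.
 * le32_to_cpus() is the in-place variant, so no temporary is needed. */
for (i = 0; i < dword_len; i++) {
	buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
	le32_to_cpus(&buffer[i]);
}
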
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e102ff6fb08d..70d58c3849b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -814,26 +814,97 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
return ret_val;
}
+static int ixgbe_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 *eeprom_buff;
+ void *ptr;
+ int max_len, first_word, last_word, ret_val = 0;
+ u16 i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+ return -EINVAL;
+
+ max_len = hw->eeprom.word_size * 2;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ptr = eeprom_buff;
+
+ if (eeprom->offset & 1) {
+ /*
+ * need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
+ ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
+ if (ret_val)
+ goto err;
+
+ ptr++;
+ }
+ if ((eeprom->offset + eeprom->len) & 1) {
+ /*
+ * need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
+ ret_val = hw->eeprom.ops.read(hw, last_word,
+ &eeprom_buff[last_word - first_word]);
+ if (ret_val)
+ goto err;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < last_word - first_word + 1; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(ptr, bytes, eeprom->len);
+
+ for (i = 0; i < last_word - first_word + 1; i++)
+ cpu_to_le16s(&eeprom_buff[i]);
+
+ ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
+
+ /* Update the checksum */
+ if (ret_val == 0)
+ hw->eeprom.ops.update_checksum(hw);
+
+err:
+ kfree(eeprom_buff);
+ return ret_val;
+}
+
static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
+ u32 nvm_track_id;
strncpy(drvinfo->driver, ixgbe_driver_name,
sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, ixgbe_driver_version,
sizeof(drvinfo->version) - 1);
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
- (adapter->eeprom_version & 0xF000) >> 12,
- (adapter->eeprom_version & 0x0FF0) >> 4,
- adapter->eeprom_version & 0x000F);
+ nvm_track_id = (adapter->eeprom_verh << 16) |
+ adapter->eeprom_verl;
+ snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+ nvm_track_id);
strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version));
+ sizeof(drvinfo->fw_version) - 1);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info));
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -2524,6 +2595,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
+ .set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
.get_pauseparam = ixgbe_get_pauseparam,
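
The read-modify-write logic in the new ixgbe_set_eeprom() is easiest to see with concrete numbers. For a hypothetical 3-byte write at byte offset 5 (illustrative values, not from the patch):

int first_word, last_word;

/* eeprom->offset = 5, eeprom->len = 3  ->  bytes 5, 6, 7 change */
first_word = 5 >> 1;            /* word 2 (bytes 4-5) */
last_word  = (5 + 3 - 1) >> 1;  /* word 3 (bytes 6-7) */

/* offset is odd: word 2 keeps byte 4, so it is read first and only
 * its upper byte is overwritten, via the ptr++ adjustment.        */
/* (offset + len) is even: word 3 is fully overwritten, so the
 * second read-modify-write branch is skipped.                     */
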
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fb7d8842a362..09b8e88b2999 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6545,9 +6545,9 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
frag = &skb_shinfo(skb)->frags[f];
#ifdef IXGBE_FCOE
- size = min_t(unsigned int, data_len, frag->size);
+ size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
- size = frag->size;
+ size = skb_frag_size(frag);
#endif
data_len -= size;
f++;
@@ -7640,6 +7640,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ /* save off EEPROM version number */
+ hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
+ hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+
/* pick up the PCI bus settings for reporting later */
hw->mac.ops.get_bus_info(hw);
@@ -7672,9 +7676,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
"is required.\n");
}
- /* save off EEPROM version number */
- hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
-
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
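
With the move from EEPROM word 0x29 to the word pair 0x2e/0x2d, ethtool's fw_version stops being a dotted decode of version nibbles and becomes the raw 32-bit NVM tracking id. A sketch of the resulting string (the example value is made up):

u32 nvm_track_id = (adapter->eeprom_verh << 16) | adapter->eeprom_verl;

snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
	 nvm_track_id);	/* e.g. "0x800001a3" -- illustrative value */
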
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1ff0eefcfd0a..3f725d48336d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -38,7 +38,7 @@
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4930c4605493..5e92cc2079bd 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2912,10 +2912,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)frag->size, total);
+ len = min((unsigned int)skb_frag_size(frag), total);
offset = 0;
while (len) {
@@ -3096,7 +3096,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += TXD_USE_COUNT(skb_headlen(skb));
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
adapter->tx_busy++;
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 7a8833125770..930fa83f2568 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -276,7 +276,7 @@ out_no_write:
* @msg: The message buffer
* @size: Length of buffer
*
- * returns 0 if it successfuly read message from buffer
+ * returns 0 if it successfully read message from buffer
**/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 48a0a23f342f..7becff1f387d 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1920,7 +1920,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
u32 len;
for (i = 0 ; i < nr_frags ; ++i) {
@@ -1930,7 +1930,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
skb_frag_page(frag),
- frag->page_offset, frag->size, hidma);
+ frag->page_offset, skb_frag_size(frag), hidma);
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -3132,6 +3132,9 @@ jme_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
+ if (!netif_running(netdev))
+ return 0;
+
atomic_dec(&jme->link_changing);
netif_device_detach(netdev);
@@ -3172,6 +3175,9 @@ jme_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
+ if (!netif_running(netdev))
+ return 0;
+
jme_clear_pm(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
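
The netif_running() guards added to jme_suspend()/jme_resume() follow the usual PM convention: an interface that was never opened has no link state, queues, or PHY power to manage, so both callbacks can return success immediately. A minimal sketch of the pattern (illustrative function name, error handling elided):

static int example_net_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!netif_running(netdev))
		return 0;	/* never opened: nothing to quiesce */

	netif_device_detach(netdev);
	/* ... stop queues, disable IRQs, power down the PHY ... */
	return 0;
}
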
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f6821aa5ffbf..194a03113802 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -713,8 +713,9 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
int frag;
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- if (fragp->size <= 8 && fragp->page_offset & 7)
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+ if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
return 1;
}
@@ -751,10 +752,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
}
desc->l4i_chk = 0;
- desc->byte_cnt = this_frag->size;
+ desc->byte_cnt = skb_frag_size(this_frag);
desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
this_frag, 0,
- this_frag->size,
+ skb_frag_size(this_frag),
DMA_TO_DEVICE);
}
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 297730359b79..c7b60839ac99 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2770,10 +2770,10 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
control |= BMU_STFWD;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
e = e->next;
e->skb = skb;
@@ -2783,9 +2783,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
tf->dma_lo = map;
tf->dma_hi = (u64) map >> 32;
dma_unmap_addr_set(e, mapaddr, map);
- dma_unmap_len_set(e, maplen, frag->size);
+ dma_unmap_len_set(e, maplen, skb_frag_size(frag));
- tf->control = BMU_OWN | BMU_SW | control | frag->size;
+ tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
}
tf->control |= BMU_EOF | BMU_IRQ_EOF;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 92634907bf8d..cbd026f3bc57 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1225,10 +1225,10 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
dma_unmap_len_set(re, data_size, size);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, re->frag_addr[i]))
@@ -1239,7 +1239,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
map_page_error:
while (--i >= 0) {
pci_unmap_page(pdev, re->frag_addr[i],
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_FROMDEVICE);
}
@@ -1263,7 +1263,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
pci_unmap_page(pdev, re->frag_addr[i],
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_FROMDEVICE);
}
@@ -1936,7 +1936,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(&hw->pdev->dev, mapping))
goto mapping_unwind;
@@ -1952,11 +1952,11 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
re = sky2->tx_ring + slot;
re->flags = TX_MAP_PAGE;
dma_unmap_addr_set(re, mapaddr, mapping);
- dma_unmap_len_set(re, maplen, frag->size);
+ dma_unmap_len_set(re, maplen, skb_frag_size(frag));
le = get_tx_le(sky2, &slot);
le->addr = cpu_to_le32(lower_32_bits(mapping));
- le->length = cpu_to_le16(frag->size);
+ le->length = cpu_to_le16(skb_frag_size(frag));
le->ctrl = ctrl;
le->opcode = OP_BUFFER | HW_OWNER;
}
@@ -2082,7 +2082,7 @@ static void sky2_hw_down(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
- /* Force any delayed status interrrupt and NAPI */
+ /* Force any delayed status interrupt and NAPI */
sky2_write32(hw, STAT_LEV_TIMER_CNT, 0);
sky2_write32(hw, STAT_TX_TIMER_CNT, 0);
sky2_write32(hw, STAT_ISR_TIMER_CNT, 0);
@@ -2484,7 +2484,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
} else {
size = min(length, (unsigned) PAGE_SIZE);
- frag->size = size;
+ skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += PAGE_SIZE;
skb->len += size;
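
skb->truesize in skb_put_frags() is now charged a full PAGE_SIZE per attached page rather than the bytes actually used, matching the igbvf change at the top of this series (PAGE_SIZE / 2 there, since its receive buffers are half pages). truesize tracks the memory an skb pins for socket accounting, so it must reflect buffer size, not payload length:

/* Fragment appended from a page-sized receive buffer: */
skb_frag_size_set(frag, size);	/* payload bytes in this frag  */
skb->data_len += size;
skb->len      += size;
skb->truesize += PAGE_SIZE;	/* memory held, not bytes used */
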
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c4c4be426921..78d776bc355c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1084,7 +1084,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->vlan_features = dev->hw_features;
- dev->hw_features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 9d275558094a..03c84cd78cde 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -214,15 +214,21 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->rx_packets = 0;
stats->rx_bytes = 0;
+ priv->port_stats.rx_chksum_good = 0;
+ priv->port_stats.rx_chksum_none = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i].packets;
stats->rx_bytes += priv->rx_ring[i].bytes;
+ priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
+ priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ priv->port_stats.tx_chksum_offload = 0;
for (i = 0; i < priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i].packets;
stats->tx_bytes += priv->tx_ring[i].bytes;
+ priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
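
Moving rx_chksum_good/rx_chksum_none/tx_chksum_offload from shared port counters to per-ring fields (csum_ok, csum_none, tx_csum; see the mlx4_en.h hunk further down) keeps cross-CPU cache-line bouncing out of the hot path: each ring bumps its own counter, and the totals are only folded together here, at DUMP_ETH_STATS time:

priv->port_stats.rx_chksum_good = 0;
for (i = 0; i < priv->rx_ring_num; i++)
	priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
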
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 37cc9e5c56be..b89c36dbf5b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -44,7 +44,7 @@
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct skb_frag_struct *skb_frags,
+ struct page_frag *skb_frags,
struct mlx4_en_rx_alloc *ring_alloc,
int i)
{
@@ -61,7 +61,7 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
return -ENOMEM;
skb_frags[i].page = page_alloc->page;
- skb_frags[i].page_offset = page_alloc->offset;
+ skb_frags[i].offset = page_alloc->offset;
page_alloc->page = page;
page_alloc->offset = frag_info->frag_align;
} else {
@@ -69,11 +69,11 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
get_page(page);
skb_frags[i].page = page;
- skb_frags[i].page_offset = page_alloc->offset;
+ skb_frags[i].offset = page_alloc->offset;
page_alloc->offset += frag_info->frag_stride;
}
dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
- skb_frags[i].page_offset, frag_info->frag_size,
+ skb_frags[i].offset, frag_info->frag_size,
PCI_DMA_FROMDEVICE);
rx_desc->data[i].addr = cpu_to_be64(dma);
return 0;
@@ -135,7 +135,7 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
- skb_frags[i].size = priv->frag_info[i].frag_size;
+ skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -157,8 +157,8 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
- struct skb_frag_struct *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
+ struct page_frag *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
int i;
for (i = 0; i < priv->num_frags; i++)
@@ -183,7 +183,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
int index)
{
struct mlx4_en_dev *mdev = priv->mdev;
- struct skb_frag_struct *skb_frags;
+ struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
dma_addr_t dma;
int nr;
@@ -403,11 +403,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct skb_frag_struct *skb_frags,
- struct skb_frag_struct *skb_frags_rx,
+ struct page_frag *skb_frags,
+ struct sk_buff *skb,
struct mlx4_en_rx_alloc *page_alloc,
int length)
{
+ struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_frag_info *frag_info;
int nr;
@@ -420,9 +421,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
break;
/* Save page reference in skb */
- skb_frags_rx[nr].page = skb_frags[nr].page;
- skb_frags_rx[nr].size = skb_frags[nr].size;
- skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
+ skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
+ skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
+ skb->truesize += frag_info->frag_stride;
dma = be64_to_cpu(rx_desc->data[nr].addr);
/* Allocate a replacement page */
@@ -430,13 +432,13 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
goto fail;
/* Unmap buffer */
- pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
+ pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]),
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
- skb_frags_rx[nr - 1].size = length -
- priv->frag_info[nr - 1].frag_prefix_size;
+ skb_frag_size_set(&skb_frags_rx[nr - 1],
+ length - priv->frag_info[nr - 1].frag_prefix_size);
return nr;
fail:
@@ -444,7 +446,7 @@ fail:
* the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
- put_page(skb_frags_rx[nr].page);
+ __skb_frag_unref(&skb_frags_rx[nr]);
}
return 0;
}
@@ -452,7 +454,7 @@ fail:
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct skb_frag_struct *skb_frags,
+ struct page_frag *skb_frags,
struct mlx4_en_rx_alloc *page_alloc,
unsigned int length)
{
@@ -470,11 +472,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb->dev = priv->dev;
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
- skb->truesize = length + sizeof(struct sk_buff);
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+ va = page_address(skb_frags[0].page) + skb_frags[0].offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
@@ -490,8 +491,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Move relevant fragments to skb */
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
- skb_shinfo(skb)->frags,
- page_alloc, length);
+ skb, page_alloc, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
@@ -506,7 +506,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
/* Adjust size of first fragment */
- skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
skb->data_len = length - HEADER_COPY_SIZE;
}
return skb;
@@ -533,7 +533,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- struct skb_frag_struct *skb_frags;
+ struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -587,7 +587,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
- priv->port_stats.rx_chksum_good++;
+ ring->csum_ok++;
/* This packet is eligible for LRO if it is:
* - DIX Ethernet (type interpretation)
* - TCP/IP (v4)
@@ -600,7 +600,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
nr = mlx4_en_complete_rx_desc(
priv, rx_desc,
- skb_frags, skb_shinfo(gro_skb)->frags,
+ skb_frags, gro_skb,
ring->page_alloc, length);
if (!nr)
goto next;
@@ -608,7 +608,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_shinfo(gro_skb)->nr_frags = nr;
gro_skb->len = length;
gro_skb->data_len = length;
- gro_skb->truesize += length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe->vlan_my_qpn &
@@ -618,6 +617,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
__vlan_hwaccel_put_tag(gro_skb, vid);
}
+ if (dev->features & NETIF_F_RXHASH)
+ gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+
+ skb_record_rx_queue(gro_skb, cq->ring);
napi_gro_frags(&cq->napi);
goto next;
@@ -627,11 +630,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ip_summed = CHECKSUM_UNNECESSARY;
} else {
ip_summed = CHECKSUM_NONE;
- priv->port_stats.rx_chksum_none++;
+ ring->csum_none++;
}
} else {
ip_summed = CHECKSUM_NONE;
- priv->port_stats.rx_chksum_none++;
+ ring->csum_none++;
}
skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
@@ -650,6 +653,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (dev->features & NETIF_F_RXHASH)
+ skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
+
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK)
__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
@@ -806,6 +812,10 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
qpn, ring->cqn, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
+ /* Cancel FCS removal if FW allows */
+ if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
+ context->param3 |= cpu_to_be32(1 << 29);
+
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
if (err) {
mlx4_qp_remove(mdev->dev, qp);
@@ -829,6 +839,9 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
int i, qpn;
int err = 0;
int good_qps = 0;
+ static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
+ 0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
+ 0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
@@ -866,6 +879,9 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
rss_context->flags = rss_mask;
+ rss_context->hash_fn = 1;
+ for (i = 0; i < 10; i++)
+ rss_context->rss_key[i] = rsskey[i];
if (priv->mdev->profile.udp_rss)
rss_context->base_qpn_udp = rss_context->default_qpn;
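
With NETIF_F_RXHASH advertised in en_netdev.c above, the receive path hands the NIC-computed RSS hash (carried in the CQE's immed_rss_invalid field) to the stack so RPS/RFS can steer the packet without re-hashing it in software; hash_fn = 1 together with the fixed rsskey appears to select the Toeplitz function, though that mapping is inferred here rather than stated by the patch:

if (dev->features & NETIF_F_RXHASH)
	skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);

skb_record_rx_queue(skb, cq->ring);	/* pairs the hash with its queue */
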
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6e03de034ac7..90f2cd24faac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
memset(ring->buf, 0, ring->buf_size);
ring->qp_state = MLX4_QP_STATE_RST;
- ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+ ring->doorbell_qpn = ring->qp.qpn << 8;
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, &ring->context);
@@ -226,7 +226,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data[i].addr),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
/* Stamp the freed descriptor */
@@ -256,7 +256,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
++data;
}
}
@@ -460,26 +460,13 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
}
}
-static void *get_frag_ptr(struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- struct page *page = frag->page;
- void *ptr;
-
- ptr = page_address(page);
- if (unlikely(!ptr))
- return NULL;
-
- return ptr + frag->page_offset;
-}
-
static int is_inline(struct sk_buff *skb, void **pfrag)
{
void *ptr;
if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
if (skb_shinfo(skb)->nr_frags == 1) {
- ptr = get_frag_ptr(skb);
+ ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
if (unlikely(!ptr))
return 0;
@@ -550,7 +537,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
- skb_shinfo(skb)->frags[0].size);
+ skb_frag_size(&skb_shinfo(skb)->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
@@ -570,7 +557,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
skb_headlen(skb) - spc);
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
- fragptr, skb_shinfo(skb)->frags[0].size);
+ fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
}
wmb();
@@ -695,7 +682,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
- priv->port_stats.tx_chksum_offload++;
+ ring->tx_csum++;
}
if (unlikely(priv->validate_loopback)) {
@@ -756,12 +743,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Map fragments */
for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
frag = &skb_shinfo(skb)->frags[i];
- dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
+ dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
data->addr = cpu_to_be64(dma);
data->lkey = cpu_to_be32(mdev->mr.key);
wmb();
- data->byte_count = cpu_to_be32(frag->size);
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
--data;
}
@@ -791,7 +779,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
- *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+ *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW */
@@ -812,7 +800,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
tx_desc->ctrl.owner_opcode = op_own;
wmb();
- writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
/* Poll CQ here */
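
The doorbell change trades a pre-swapped value for an explicit byte swap at the I/O boundary, which is easier to audit and works the same on either host endianness:

/* Before: the qpn was byte-swapped once at ring init, then written raw. */
ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);

/* After: the value stays in CPU order and the big-endian conversion is
 * expressed where it belongs, at the MMIO accessor. */
ring->doorbell_qpn = ring->qp.qpn << 8;
iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
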
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7eb8ba822e97..ed452ddfe342 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -101,6 +101,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[25] = "Router support",
[30] = "IBoE support",
[32] = "Unicast loopback support",
+ [34] = "FCS header control",
[38] = "Wake On LAN support",
[40] = "UDP RSS support",
[41] = "Unicast VEP steering support",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3b753f7b866a..fca66165110e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.1"
-#define DRV_RELDATE "March 2011"
+#define DRV_VERSION "1.5.4.2"
+#define DRV_RELDATE "October 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -249,6 +249,7 @@ struct mlx4_en_tx_ring {
struct mlx4_srq dummy;
unsigned long bytes;
unsigned long packets;
+ unsigned long tx_csum;
spinlock_t comp_lock;
struct mlx4_bf bf;
bool bf_enabled;
@@ -275,6 +276,8 @@ struct mlx4_en_rx_ring {
void *rx_info;
unsigned long bytes;
unsigned long packets;
+ unsigned long csum_ok;
+ unsigned long csum_none;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 609e0ec14cee..163a314c148f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -65,7 +65,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->entries[i] = 0;
table->refs[i] = 0;
}
- table->max = 1 << dev->caps.log_num_vlans;
+ table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
table->total = 0;
}
@@ -354,6 +354,13 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
int free = -1;
mutex_lock(&table->mutex);
+
+ if (table->total == table->max) {
+ /* No free vlan entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if (free < 0 && (table->refs[i] == 0)) {
free = i;
@@ -375,12 +382,6 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- if (table->total == table->max) {
- /* No free vlan entries */
- err = -ENOSPC;
- goto out;
- }
-
/* Register new MAC */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
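
Two related fixes land here: table->max now subtracts the MLX4_VLAN_REGULAR reserved slots (the scan starts at index MLX4_VLAN_REGULAR, so those entries were never allocatable), and the full-table check is hoisted above the scan so a saturated table fails fast instead of walking every entry first:

mutex_lock(&table->mutex);

if (table->total == table->max) {	/* counts allocatable slots only */
	err = -ENOSPC;			/* fail before the O(n) scan     */
	goto out;
}
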
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 70788401d699..ab81c0dc96e2 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -414,7 +414,7 @@ ks8695_tx_irq(int irq, void *dev_id)
* Interrupt Status Register (Offset 0xF208)
* Bit29: WAN MAC Receive Status
* Bit16: LAN MAC Receive Status
- * So, this Rx interrrupt enable/status bit number is equal
+ * So, this Rx interrupt enable/status bit number is equal
* as Rx IRQ number.
*/
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 710c4aead146..7ece990381c8 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4700,7 +4700,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
++hw->tx_int_cnt;
dma_buf = DMA_BUFFER(desc);
- dma_buf->len = this_frag->size;
+ dma_buf->len = skb_frag_size(this_frag);
dma_buf->dma = pci_map_single(
hw_priv->pdev,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 26637279cd67..0778edcf7b9a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1210,13 +1210,12 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
struct skb_frag_struct *skb_frags;
skb->len = skb->data_len = len;
- skb->truesize = len + sizeof(struct sk_buff);
/* attach the page(s) */
skb_frags = skb_shinfo(skb)->frags;
while (len > 0) {
memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
- len -= rx_frags->size;
+ len -= skb_frag_size(rx_frags);
skb_frags++;
rx_frags++;
skb_shinfo(skb)->nr_frags++;
@@ -1228,7 +1227,7 @@ myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
* manually */
skb_copy_to_linear_data(skb, va, hlen);
skb_shinfo(skb)->frags[0].page_offset += hlen;
- skb_shinfo(skb)->frags[0].size -= hlen;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen);
skb->data_len -= hlen;
skb->tail += hlen;
skb_pull(skb, MXGEFW_PAD);
@@ -1345,9 +1344,9 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
__skb_frag_set_page(&rx_frags[i], rx->info[idx].page);
rx_frags[i].page_offset = rx->info[idx].page_offset;
if (remainder < MYRI10GE_ALLOC_SIZE)
- rx_frags[i].size = remainder;
+ skb_frag_size_set(&rx_frags[i], remainder);
else
- rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+ skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE);
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
@@ -1355,7 +1354,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
if (lro_enabled) {
rx_frags[0].page_offset += MXGEFW_PAD;
- rx_frags[0].size -= MXGEFW_PAD;
+ skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
/* opaque, will come back in get_frag_header */
@@ -1382,9 +1381,11 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
/* Attach the pages to the skb, and trim off any padding */
myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
- if (skb_shinfo(skb)->frags[0].size <= 0) {
+ if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) {
skb_frag_unref(skb, 0);
skb_shinfo(skb)->nr_frags = 0;
+ } else {
+ skb->truesize += bytes * skb_shinfo(skb)->nr_frags;
}
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
@@ -2926,7 +2927,7 @@ again:
idx = (count + tx->req) & tx->mask;
frag = &skb_shinfo(skb)->frags[frag_idx];
frag_idx++;
- len = frag->size;
+ len = skb_frag_size(frag);
bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
dma_unmap_addr_set(&tx->info[idx], bus, bus);
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 73616b911327..2b8f64ddfb55 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1161,11 +1161,11 @@ again:
break;
buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
(long long)buf, (long) page_to_pfn(frag->page),
frag->page_offset);
- len = frag->size;
+ len = skb_frag_size(frag);
frag++;
nr_frags--;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bdd3e6a330cd..c27fb3dda9f4 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2350,12 +2350,12 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
if (frg_cnt) {
txds++;
for (j = 0; j < frg_cnt; j++, txds++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
if (!txds->Buffer_Pointer)
break;
pci_unmap_page(nic->pdev,
(dma_addr_t)txds->Buffer_Pointer,
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
@@ -4185,16 +4185,16 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
frg_cnt = skb_shinfo(skb)->nr_frags;
/* For fragmented SKB. */
for (i = 0; i < frg_cnt; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* A '0' length fragment will be ignored */
- if (!frag->size)
+ if (!skb_frag_size(frag))
continue;
txdp++;
txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
- txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a66f8fc0401e..671e166b5af1 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -585,7 +585,7 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
for (j = 0; j < frg_cnt; j++) {
pci_unmap_page(fifo->pdev,
txd_priv->dma_buffers[i++],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -920,11 +920,11 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
frag = &skb_shinfo(skb)->frags[0];
for (i = 0; i < frg_cnt; i++) {
/* ignore 0 length fragment */
- if (!frag->size)
+ if (!skb_frag_size(frag))
continue;
dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
- 0, frag->size,
+ 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
@@ -936,7 +936,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
txdl_priv->dma_buffers[j] = dma_pointer;
vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
- frag->size);
+ skb_frag_size(frag));
frag += 1;
}
@@ -979,7 +979,7 @@ _exit1:
for (; j < i; j++) {
pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
@@ -1050,7 +1050,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
for (j = 0; j < frg_cnt; j++) {
pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
- frag->size, PCI_DMA_TODEVICE);
+ skb_frag_size(frag), PCI_DMA_TODEVICE);
frag += 1;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index d7763ab841d8..1e37eb98c4e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2099,8 +2099,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2138,8 +2140,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the fragments */
for (i = 0; i < fragments; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = skb_frag_size(frag);
offset = 0;
do {
@@ -2211,8 +2213,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* add fragments to entries count */
for (i = 0; i < fragments; i++) {
- entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
- ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
}
spin_lock_irqsave(&np->lock, flags);
@@ -2253,7 +2257,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* setup the fragments */
for (i = 0; i < fragments; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 size = frag->size;
+ u32 size = skb_frag_size(frag);
offset = 0;
do {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c6f005684677..49b549ff2c78 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -300,9 +300,9 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);
for (f = 0; f < nfrags; f++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
+ pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE);
}
dev_kfree_skb_irq(skb);
@@ -1506,8 +1506,8 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
- map_size[i+1] = frag->size;
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ map_size[i+1] = skb_frag_size(frag);
if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) {
nfrags = i;
goto out_err_nolock;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index e2ba78be1c2a..8cf3173ba488 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1905,13 +1905,13 @@ netxen_map_tx_skb(struct pci_dev *pdev,
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
- map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
- nf->length = frag->size;
+ nf->length = skb_frag_size(frag);
}
return 0;
@@ -1962,7 +1962,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
frag = &skb_shinfo(skb)->frags[i];
- delta += frag->size;
+ delta += skb_frag_size(frag);
}
if (!__pskb_pull_tail(skb, delta))
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 46f9b6499f9b..a4bdff438a5e 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2388,7 +2388,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
seg++;
}
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
err = dma_mapping_error(&qdev->pdev->dev, map);
@@ -2401,9 +2401,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
- oal_entry->len = cpu_to_le32(frag->size);
+ oal_entry->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
- dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
}
/* Terminate the last segment. */
oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index eac19e7d2761..106503f118f6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2135,13 +2135,13 @@ qlcnic_map_tx_skb(struct pci_dev *pdev,
frag = &skb_shinfo(skb)->frags[i];
nf = &pbuf->frag_array[i+1];
- map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, map))
goto unwind;
nf->dma = map;
- nf->length = frag->size;
+ nf->length = skb_frag_size(frag);
}
return 0;
@@ -2221,7 +2221,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
- delta += skb_shinfo(skb)->frags[i].size;
+ delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (!__pskb_pull_tail(skb, delta))
goto drop_packet;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index f2d9bb78ec7f..c92afcd912e2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1431,7 +1431,7 @@ static int ql_map_send(struct ql_adapter *qdev,
map_idx++;
}
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size,
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
err = dma_mapping_error(&qdev->pdev->dev, map);
@@ -1443,10 +1443,10 @@ static int ql_map_send(struct ql_adapter *qdev,
}
tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(frag->size);
+ tbd->len = cpu_to_le32(skb_frag_size(frag));
dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- frag->size);
+ skb_frag_size(frag));
}
/* Save the number of segments we've mapped. */
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5dcd5be03f31..ee5da9293ce0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -777,12 +777,12 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
u32 ctrl;
dma_addr_t mapping;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2ce60709a455..92b45f08858f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2937,7 +2937,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
@@ -3491,6 +3491,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
+static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
+ RTL_W32(RxConfig, RTL_R32(RxConfig) |
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
+{
+ if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ return false;
+
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
+ rtl_wol_suspend_quirk(tp);
+
+ return true;
+}
+
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
@@ -3505,18 +3536,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_29 ||
- tp->mac_version == RTL_GIGA_MAC_VER_30)
- RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
- AcceptMulticast | AcceptMyPhys);
+ if (rtl_wol_pll_power_down(tp))
return;
- }
r810x_phy_power_down(tp);
}
@@ -3605,17 +3626,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(ioaddr, 0x19, 0xff64);
- if (__rtl8169_get_wol(tp) & WAKE_ANY) {
- rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, MII_BMCR, 0x0000);
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
- tp->mac_version == RTL_GIGA_MAC_VER_33 ||
- tp->mac_version == RTL_GIGA_MAC_VER_34)
- RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
- AcceptMulticast | AcceptMyPhys);
+ if (rtl_wol_pll_power_down(tp))
return;
- }
r8168_phy_power_down(tp);
@@ -5413,7 +5425,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
- skb_frag_t *frag = info->frags + cur_frag;
+ const skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
u32 status, len;
void *addr;
@@ -5421,7 +5433,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
entry = (entry + 1) % NUM_TX_DESC;
txd = tp->TxDescArray + entry;
- len = frag->size;
+ len = skb_frag_size(frag);
addr = skb_frag_address(frag);
mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
@@ -6161,11 +6173,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
#endif /* !CONFIG_PM */
+static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* WoL fails with 8168b when the receiver is disabled. */
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ pci_clear_master(tp->pci_dev);
+
+ RTL_W8(ChipCmd, CmdRxEnb);
+ /* PCI commit */
+ RTL_R8(ChipCmd);
+ break;
+ default:
+ break;
+ }
+}
+
static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
rtl8169_net_suspend(dev);
@@ -6179,16 +6210,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
spin_unlock_irq(&tp->lock);
if (system_state == SYSTEM_POWER_OFF) {
- /* WoL fails with 8168b when the receiver is disabled. */
- if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_12 ||
- tp->mac_version == RTL_GIGA_MAC_VER_17) &&
- (tp->features & RTL_FEATURE_WOL)) {
- pci_clear_master(pdev);
-
- RTL_W8(ChipCmd, CmdRxEnb);
- /* PCI commit */
- RTL_R8(ChipCmd);
+ if (__rtl8169_get_wol(tp) & WAKE_ANY) {
+ rtl_wol_suspend_quirk(tp);
+ rtl_wol_shutdown_quirk(tp);
}
pci_wake_from_d3(pdev, true);
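
The three duplicated WoL sequences in r810x_pll_power_down(), r8168_pll_power_down() and rtl_shutdown() collapse into shared helpers: rtl_wol_pll_power_down() parks the PHY and applies the receive-filter quirk, while rtl_wol_shutdown_quirk() handles the 8168b receiver restriction at power-off. Note the shutdown path now keys off the armed wake sources (__rtl8169_get_wol() & WAKE_ANY) rather than the RTL_FEATURE_WOL flag. The resulting control flow:

if (system_state == SYSTEM_POWER_OFF) {
	if (__rtl8169_get_wol(tp) & WAKE_ANY) {
		rtl_wol_suspend_quirk(tp);	/* keep bcast/mcast/ucast RX open  */
		rtl_wol_shutdown_quirk(tp);	/* 8168b: CmdRxEnb, drop busmaster */
	}
	pci_wake_from_d3(pdev, true);
}
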
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 91a6b7123539..adbda182f159 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -481,7 +481,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
skb_frag_set_page(skb, 0, page);
skb_shinfo(skb)->frags[0].page_offset =
efx_rx_buf_offset(efx, rx_buf);
- skb_shinfo(skb)->frags[0].size = rx_buf->len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
skb_shinfo(skb)->nr_frags = 1;
skb->len = rx_buf->len;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 3964a62dde8b..df88c5430f95 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -238,7 +238,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (i >= skb_shinfo(skb)->nr_frags)
break;
fragment = &skb_shinfo(skb)->frags[i];
- len = fragment->size;
+ len = skb_frag_size(fragment);
i++;
/* Map for DMA */
unmap_single = false;
@@ -926,11 +926,11 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
skb_frag_t *frag)
{
st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = false;
- st->unmap_len = frag->size;
- st->in_len = frag->size;
+ st->unmap_len = skb_frag_size(frag);
+ st->in_len = skb_frag_size(frag);
st->dma_addr = st->unmap_addr;
return 0;
}
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 1854c88dfb92..5a689af516e9 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -106,8 +106,7 @@ config SMSC911X
Say Y here if you want support for SMSC LAN911x and LAN921x families
of ethernet controllers.
- To compile this driver as a module, choose M here and read
- <file:Documentation/networking/net-modules.txt>. The module
+ To compile this driver as a module, choose M here. The module
will be called smsc911x.
config SMSC911X_ARCH_HOOKS
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a3aa4c0e87f3..d2be42aafbef 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -26,6 +26,7 @@
* LAN9215, LAN9216, LAN9217, LAN9218
* LAN9210, LAN9211
* LAN9220, LAN9221
+ * LAN89218
*
*/
@@ -1987,6 +1988,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
case 0x01170000:
case 0x01160000:
case 0x01150000:
+ case 0x218A0000:
/* LAN911[5678] family */
pdata->generation = pdata->idrev & 0x0000FFFF;
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 8cd9ddec05a0..ac6f190743dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -63,4 +63,22 @@ config STMMAC_RTC_TIMER
endchoice
+choice
+ prompt "Select the DMA TX/RX descriptor operating modes"
+ depends on STMMAC_ETH
+ ---help---
+ This driver supports DMA descriptors operating in both dual buffer
+ (RING) and linked-list (CHAINED) mode. In RING mode each descriptor
+ points to two data buffers, whereas in CHAINED mode each descriptor
+ points to only one data buffer.
+
+config STMMAC_RING
+ bool "Enable Descriptor Ring Mode"
+
+config STMMAC_CHAINED
+ bool "Enable Descriptor Chained Mode"
+
+endchoice
+
+
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 0f23d95746b7..d7c45164ea79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
+stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
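
Because STMMAC_RING and STMMAC_CHAINED sit inside a Kconfig choice, exactly one of ring_mode.o / chain_mode.o is ever linked into stmmac.o, which is why the new chain_mode.c below can export a structure under the shared name ring_mode_ops without a link-time clash. A hedged sketch of how the core would consume it (the hook name is an assumption, not shown in this patch):

/* Provided by whichever of ring_mode.c / chain_mode.c was selected: */
extern const struct stmmac_ring_mode_ops ring_mode_ops;

priv->hw->ring = &ring_mode_ops;	/* assumed wiring in stmmac_main.c */
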
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
new file mode 100644
index 000000000000..0668659803ed
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -0,0 +1,137 @@
+/*******************************************************************************
+ Specialised functions for managing Chained mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors when the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *) p;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int bmax;
+ unsigned int i = 1, len;
+
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ len = nopaged_len - bmax;
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
+
+ while (len != 0) {
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ if (len > bmax) {
+ desc->des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i),
+ bmax, DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
+ csum);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ len -= bmax;
+ i++;
+ } else {
+ desc->des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i), len,
+ DMA_TO_DEVICE);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len,
+ csum);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ len = 0;
+ }
+ }
+ return entry;
+}
+
+static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
+ (!enh_desc && (len > BUF_SIZE_2KiB))) {
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
+{
+}
+
+static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
+{
+}
+
+static void stmmac_clean_desc3(struct dma_desc *p)
+{
+}
+
+static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
+ unsigned int size)
+{
+ /*
+	 * In chained mode, des3 points to the next element in the ring.
+	 * The last element has to point back to the head.
+ */
+ int i;
+ struct dma_desc *p = des;
+ dma_addr_t dma_phy = phy_addr;
+
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_desc);
+ p->des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ p->des3 = (unsigned int)phy_addr;
+}
+
+static int stmmac_set_16kib_bfsize(int mtu)
+{
+ /* Not supported */
+ return 0;
+}
+
+const struct stmmac_ring_mode_ops ring_mode_ops = {
+ .is_jumbo_frm = stmmac_is_jumbo_frm,
+ .jumbo_frm = stmmac_jumbo_frm,
+ .refill_desc3 = stmmac_refill_desc3,
+ .init_desc3 = stmmac_init_desc3,
+ .init_dma_chain = stmmac_init_dma_chain,
+ .clean_desc3 = stmmac_clean_desc3,
+ .set_16kib_bfsize = stmmac_set_16kib_bfsize,
+};
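A sanity check on the splitting loop in stmmac_jumbo_frm() above: the first descriptor takes bmax bytes and the while loop consumes the remainder in bmax-sized chunks, so a linear area of nopaged_len bytes occupies ceil(nopaged_len / bmax) descriptors. A hypothetical helper expressing that, using DIV_ROUND_UP from <linux/kernel.h>:

    /* Hypothetical helper mirroring the loop above: descriptors consumed by
     * a linear area of nopaged_len bytes with a per-buffer limit of bmax. */
    static inline unsigned int chain_tx_desc_count(unsigned int nopaged_len,
    					       unsigned int bmax)
    {
    	return DIV_ROUND_UP(nopaged_len, bmax);
    }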
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 22c61b2ebfa3..9100c100d295 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -103,6 +103,36 @@ struct stmmac_extra_stats {
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+/* DMA HW feature register fields */
+#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
+#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
+#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
+#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
+#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
+#define DMA_HW_FEAT_ADDMACADRSEL 0x00000020 /* Multiple MAC Addr Reg */
+#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
+#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
+#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
+#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
+#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
+#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
+#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 Timestamp */
+#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 Adv Timestamp */
+#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
+#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
+#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP csum Offload(Type 1) in Rx */
+#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP csum Offload(Type 2) in Rx */
+#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
+#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. of additional Rx Channels */
+#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. of additional Tx Channels */
+#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate (Enhanced Descriptor) */
+#define DMA_HW_FEAT_INTTSEN 0x02000000 /* Timestamping with Internal
+ System Time */
+#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
+#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */
+#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
+
enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
@@ -257,10 +287,22 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
+struct stmmac_ring_mode_ops {
+	unsigned int (*is_jumbo_frm) (int len, int enh_desc);
+ unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ void (*refill_desc3) (int bfsize, struct dma_desc *p);
+ void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p);
+ void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
+ unsigned int size);
+ void (*clean_desc3) (struct dma_desc *p);
+ int (*set_16kib_bfsize) (int mtu);
+};
+
struct mac_device_info {
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
+ const struct stmmac_ring_mode_ops *ring;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
unsigned int synopsys_uid;
@@ -274,3 +316,4 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+extern const struct stmmac_ring_mode_ops ring_mode_ops;
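The new ops table is reached through priv->hw->ring, which stmmac_mac_device_setup() points at ring_mode_ops later in this series. The dispatch pattern the core uses looks like this (an illustrative extract; the real call site is in the stmmac_xmit() hunk below):

    if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
    	entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
    	desc = priv->dma_tx + entry;
    }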
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
new file mode 100644
index 000000000000..dd8d6e19dff6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -0,0 +1,126 @@
+/*******************************************************************************
+ Header File to describe Normal/enhanced descriptor functions used for RING
+ and CHAINED modes.
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors when the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#if defined(CONFIG_STMMAC_RING)
+static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (end)
+ p->des01.erx.end_ring = 1;
+}
+
+static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des01.etx.end_ring = 1;
+}
+
+static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.etx.end_ring = ter;
+}
+
+static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else
+ p->des01.etx.buffer1_size = len;
+}
+
+static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
+ if (end)
+ p->des01.rx.end_ring = 1;
+}
+
+static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des01.tx.end_ring = 1;
+}
+
+static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.tx.end_ring = ter;
+}
+
+static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_2KiB)) {
+		p->des01.tx.buffer1_size = BUF_SIZE_2KiB - 1;
+		p->des01.tx.buffer2_size = len - p->des01.tx.buffer1_size;
+ } else
+ p->des01.tx.buffer1_size = len;
+}
+
+#else
+
+static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.erx.second_address_chained = 1;
+}
+
+static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.etx.second_address_chained = 1;
+}
+
+static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.etx.second_address_chained = 1;
+}
+
+static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ p->des01.etx.buffer1_size = len;
+}
+
+static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.rx.second_address_chained = 1;
+}
+
+static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+{
+ p->des01.tx.second_address_chained = 1;
+}
+
+static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+{
+ p->des01.tx.second_address_chained = 1;
+}
+
+static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+{
+ p->des01.tx.buffer1_size = len;
+}
+#endif
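Because the two variants are selected by the preprocessor, each build contains exactly one behavior for every helper. Under CONFIG_STMMAC_CHAINED, for instance, the rx-init loop in norm_desc.c below effectively reduces, per descriptor, to (illustrative expansion only):

    p->des01.rx.own = 1;
    p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
    p->des01.rx.second_address_chained = 1;	/* des3 is a next-descriptor pointer */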
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index e5dfb6a30182..d87976364ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -23,6 +23,7 @@
*******************************************************************************/
#include "common.h"
+#include "descs_com.h"
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
@@ -233,10 +234,9 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
for (i = 0; i < ring_size; i++) {
p->des01.erx.own = 1;
p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- /* To support jumbo frames */
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (i == ring_size - 1)
- p->des01.erx.end_ring = 1;
+
+ ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+
if (disable_rx_ic)
p->des01.erx.disable_ic = 1;
p++;
@@ -249,8 +249,7 @@ static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
for (i = 0; i < ring_size; i++) {
p->des01.etx.own = 0;
- if (i == ring_size - 1)
- p->des01.etx.end_ring = 1;
+ ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
p++;
}
}
@@ -285,19 +284,16 @@ static void enh_desc_release_tx_desc(struct dma_desc *p)
int ter = p->des01.etx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- p->des01.etx.end_ring = ter;
+ enh_desc_end_tx_desc(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
- if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
- } else {
- p->des01.etx.buffer1_size = len;
- }
+
+ enh_set_tx_desc_len(p, len);
+
if (likely(csum_flag))
p->des01.etx.checksum_insertion = cic_full;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 029c2a2cf524..f7e8ba7f501a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -23,6 +23,7 @@
*******************************************************************************/
#include "common.h"
+#include "descs_com.h"
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
@@ -126,8 +127,9 @@ static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
for (i = 0; i < ring_size; i++) {
p->des01.rx.own = 1;
p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- if (i == ring_size - 1)
- p->des01.rx.end_ring = 1;
+
+ ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+
if (disable_rx_ic)
p->des01.rx.disable_ic = 1;
p++;
@@ -139,8 +141,7 @@ static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
int i;
for (i = 0; i < ring_size; i++) {
p->des01.tx.own = 0;
- if (i == ring_size - 1)
- p->des01.tx.end_ring = 1;
+ ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
p++;
}
}
@@ -175,15 +176,14 @@ static void ndesc_release_tx_desc(struct dma_desc *p)
int ter = p->des01.tx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- /* set termination field */
- p->des01.tx.end_ring = ter;
+ ndesc_end_tx_desc(p, ter);
}
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
- p->des01.tx.buffer1_size = len;
+ norm_set_tx_desc_len(p, len);
}
static void ndesc_clear_tx_ic(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
new file mode 100644
index 000000000000..fb8377da1687
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -0,0 +1,126 @@
+/*******************************************************************************
+ Specialised functions for managing Ring mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors when the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *) p;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int bmax, len;
+
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ len = nopaged_len - bmax;
+
+ if (nopaged_len > BUF_SIZE_8KiB) {
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
+ csum);
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ desc->des2 = dma_map_single(priv->device, skb->data + bmax,
+ len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ } else {
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum);
+ }
+
+ return entry;
+}
+
+static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if (len >= BUF_SIZE_4KiB)
+ ret = 1;
+
+ return ret;
+}
+
+static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
+{
+ /* Fill DES3 in case of RING mode */
+ if (bfsize >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+}
+
+/* In ring mode we need to fill the desc3 because it is used
+ * as a data buffer. */
+static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
+{
+ if (unlikely(des3_as_data_buf))
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+}
+
+static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
+ unsigned int size)
+{
+}
+
+static void stmmac_clean_desc3(struct dma_desc *p)
+{
+ if (unlikely(p->des3))
+ p->des3 = 0;
+}
+
+static int stmmac_set_16kib_bfsize(int mtu)
+{
+ int ret = 0;
+ if (unlikely(mtu >= BUF_SIZE_8KiB))
+ ret = BUF_SIZE_16KiB;
+ return ret;
+}
+
+const struct stmmac_ring_mode_ops ring_mode_ops = {
+ .is_jumbo_frm = stmmac_is_jumbo_frm,
+ .jumbo_frm = stmmac_jumbo_frm,
+ .refill_desc3 = stmmac_refill_desc3,
+ .init_desc3 = stmmac_init_desc3,
+ .init_dma_chain = stmmac_init_dma_chain,
+ .clean_desc3 = stmmac_clean_desc3,
+ .set_16kib_bfsize = stmmac_set_16kib_bfsize,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1434bdb390d4..9bafa6cf9e8b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,9 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Aug_2011"
+#define DRV_MODULE_VERSION "Oct_2011"
#include <linux/stmmac.h>
-
+#include <linux/phy.h>
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -70,6 +70,7 @@ struct stmmac_priv {
u32 msg_enable;
spinlock_t lock;
+ spinlock_t tx_lock;
int wolopts;
int wolenabled;
int wol_irq;
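The new tx_lock serializes the two paths that move the TX ring indices: in the stmmac_main.c hunks below, stmmac_xmit() (the producer, advancing cur_tx) and stmmac_tx() (the consumer, advancing dirty_tx) both take it. A sketch of the invariant, not driver code:

    /* Producer and consumer both hold tx_lock around index updates, so
     * cur_tx and dirty_tx always describe a consistent ring window. */
    spin_lock(&priv->tx_lock);
    entry = priv->cur_tx % txsize;		/* stmmac_xmit() side */
    /* ... fill descriptors, ++priv->cur_tx ... */
    spin_unlock(&priv->tx_lock);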
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index aedff9a90ebc..406404f6e321 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -96,7 +96,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
{ #m, FIELD_SIZEOF(struct stmmac_counters, m), \
offsetof(struct stmmac_priv, mmc.m)}
-static const struct stmmac_stats stmmac_gstr_mmc[] = {
+static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
STMMAC_MMC_STAT(mmc_tx_framecount_gb),
STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
@@ -177,7 +177,7 @@ static const struct stmmac_stats stmmac_gstr_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
-#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_gstr_mmc)
+#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -348,13 +348,17 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
priv->ioaddr);
else {
/* If supported, for new GMAC chips expose the MMC counters */
- dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_read(priv->ioaddr, &priv->mmc);
- for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
- char *p = (char *)priv + stmmac_gstr_mmc[i].stat_offset;
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ char *p;
+ p = (char *)priv + stmmac_mmc[i].stat_offset;
- data[j++] = (stmmac_gstr_mmc[i].sizeof_stat ==
- sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ data[j++] = (stmmac_mmc[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) :
+ (*(u32 *)p);
+ }
}
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
@@ -373,7 +377,7 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_STATS:
len = STMMAC_STATS_LEN;
- if (priv->plat->has_gmac)
+ if (priv->dma_cap.rmon)
len += STMMAC_MMC_STATS_LEN;
return len;
@@ -390,9 +394,9 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
- if (priv->plat->has_gmac)
+ if (priv->dma_cap.rmon)
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
- memcpy(p, stmmac_gstr_mmc[i].stat_string,
+ memcpy(p, stmmac_mmc[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0ee6b6b0198..aeaa15b451de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2,7 +2,7 @@
This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
ST Ethernet IPs are built around a Synopsys IP Core.
- Copyright (C) 2007-2009 STMicroelectronics Ltd
+ Copyright(C) 2007-2011 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,17 +41,16 @@
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
-#include <linux/phy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
-#include "stmmac.h"
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
+#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -304,7 +303,7 @@ static int stmmac_init_phy(struct net_device *dev)
struct phy_device *phydev;
char phy_id[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
-
+ int interface = priv->plat->interface;
priv->oldlink = 0;
priv->speed = 0;
priv->oldduplex = -1;
@@ -314,14 +313,21 @@ static int stmmac_init_phy(struct net_device *dev)
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
- priv->plat->interface);
+ phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(phydev);
}
+ /* Stop Advertising 1000BASE Capability if interface is not GMII */
+ if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) ||
+ (interface == PHY_INTERFACE_MODE_RMII))) {
+ phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+		phydev->advertising = phydev->supported;
+ }
+
/*
* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
@@ -381,11 +387,28 @@ static void display_ring(struct dma_desc *p, int size)
}
}
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
+ if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu >= DMA_BUFFER_SIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DMA_BUFFER_SIZE;
+
+ return ret;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
* Description: this function initializes the DMA RX/TX descriptors
- * and allocates the socket buffers.
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
*/
static void init_dma_desc_rings(struct net_device *dev)
{
@@ -394,31 +417,24 @@ static void init_dma_desc_rings(struct net_device *dev)
struct sk_buff *skb;
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
- unsigned int bfsize = priv->dma_buf_sz;
- int buff2_needed = 0, dis_ic = 0;
+ unsigned int bfsize;
+ int dis_ic = 0;
+ int des3_as_data_buf = 0;
- /* Set the Buffer size according to the MTU;
- * indeed, in case of jumbo we need to bump-up the buffer sizes.
- */
- if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
- bfsize = BUF_SIZE_16KiB;
- else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
- bfsize = BUF_SIZE_8KiB;
- else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
- bfsize = BUF_SIZE_4KiB;
- else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
- bfsize = BUF_SIZE_2KiB;
+ /* Set the max buffer size according to the DESC mode
+ * and the MTU. Note that RING mode allows a 16KiB buffer size. */
+ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+
+ if (bfsize == BUF_SIZE_16KiB)
+ des3_as_data_buf = 1;
else
- bfsize = DMA_BUFFER_SIZE;
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
#ifdef CONFIG_STMMAC_TIMER
/* Disable interrupts on completion for the reception if timer is on */
if (likely(priv->tm->enable))
dis_ic = 1;
#endif
- /* If the MTU exceeds 8k so use the second buffer in the chain */
- if (bfsize >= BUF_SIZE_8KiB)
- buff2_needed = 1;
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
@@ -446,7 +462,7 @@ static void init_dma_desc_rings(struct net_device *dev)
return;
}
- DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+ DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
"Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
dev->name, priv->dma_rx, priv->dma_tx,
(unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
@@ -458,18 +474,21 @@ static void init_dma_desc_rings(struct net_device *dev)
for (i = 0; i < rxsize; i++) {
struct dma_desc *p = priv->dma_rx + i;
- skb = netdev_alloc_skb_ip_align(dev, bfsize);
+ skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
+ GFP_KERNEL);
if (unlikely(skb == NULL)) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
break;
}
+ skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
bfsize, DMA_FROM_DEVICE);
p->des2 = priv->rx_skbuff_dma[i];
- if (unlikely(buff2_needed))
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+
+ priv->hw->ring->init_desc3(des3_as_data_buf, p);
+
DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
}
@@ -483,6 +502,12 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->tx_skbuff[i] = NULL;
priv->dma_tx[i].des2 = 0;
}
+
+ /* In case of Chained mode this sets the des3 to the next
+ * element in the chain */
+ priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
+ priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
+
priv->dirty_tx = 0;
priv->cur_tx = 0;
@@ -581,6 +606,8 @@ static void stmmac_tx(struct stmmac_priv *priv)
{
unsigned int txsize = priv->dma_tx_size;
+ spin_lock(&priv->tx_lock);
+
while (priv->dirty_tx != priv->cur_tx) {
int last;
unsigned int entry = priv->dirty_tx % txsize;
@@ -611,8 +638,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
dma_unmap_single(priv->device, p->des2,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
- if (unlikely(p->des3))
- p->des3 = 0;
+ priv->hw->ring->clean_desc3(p);
if (likely(skb != NULL)) {
/*
@@ -644,6 +670,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
+ spin_unlock(&priv->tx_lock);
}
static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -718,7 +745,6 @@ static void stmmac_no_timer_stopped(void)
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
-
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -789,33 +815,45 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
if (likely(hw_cap)) {
- priv->dma_cap.mbps_10_100 = (hw_cap & 0x1);
- priv->dma_cap.mbps_1000 = (hw_cap & 0x2) >> 1;
- priv->dma_cap.half_duplex = (hw_cap & 0x4) >> 2;
- priv->dma_cap.hash_filter = (hw_cap & 0x10) >> 4;
- priv->dma_cap.multi_addr = (hw_cap & 0x20) >> 5;
- priv->dma_cap.pcs = (hw_cap & 0x40) >> 6;
- priv->dma_cap.sma_mdio = (hw_cap & 0x100) >> 8;
- priv->dma_cap.pmt_remote_wake_up = (hw_cap & 0x200) >> 9;
- priv->dma_cap.pmt_magic_frame = (hw_cap & 0x400) >> 10;
- priv->dma_cap.rmon = (hw_cap & 0x800) >> 11; /* MMC */
+ priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ priv->dma_cap.multi_addr =
+ (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
+ priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ priv->dma_cap.pmt_remote_wake_up =
+ (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ priv->dma_cap.pmt_magic_frame =
+ (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+		/* MMC */
+ priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
/* IEEE 1588-2002*/
- priv->dma_cap.time_stamp = (hw_cap & 0x1000) >> 12;
+ priv->dma_cap.time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
/* IEEE 1588-2008*/
- priv->dma_cap.atime_stamp = (hw_cap & 0x2000) >> 13;
+ priv->dma_cap.atime_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
/* 802.3az - Energy-Efficient Ethernet (EEE) */
- priv->dma_cap.eee = (hw_cap & 0x4000) >> 14;
- priv->dma_cap.av = (hw_cap & 0x8000) >> 15;
+ priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
/* TX and RX csum */
- priv->dma_cap.tx_coe = (hw_cap & 0x10000) >> 16;
- priv->dma_cap.rx_coe_type1 = (hw_cap & 0x20000) >> 17;
- priv->dma_cap.rx_coe_type2 = (hw_cap & 0x40000) >> 18;
- priv->dma_cap.rxfifo_over_2048 = (hw_cap & 0x80000) >> 19;
+ priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ priv->dma_cap.rx_coe_type1 =
+ (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ priv->dma_cap.rx_coe_type2 =
+ (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ priv->dma_cap.rxfifo_over_2048 =
+ (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
/* TX and RX number of channels */
- priv->dma_cap.number_rx_channel = (hw_cap & 0x300000) >> 20;
- priv->dma_cap.number_tx_channel = (hw_cap & 0xc00000) >> 22;
+ priv->dma_cap.number_rx_channel =
+ (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ priv->dma_cap.number_tx_channel =
+ (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
/* Alternate (enhanced) DESC mode*/
- priv->dma_cap.enh_desc = (hw_cap & 0x1000000) >> 24;
+ priv->dma_cap.enh_desc =
+ (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
} else
pr_debug("\tNo HW DMA feature register supported");
@@ -924,7 +962,8 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- stmmac_mmc_setup(priv);
+ if (priv->dma_cap.rmon)
+ stmmac_mmc_setup(priv);
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
@@ -1005,47 +1044,6 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
-static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
- struct net_device *dev,
- int csum_insertion)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int nopaged_len = skb_headlen(skb);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
- struct dma_desc *desc = priv->dma_tx + entry;
-
- if (nopaged_len > BUF_SIZE_8KiB) {
-
- int buf2_size = nopaged_len - BUF_SIZE_8KiB;
-
- desc->des2 = dma_map_single(priv->device, skb->data,
- BUF_SIZE_8KiB, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
-
- entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
-
- desc->des2 = dma_map_single(priv->device,
- skb->data + BUF_SIZE_8KiB,
- buf2_size, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
- csum_insertion);
- priv->hw->desc->set_tx_owner(desc);
- priv->tx_skbuff[entry] = NULL;
- } else {
- desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
- }
- return entry;
-}
-
/**
* stmmac_xmit:
* @skb : the socket buffer
@@ -1060,6 +1058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int i, csum_insertion = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
+ unsigned int nopaged_len = skb_headlen(skb);
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -1071,6 +1070,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ spin_lock(&priv->tx_lock);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
@@ -1078,7 +1079,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
pr_info("stmmac xmit:\n"
"\tskb addr %p - len: %d - nopaged_len: %d\n"
"\tn_frags: %d - ip_summed: %d - %s gso\n",
- skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+ skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
!skb_is_gso(skb) ? "isn't" : "is");
#endif
@@ -1091,14 +1092,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
"\t\tn_frags: %d, ip_summed: %d\n",
- skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+ skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
priv->tx_skbuff[entry] = skb;
- if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
- entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+
+ if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
+ entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
desc = priv->dma_tx + entry;
} else {
- unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
@@ -1106,8 +1107,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
for (i = 0; i < nfrags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -1159,6 +1160,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ spin_unlock(&priv->tx_lock);
+
return NETDEV_TX_OK;
}
@@ -1187,11 +1190,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
DMA_FROM_DEVICE);
(p + entry)->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->plat->has_gmac)) {
- if (bfsize >= BUF_SIZE_8KiB)
- (p + entry)->des3 =
- (p + entry)->des2 + BUF_SIZE_8KiB;
- }
+
+ if (unlikely(priv->plat->has_gmac))
+ priv->hw->ring->refill_desc3(bfsize, p + entry);
+
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
wmb();
@@ -1398,10 +1400,10 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->plat->has_gmac)
+ if (priv->plat->enh_desc)
max_mtu = JUMBO_LEN;
else
- max_mtu = ETH_DATA_LEN;
+ max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
if ((new_mtu < 46) || (new_mtu > max_mtu)) {
pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
@@ -1724,6 +1726,7 @@ static int stmmac_probe(struct net_device *dev)
"please, use ifconfig or nwhwconfig!\n");
spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->tx_lock);
ret = register_netdev(dev);
if (ret) {
@@ -1767,6 +1770,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
device->desc = &ndesc_ops;
priv->hw = device;
+ priv->hw->ring = &ring_mode_ops;
if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
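A worked example of the reworked buffer sizing, combining set_16kib_bfsize() (a real return value only in the RING build) with stmmac_set_bfsize() above. The BUF_SIZE_* values are assumed to be the usual 2048/4096/8192/16384; descs.h holds the authoritative definitions.

    /* Assumed MTU -> bfsize mapping (illustrative):
     *   mtu = 1500 : set_16kib_bfsize() -> 0, stmmac_set_bfsize() -> DMA_BUFFER_SIZE
     *   mtu = 4000 : -> 0, mtu >= BUF_SIZE_2KiB              -> BUF_SIZE_4KiB
     *   mtu = 9000 : RING build    -> BUF_SIZE_16KiB (des3 used as data buffer)
     *                CHAINED build -> 0, mtu >= BUF_SIZE_4KiB -> BUF_SIZE_8KiB
     */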
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index d9460d81a137..fd40988c19a6 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2051,7 +2051,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
frag->page_offset = off;
- frag->size = hlen - swivel;
+ skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
@@ -2075,7 +2075,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
frag->page_offset = 0;
- frag->size = hlen;
+ skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -2826,9 +2826,9 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
entry = TX_DESC_NEXT(ring, entry);
for (frag = 0; frag < nr_frags; frag++) {
- skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- len = fragp->size;
+ len = skb_frag_size(fragp);
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 23740e848ac9..73c708107a37 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3594,7 +3594,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
tb = &rp->tx_buffs[idx];
BUG_ON(tb->skb != NULL);
np->ops->unmap_page(np->device, tb->mapping,
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE);
idx = NEXT_TX(rp, idx);
}
@@ -6727,9 +6727,9 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
frag->page_offset, len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 6b62a73227c2..ceab215bb4a3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1065,12 +1065,12 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
dma_addr_t mapping;
u64 this_ctrl;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
0, len, DMA_TO_DEVICE);
this_ctrl = ctrl;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 869d47be54b4..c517dac02ae1 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2305,10 +2305,10 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+ const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len, mapping, this_txflags;
- len = this_frag->size;
+ len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
0, len, DMA_TO_DEVICE);
this_txflags = tx_flags;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index c77e3bf4750a..3a90af6d111c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1493,12 +1493,12 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
bdx_tx_db_inc_wptr(db);
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
+ const struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[i];
- db->wptr->len = frag->size;
+ db->wptr->len = skb_frag_size(frag);
db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
- 0, frag->size,
+ 0, skb_frag_size(frag),
DMA_TO_DEVICE);
pbl++;
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 1e2af96fc29c..10826d8a2a2d 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -177,7 +177,7 @@ struct tile_net_cpu {
struct tile_net_stats_t stats;
/* True iff NAPI is enabled. */
bool napi_enabled;
- /* True if this tile has succcessfully registered with the IPP. */
+ /* True if this tile has successfully registered with the IPP. */
bool registered;
/* True if the link was down last time we tried to register. */
bool link_down;
@@ -1713,7 +1713,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
frags[n].cpa_lo = cpa;
frags[n].cpa_hi = cpa >> 32;
- frags[n].length = f->size;
+ frags[n].length = skb_frag_size(f);
frags[n].hash_for_home = hash_for_home;
n++;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a03996cf88ed..a8df7eca0956 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -709,13 +709,13 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
data->txring[tx].len = skb_headlen(skb);
misc |= TSI108_TX_SOF;
} else {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag,
0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
- data->txring[tx].len = frag->size;
+ data->txring[tx].len = skb_frag_size(frag);
}
if (i == frags - 1)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index b47bce1a2e2a..4535d7cc848e 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2554,16 +2554,16 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/* Handle fragments */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
frag, 0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
}
tdinfo->nskb_dma = i + 1;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 66e3c36c3733..4d1658e78dee 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -716,8 +716,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
cur_p->phys = dma_map_single(ndev->dev.parent,
skb_frag_address(frag),
- frag->size, DMA_TO_DEVICE);
- cur_p->len = frag->size;
+					     skb_frag_size(frag), DMA_TO_DEVICE);
+		cur_p->len = skb_frag_size(frag);
cur_p->app0 = 0;
frag++;
}
@@ -955,6 +955,32 @@ static const struct attribute_group temac_attr_group = {
.attrs = temac_device_attrs,
};
+/* ethtool support */
+static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ return phy_ethtool_gset(lp->phy_dev, cmd);
+}
+
+static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ return phy_ethtool_sset(lp->phy_dev, cmd);
+}
+
+static int temac_nway_reset(struct net_device *ndev)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+ return phy_start_aneg(lp->phy_dev);
+}
+
+static const struct ethtool_ops temac_ethtool_ops = {
+ .get_settings = temac_get_settings,
+ .set_settings = temac_set_settings,
+ .nway_reset = temac_nway_reset,
+ .get_link = ethtool_op_get_link,
+};
+
static int __devinit temac_of_probe(struct platform_device *op)
{
struct device_node *np;
@@ -976,6 +1002,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
ndev->netdev_ops = &temac_netdev_ops;
+ ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 25bb2a015e18..a40fab44b9ae 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -183,7 +183,7 @@ config OLD_BELKIN_DONGLE
Say Y here if you want to build support for the Adaptec Airport 1000
and 2000 dongles. If you want to compile it as a module, choose
M here. Some information is contained in the comments
- at the top of <file:drivers/net/irda/old_belkin.c>.
+ at the top of <file:drivers/net/irda/old_belkin-sir.c>.
config ACT200L_DONGLE
tristate "ACTiSYS IR-200L dongle"
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 24cf942e1316..a3ce3d4561ed 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -169,6 +169,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3da557830937..1b7082d08f33 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -51,15 +51,13 @@ static struct proto macvtap_proto = {
};
/*
- * Minor number matches netdev->ifindex, so need a potentially
- * large value. This also makes it possible to split the
- * tap functionality out again in the future by offering it
- * from other drivers besides macvtap. As long as every device
- * only has one tap, the interface numbers assure that the
- * device nodes are unique.
+ * Variables for dealing with macvtap device numbers.
*/
static dev_t macvtap_major;
-#define MACVTAP_NUM_DEVS 65536
+#define MACVTAP_NUM_DEVS (1U << MINORBITS)
+static DEFINE_MUTEX(minor_lock);
+static DEFINE_IDR(minor_idr);
+
#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;
@@ -231,6 +229,8 @@ static void macvtap_del_queues(struct net_device *dev)
}
}
BUG_ON(vlan->numvtaps != 0);
+ /* guarantee that any future macvtap_set_queue will fail */
+ vlan->numvtaps = MAX_MACVTAP_QUEUES;
spin_unlock(&macvtap_lock);
synchronize_rcu();
@@ -273,39 +273,73 @@ static int macvtap_receive(struct sk_buff *skb)
return macvtap_forward(skb->dev, skb);
}
-static int macvtap_newlink(struct net *src_net,
- struct net_device *dev,
- struct nlattr *tb[],
- struct nlattr *data[])
+static int macvtap_get_minor(struct macvlan_dev *vlan)
{
- struct device *classdev;
- dev_t devt;
- int err;
+ int retval = -ENOMEM;
+ int id;
+
+ mutex_lock(&minor_lock);
+ if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
+ goto exit;
+
+ retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
+ if (retval < 0) {
+ if (retval == -EAGAIN)
+ retval = -ENOMEM;
+ goto exit;
+ }
+ if (id < MACVTAP_NUM_DEVS) {
+ vlan->minor = id;
+ } else {
+ printk(KERN_ERR "too many macvtap devices\n");
+ retval = -EINVAL;
+ idr_remove(&minor_idr, id);
+ }
+exit:
+ mutex_unlock(&minor_lock);
+ return retval;
+}
- err = macvlan_common_newlink(src_net, dev, tb, data,
- macvtap_receive, macvtap_forward);
- if (err)
- goto out;
+static void macvtap_free_minor(struct macvlan_dev *vlan)
+{
+ mutex_lock(&minor_lock);
+ if (vlan->minor) {
+ idr_remove(&minor_idr, vlan->minor);
+ vlan->minor = 0;
+ }
+ mutex_unlock(&minor_lock);
+}
- devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+static struct net_device *dev_get_by_macvtap_minor(int minor)
+{
+ struct net_device *dev = NULL;
+ struct macvlan_dev *vlan;
- classdev = device_create(macvtap_class, &dev->dev, devt,
- dev, "tap%d", dev->ifindex);
- if (IS_ERR(classdev)) {
- err = PTR_ERR(classdev);
- macvtap_del_queues(dev);
+ mutex_lock(&minor_lock);
+ vlan = idr_find(&minor_idr, minor);
+ if (vlan) {
+ dev = vlan->dev;
+ dev_hold(dev);
}
+ mutex_unlock(&minor_lock);
+ return dev;
+}
-out:
- return err;
+static int macvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ /* Don't put anything that may fail after macvlan_common_newlink
+ * because we can't undo what it does.
+ */
+ return macvlan_common_newlink(src_net, dev, tb, data,
+ macvtap_receive, macvtap_forward);
}
static void macvtap_dellink(struct net_device *dev,
struct list_head *head)
{
- device_destroy(macvtap_class,
- MKDEV(MAJOR(macvtap_major), dev->ifindex));
-
macvtap_del_queues(dev);
macvlan_dellink(dev, head);
}
@@ -337,11 +371,15 @@ static void macvtap_sock_write_space(struct sock *sk)
wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
+static void macvtap_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
static int macvtap_open(struct inode *inode, struct file *file)
{
struct net *net = current->nsproxy->net_ns;
- struct net_device *dev = dev_get_by_index(net, iminor(inode));
- struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
struct macvtap_queue *q;
int err;
@@ -349,11 +387,6 @@ static int macvtap_open(struct inode *inode, struct file *file)
if (!dev)
goto out;
- /* check if this is a macvtap device */
- err = -EINVAL;
- if (dev->rtnl_link_ops != &macvtap_link_ops)
- goto out;
-
err = -ENOMEM;
q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&macvtap_proto);
@@ -368,18 +401,19 @@ static int macvtap_open(struct inode *inode, struct file *file)
q->sock.ops = &macvtap_socket_ops;
sock_init_data(&q->sock, &q->sk);
q->sk.sk_write_space = macvtap_sock_write_space;
+ q->sk.sk_destruct = macvtap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
/*
* so far only KVM virtio_net uses macvtap, enable zero copy between
* guest kernel and host kernel when lower device supports zerocopy
+ *
+	 * A macvlan supports zerocopy iff its lower device does, so we
+	 * don't have to look at the lower device directly.
*/
- if (vlan) {
- if ((vlan->lowerdev->features & NETIF_F_HIGHDMA) &&
- (vlan->lowerdev->features & NETIF_F_SG))
- sock_set_flag(&q->sk, SOCK_ZEROCOPY);
- }
+ if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
+ sock_set_flag(&q->sk, SOCK_ZEROCOPY);
err = macvtap_set_queue(dev, file, q);
if (err)
@@ -968,6 +1002,52 @@ struct socket *macvtap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
+static int macvtap_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct macvlan_dev *vlan;
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ return NOTIFY_DONE;
+
+ vlan = netdev_priv(dev);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /* Create the device node here after the network device has
+ * been registered but before register_netdevice has
+ * finished running.
+ */
+ err = macvtap_get_minor(vlan);
+ if (err)
+ return notifier_from_errno(err);
+
+ devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ macvtap_free_minor(vlan);
+ return notifier_from_errno(PTR_ERR(classdev));
+ }
+ break;
+ case NETDEV_UNREGISTER:
+ devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+ device_destroy(macvtap_class, devt);
+ macvtap_free_minor(vlan);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block macvtap_notifier_block __read_mostly = {
+ .notifier_call = macvtap_device_event,
+};
+
static int macvtap_init(void)
{
int err;
@@ -988,12 +1068,18 @@ static int macvtap_init(void)
goto out3;
}
- err = macvlan_link_register(&macvtap_link_ops);
+ err = register_netdevice_notifier(&macvtap_notifier_block);
if (err)
goto out4;
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out5;
+
return 0;
+out5:
+ unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
class_unregister(macvtap_class);
out3:
@@ -1008,6 +1094,7 @@ module_init(macvtap_init);
static void macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
+ unregister_netdevice_notifier(&macvtap_notifier_block);
class_unregister(macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
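One detail of the allocator above is worth calling out: minors are handed out starting at 1 (idr_get_new_above(..., 1, &id)), so vlan->minor == 0 doubles as the "nothing allocated" sentinel that macvtap_free_minor() checks. A standalone sketch of the same idr idiom, assuming the pre-idr_alloc() API this patch uses:

    static DEFINE_MUTEX(sketch_lock);
    static DEFINE_IDR(sketch_idr);

    static int sketch_get_id(void *ptr)
    {
    	int id, err;

    	mutex_lock(&sketch_lock);
    	if (!idr_pre_get(&sketch_idr, GFP_KERNEL)) {	/* preload idr nodes */
    		mutex_unlock(&sketch_lock);
    		return -ENOMEM;
    	}
    	err = idr_get_new_above(&sketch_idr, ptr, 1, &id);	/* ids from 1 up */
    	mutex_unlock(&sketch_lock);
    	return err ? err : id;
    }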
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ed2a3977c6e7..e8882023576b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt,
return err;
if (enabled < 0 || enabled > 1)
return -EINVAL;
+ if (enabled == nt->enabled) {
+ printk(KERN_INFO "netconsole: network logging has already %s\n",
+ nt->enabled ? "started" : "stopped");
+ return -EINVAL;
+ }
if (enabled) { /* 1 */
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d84c4224dd12..e8be47d6d7d0 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -553,7 +553,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
/*
* There is no BCM5481 specification available, so down
* here is everything we know about "register 0x18". This
- * at least helps BCM5481 to successfuly receive packets
+ * at least helps BCM5481 to successfully receive packets
* on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com>
* says: "This sets delay between the RXD and RXC signals
* instead of using trace lengths to achieve timing".
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index c588a162050f..9663e0ba6003 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1007,6 +1007,7 @@ static void dp83640_remove(struct phy_device *phydev)
struct dp83640_clock *clock;
struct list_head *this, *next;
struct dp83640_private *tmp, *dp83640 = phydev->priv;
+ struct sk_buff *skb;
if (phydev->addr == BROADCAST_ADDR)
return;
@@ -1014,6 +1015,12 @@ static void dp83640_remove(struct phy_device *phydev)
enable_status_frames(phydev, false);
cancel_work_sync(&dp83640->ts_work);
+ while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL)
+ kfree_skb(skb);
+
+ while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
+ skb_complete_tx_timestamp(skb, NULL);
+
clock = dp83640_clock_get(dp83640->clock);
if (dp83640 == clock->chosen) {
@@ -1192,7 +1199,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
case HWTSTAMP_TX_ONESTEP_SYNC:
if (is_sync(skb, type)) {
- kfree_skb(skb);
+ skb_complete_tx_timestamp(skb, NULL);
return;
}
/* fall through */
@@ -1203,7 +1210,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
case HWTSTAMP_TX_OFF:
default:
- kfree_skb(skb);
+ skb_complete_tx_timestamp(skb, NULL);
break;
}
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d66bd8d12599..c81f136ae670 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -128,12 +128,15 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- /* Additional delay (2ns) used to adjust RX clock phase
- * at GMII/ RGMII interface */
- c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
- c |= IP1001_PHASE_SEL_MASK;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
+ /* Additional delay (2ns) used to adjust RX clock phase
+ * at RGMII interface */
+ c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+ c |= IP1001_PHASE_SEL_MASK;
+ c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+ }
- return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+ return c;
}
static int ip101a_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 47c8339a0359..2843c90f712f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
static struct platform_driver mdio_ofgpio_driver = {
.driver = {
- .name = "mdio-gpio",
+ .name = "mdio-ofgpio",
.owner = THIS_MODULE,
.of_match_table = mdio_ofgpio_match,
},
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 5d8f6e17bd55..0ec8e09cc2ac 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -3,7 +3,7 @@
*
* Author: Kriston Carson
*
- * Copyright (c) 2005 Freescale Semiconductor, Inc.
+ * Copyright (c) 2005, 2009 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -61,32 +61,42 @@ MODULE_DESCRIPTION("Vitesse PHY driver");
MODULE_AUTHOR("Kriston Carson");
MODULE_LICENSE("GPL");
-static int vsc824x_config_init(struct phy_device *phydev)
+int vsc824x_add_skew(struct phy_device *phydev)
{
- int extcon;
int err;
-
- err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
- MII_VSC8244_AUXCONSTAT_INIT);
- if (err < 0)
- return err;
+ int extcon;
extcon = phy_read(phydev, MII_VSC8244_EXT_CON1);
if (extcon < 0)
- return err;
+ return extcon;
extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
MII_VSC8244_EXTCON1_RX_SKEW_MASK);
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
- MII_VSC8244_EXTCON1_RX_SKEW);
+ extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
+ MII_VSC8244_EXTCON1_RX_SKEW);
err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon);
return err;
}
+EXPORT_SYMBOL(vsc824x_add_skew);
+
+static int vsc824x_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
+ MII_VSC8244_AUXCONSTAT_INIT);
+ if (err < 0)
+ return err;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ err = vsc824x_add_skew(phydev);
+
+ return err;
+}
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
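
Splitting the skew programming into vsc824x_add_skew() and exporting it lets code outside the driver enable the TX/RX skew even when the interface mode is not RGMII_ID. As a hypothetical illustration only (the ID and mask values are placeholders, not part of this patch), board code could apply it through a PHY fixup:

    /* Hypothetical board-level fixup built on the exported helper;
     * 0x000fc6c0/0xfffffff0 stand in for a VSC824x ID and mask. */
    static int board_vsc824x_fixup(struct phy_device *phydev)
    {
            return vsc824x_add_skew(phydev);
    }

    static int __init board_phy_fixups_init(void)
    {
            return phy_register_fixup_for_uid(0x000fc6c0, 0xfffffff0,
                                              board_vsc824x_fixup);
    }
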
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index eae542a7e987..89f829f5f725 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
ip_send_check(iph);
ip_local_out(skb);
+ return 1;
tx_error:
+ kfree_skb(skb);
return 1;
}
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
}
header = (struct pptp_gre_header *)(skb->data);
+ headersize = sizeof(*header);
/* test if acknowledgement present */
if (PPTP_GRE_IS_A(header->ver)) {
- __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
- header->ack : header->seq; /* ack in different place if S = 0 */
+ __u32 ack;
+
+ if (!pskb_may_pull(skb, headersize))
+ goto drop;
+ header = (struct pptp_gre_header *)(skb->data);
+
+ /* ack in different place if S = 0 */
+ ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq;
ack = ntohl(ack);
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
/* also handle sequence number wrap-around */
if (WRAPPED(ack, opt->ack_recv))
opt->ack_recv = ack;
+ } else {
+ headersize -= sizeof(header->ack);
}
-
/* test if payload present */
if (!PPTP_GRE_IS_S(header->flags))
goto drop;
- headersize = sizeof(*header);
payload_len = ntohs(header->payload_len);
seq = ntohl(header->seq);
- /* no ack present? */
- if (!PPTP_GRE_IS_A(header->ver))
- headersize -= sizeof(header->ack);
/* check for incomplete packet (length smaller than expected) */
- if (skb->len - headersize < payload_len)
+ if (!pskb_may_pull(skb, headersize + payload_len))
goto drop;
payload = skb->data + headersize;
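
Both pptp hunks enforce the same receive-path rule: confirm with pskb_may_pull() that the header (and later the header plus payload) bytes really sit in the linear area before dereferencing them, and reload any header pointer after the call, since pskb_may_pull() may reallocate skb->head. A minimal sketch of the pattern with a hypothetical header:

    struct example_hdr {                    /* hypothetical on-wire header */
            __be16 payload_len;
    };

    static int example_rcv(struct sk_buff *skb)
    {
            const struct example_hdr *hdr;
            unsigned int len;

            if (!pskb_may_pull(skb, sizeof(*hdr)))
                    return -EINVAL;         /* truncated header */

            /* reload after the pull: skb->head may have moved */
            hdr = (const struct example_hdr *)skb->data;
            len = ntohs(hdr->payload_len);

            if (!pskb_may_pull(skb, sizeof(*hdr) + len))
                    return -EINVAL;         /* truncated payload */

            return 0;
    }
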
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 1c85c477e174..e81e22e3d1d2 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1397,6 +1397,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
u8 buf[ETH_ALEN];
u32 phyid;
+ struct asix_data *data = (struct asix_data *)&dev->data;
+
+ data->eeprom_len = AX88772_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index cdb958875ba4..7d6082160bcc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1476,7 +1476,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
if (!dev->suspend_count++) {
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
- if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
+ if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
spin_unlock_irq(&dev->txq.lock);
return -EBUSY;
} else {
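
PMSG_IS_AUTO() replaces the open-coded PM_EVENT_AUTO test here and in the i2400m hunks below, so drivers no longer poke at pm_message_t internals to tell runtime (auto) suspend apart from system suspend. As I understand it, the helper in include/linux/pm.h amounts to:

    /* True when the pm_message_t describes a runtime (auto)
     * suspend rather than a system-wide one. */
    #define PMSG_IS_AUTO(msg)       (((msg).event & PM_EVENT_AUTO) != 0)
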
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8225f3b31d1..91039ab16728 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -143,18 +143,16 @@ static void skb_xmit_done(struct virtqueue *svq)
static void set_skb_frag(struct sk_buff *skb, struct page *page,
unsigned int offset, unsigned int *len)
{
+ int size = min((unsigned)PAGE_SIZE - offset, *len);
int i = skb_shinfo(skb)->nr_frags;
- skb_frag_t *f;
- f = &skb_shinfo(skb)->frags[i];
- f->size = min((unsigned)PAGE_SIZE - offset, *len);
- f->page_offset = offset;
- __skb_frag_set_page(f, page);
+ __skb_fill_page_desc(skb, i, page, offset, size);
- skb->data_len += f->size;
- skb->len += f->size;
+ skb->data_len += size;
+ skb->len += size;
+ skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
- *len -= f->size;
+ *len -= size;
}
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
@@ -290,7 +288,6 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
}
hdr = skb_vnet_hdr(skb);
- skb->truesize += skb->data_len;
u64_stats_update_begin(&stats->syncp);
stats->rx_bytes += skb->len;
@@ -880,8 +877,21 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}
+static void virtnet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
+ ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+ ring->rx_pending = ring->rx_max_pending;
+ ring->tx_pending = ring->tx_max_pending;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
+ .get_ringparam = virtnet_get_ringparam,
};
#define MIN_MTU 68
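
The set_skb_frag() rewrite computes the fragment size once, attaches the page via __skb_fill_page_desc(), and charges a full PAGE_SIZE to skb->truesize per fragment at attach time, which is why the lump-sum `skb->truesize += skb->data_len` in receive_buf() goes away: the whole page is pinned regardless of how much of it holds data. For orientation, __skb_fill_page_desc() covers roughly the open-coded triple it replaces; a sketch under that assumption:

    /* Rough shape of the helper used above: bind a page to fragment
     * slot i and record its offset and length. */
    static inline void sketch_fill_page_desc(struct sk_buff *skb, int i,
                                             struct page *page, int off,
                                             int size)
    {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            __skb_frag_set_page(frag, page);
            frag->page_offset = off;
            skb_frag_size_set(frag, size);
    }
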
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 902f284fd054..b771ebac0f01 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -656,8 +656,8 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
__skb_frag_set_page(frag, rbi->page);
frag->page_offset = 0;
- frag->size = rcd->len;
- skb->data_len += frag->size;
+ skb_frag_size_set(frag, rcd->len);
+ skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
}
@@ -745,21 +745,21 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_PAGE;
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
- 0, frag->size,
+ 0, skb_frag_size(frag),
DMA_TO_DEVICE);
- tbi->len = frag->size;
+ tbi->len = skb_frag_size(frag);
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
- gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
+ gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
gdesc->dword[3] = 0;
dev_dbg(&adapter->netdev->dev,
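
These vmxnet3 hunks, like the xen-netback and xen-netfront ones below, convert direct frag->size reads and writes to the skb_frag_size() accessor family, insulating drivers from the skb_frag_t layout. As introduced, the accessors are thin wrappers, roughly:

    /* Thin accessors over skb_frag_t, approximating
     * include/linux/skbuff.h at the time of these conversions. */
    static inline unsigned int skb_frag_size(const skb_frag_t *frag)
    {
            return frag->size;
    }

    static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
    {
            frag->size = size;
    }
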
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 298f2b0b6311..9a644d052f1e 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
*
* As well, the device might refuse going to sleep for whichever
* reason. In this case we just fail. For system suspend/hibernate,
- * we *can't* fail. We check PM_EVENT_AUTO to see if the
+ * we *can't* fail. We check PMSG_IS_AUTO to see if the
* suspend call comes from the USB stack or from the system and act
* in consequence.
*
@@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
struct i2400m *i2400m = &i2400mu->i2400m;
#ifdef CONFIG_PM
- if (pm_msg.event & PM_EVENT_AUTO)
+ if (PMSG_IS_AUTO(pm_msg))
is_autosuspend = 1;
#endif
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index ef9ad79d1bfd..127e9c63beaf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -161,7 +161,7 @@ that only one external action is invoked at a time.
#include <linux/firmware.h>
#include <linux/acpi.h>
#include <linux/ctype.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/lib80211.h>
@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
-static struct pm_qos_request_list ipw2100_pm_qos_req;
+static struct pm_qos_request ipw2100_pm_qos_req;
/* Debugging stuff */
#ifdef CONFIG_IPW2100_DEBUG
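
The ipw2100 hunks track the kernel-wide pm_qos rename (pm_qos_params.h becomes pm_qos.h, struct pm_qos_request_list becomes struct pm_qos_request); the call sites keep their shape. A sketch of typical usage with the renamed type (the class and value here are illustrative):

    static struct pm_qos_request ipw2100_pm_qos_req;

    /* Register a CPU DMA latency request at init; the driver can
     * later tighten it with pm_qos_update_request(). */
    static int __init example_pm_qos_init(void)
    {
            pm_qos_add_request(&ipw2100_pm_qos_req,
                               PM_QOS_CPU_DMA_LATENCY,
                               PM_QOS_DEFAULT_VALUE);
            return 0;
    }
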
diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c
index 01c88a71abe1..e8c039879b05 100644
--- a/drivers/net/wireless/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_tx.c
@@ -395,7 +395,7 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
(CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
bytes_per_frag -= LIBIPW_FCS_LEN;
- /* Each fragment may need to have room for encryptiong
+ /* Each fragment may need to have room for encryption
* pre/postfix */
if (host_encrypt)
bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
index ae753962d8b5..4bd3dc5adf7c 100644
--- a/drivers/net/wireless/libertas_tf/deb_defs.h
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -3,7 +3,7 @@
* global variable declaration.
*/
#ifndef _LBS_DEB_DEFS_H_
-#define _LBS_DEB_EFS_H_
+#define _LBS_DEB_DEFS_H_
#ifndef DRV_NAME
#define DRV_NAME "libertas_tf"
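
The guard fix above is worth a second look: the #ifndef test and the #define must name the same macro, and with the old _LBS_DEB_EFS_H_ typo the guard macro was never defined, so every inclusion re-entered the header. The canonical pattern:

    #ifndef _LBS_DEB_DEFS_H_
    #define _LBS_DEB_DEFS_H_        /* must match the #ifndef above */

    /* declarations ... */

    #endif /* _LBS_DEB_DEFS_H_ */
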
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 5380f3b040ac..177a8e669241 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -475,7 +475,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct sk_buff *skb = NULL;
struct ieee80211_tx_info *info = NULL;
- int tid; /* should be int */
+	int tid;
if (!rtlpriv->rtlhal.earlymode_enable)
return;
@@ -1525,7 +1525,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtl_init_rx_config(hw);
- /*should after adapter start and interrupt enable. */
+ /*should be after adapter start and interrupt enable. */
set_hal_start(rtlhal);
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
@@ -1546,7 +1546,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
u8 RFInProgressTimeOut = 0;
/*
- *should before disable interrrupt&adapter
+ *should be before disable interrupt&adapter
*and will do it immediately.
*/
set_hal_stop(rtlhal);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 8d70b44fcd8a..d5508957200e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -334,7 +334,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
count++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- unsigned long size = skb_shinfo(skb)->frags[i].size;
+ unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long bytes;
while (size > 0) {
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
@@ -526,7 +526,7 @@ static int netbk_gop_skb(struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].size,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head);
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6e5d4c09e5d7..226faab23603 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -467,7 +467,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = frag->page_offset;
- tx->size = frag->size;
+ tx->size = skb_frag_size(frag);
tx->flags = 0;
}
@@ -965,7 +965,7 @@ err:
if (rx->status > len) {
skb_shinfo(skb)->frags[0].page_offset =
rx->offset + len;
- skb_shinfo(skb)->frags[0].size = rx->status - len;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
skb->data_len = rx->status - len;
} else {
__skb_fill_page_desc(skb, 0, NULL, 0, 0);