Diffstat (limited to 'drivers')
125 files changed, 3431 insertions, 1111 deletions
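The bulk of the timer-related hunks below convert drivers from the open-coded init_timer() call plus separate .function/.data assignments to the setup_timer() helper. A minimal sketch of that pattern, assuming a hypothetical foo_* timer and callback that do not appear in the patch itself:

/*
 * Illustrative sketch only -- not part of the patch. The foo_* names are
 * hypothetical stand-ins for the per-driver timers converted below.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

static struct timer_list foo_timer;

static void foo_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	/* ... poll the hardware behind dev, re-arm the timer if needed ... */
}

static void foo_start_polling(struct net_device *dev)
{
	/*
	 * Old idiom removed throughout this series:
	 *
	 *	init_timer(&foo_timer);
	 *	foo_timer.function = foo_poll;
	 *	foo_timer.data = (unsigned long)dev;
	 */
	setup_timer(&foo_timer, foo_poll, (unsigned long)dev);

	foo_timer.expires = jiffies + HZ / 20;	/* poll 20 times per second */
	add_timer(&foo_timer);
}

setup_timer() collapses the three assignments into one call; the expires/add_timer() handling in each driver is unchanged.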
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 486e1e6997fc..caf04284711a 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -424,9 +424,7 @@ static int cops_open(struct net_device *dev) */ if(lp->board==TANGENT) /* Poll 20 times per second */ { - init_timer(&cops_timer); - cops_timer.function = cops_poll; - cops_timer.data = (unsigned long)dev; + setup_timer(&cops_timer, cops_poll, (unsigned long)dev); cops_timer.expires = jiffies + HZ/20; add_timer(&cops_timer); } diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index ac755d2950a6..e4aa374caa4d 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1165,9 +1165,7 @@ struct net_device * __init ltpc_probe(void) dev->irq = 0; /* polled mode -- 20 times per second */ /* this is really, really slow... should it poll more often? */ - init_timer(&ltpc_timer); - ltpc_timer.function=ltpc_poll; - ltpc_timer.data = (unsigned long) dev; + setup_timer(&ltpc_timer, ltpc_poll, (unsigned long)dev); ltpc_timer.expires = jiffies + HZ/20; add_timer(&ltpc_timer); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index fcfccbb3d9a2..13236b2cdf13 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -450,9 +450,7 @@ struct net_device *alloc_arcdev(const char *name) lp->dev = dev; spin_lock_init(&lp->lock); - init_timer(&lp->timer); - lp->timer.data = (unsigned long) dev; - lp->timer.function = arcnet_timer; + setup_timer(&lp->timer, arcnet_timer, (unsigned long)dev); } return dev; diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 438966bf51c2..fed75e75207a 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1211,17 +1211,14 @@ static int cfhsi_open(struct net_device *ndev) init_waitqueue_head(&cfhsi->flush_fifo_wait); /* Setup the inactivity timer. */ - init_timer(&cfhsi->inactivity_timer); - cfhsi->inactivity_timer.data = (unsigned long)cfhsi; - cfhsi->inactivity_timer.function = cfhsi_inactivity_tout; + setup_timer(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, + (unsigned long)cfhsi); /* Setup the slowpath RX timer. */ - init_timer(&cfhsi->rx_slowpath_timer); - cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; - cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; + setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, + (unsigned long)cfhsi); /* Setup the aggregation timer. */ - init_timer(&cfhsi->aggregation_timer); - cfhsi->aggregation_timer.data = (unsigned long)cfhsi; - cfhsi->aggregation_timer.function = cfhsi_aggregation_tout; + setup_timer(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, + (unsigned long)cfhsi); /* Activate HSI interface. 
*/ res = cfhsi->ops->cfhsi_up(cfhsi->ops); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index a7be12d9a139..8570cfdaea75 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1626,13 +1626,11 @@ static int grcan_setup_netdev(struct platform_device *ofdev, spin_lock_init(&priv->lock); if (priv->need_txbug_workaround) { - init_timer(&priv->rr_timer); - priv->rr_timer.function = grcan_running_reset; - priv->rr_timer.data = (unsigned long)dev; + setup_timer(&priv->rr_timer, grcan_running_reset, + (unsigned long)dev); - init_timer(&priv->hang_timer); - priv->hang_timer.function = grcan_initiate_running_reset; - priv->hang_timer.data = (unsigned long)dev; + setup_timer(&priv->hang_timer, grcan_initiate_running_reset, + (unsigned long)dev); } netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT); diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index dd56133cc461..4b8758e10bd4 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c @@ -692,9 +692,7 @@ static int pcan_probe(struct pcmcia_device *pdev) } /* init the timer which controls the leds */ - init_timer(&card->led_timer); - card->led_timer.function = pcan_led_timer; - card->led_timer.data = (unsigned long)card; + setup_timer(&card->led_timer, pcan_led_timer, (unsigned long)card); /* request the given irq */ err = request_irq(pdev->irq, &pcan_isr, IRQF_SHARED, PCC_NAME, card); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 838545ce468d..7e10dbdded28 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -798,9 +798,8 @@ static int pcan_usb_init(struct peak_usb_device *dev) int err; /* initialize a timer needed to wait for hardware restart */ - init_timer(&pdev->restart_timer); - pdev->restart_timer.function = pcan_usb_restart; - pdev->restart_timer.data = (unsigned long)dev; + setup_timer(&pdev->restart_timer, pcan_usb_restart, + (unsigned long)dev); /* * explicit use of dev_xxx() instead of netdev_xxx() here: diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 274f3679f33d..d4ce092def83 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -484,7 +484,7 @@ static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) return b53_flush_arl(dev, FAST_AGE_VLAN); } -static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) +void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) { struct b53_device *dev = ds->priv; unsigned int i; @@ -500,9 +500,9 @@ static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); } } +EXPORT_SYMBOL(b53_imp_vlan_setup); -static int b53_enable_port(struct dsa_switch *ds, int port, - struct phy_device *phy) +int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; unsigned int cpu_port = dev->cpu_port; @@ -523,11 +523,15 @@ static int b53_enable_port(struct dsa_switch *ds, int port, b53_imp_vlan_setup(ds, cpu_port); + /* If EEE was enabled, restore it */ + if (dev->ports[port].eee.eee_enabled) + b53_eee_enable_set(ds, port, true); + return 0; } +EXPORT_SYMBOL(b53_enable_port); -static void b53_disable_port(struct dsa_switch *ds, int port, - struct phy_device *phy) +void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; u8 reg; 
@@ -537,20 +541,67 @@ static void b53_disable_port(struct dsa_switch *ds, int port, reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); } +EXPORT_SYMBOL(b53_disable_port); -static void b53_enable_cpu_port(struct b53_device *dev) +void b53_brcm_hdr_setup(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + u8 hdr_ctl, val; + u16 reg; + + /* Resolve which bit controls the Broadcom tag */ + switch (port) { + case 8: + val = BRCM_HDR_P8_EN; + break; + case 7: + val = BRCM_HDR_P7_EN; + break; + case 5: + val = BRCM_HDR_P5_EN; + break; + default: + val = 0; + break; + } + + /* Enable Broadcom tags for IMP port */ + b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl); + hdr_ctl |= val; + b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl); + + /* Registers below are only accessible on newer devices */ + if (!is58xx(dev)) + return; + + /* Enable reception Broadcom tag for CPU TX (switch RX) to + * allow us to tag outgoing frames + */ + b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg); + reg &= ~BIT(port); + b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg); + + /* Enable transmission of Broadcom tags from the switch (CPU RX) to + * allow delivering frames to the per-port net_devices + */ + b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg); + reg &= ~BIT(port); + b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg); +} +EXPORT_SYMBOL(b53_brcm_hdr_setup); + +static void b53_enable_cpu_port(struct b53_device *dev, int port) { - unsigned int cpu_port = dev->cpu_port; u8 port_ctrl; /* BCM5325 CPU port is at 8 */ - if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) - cpu_port = B53_CPU_PORT; + if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25) + port = B53_CPU_PORT; port_ctrl = PORT_CTRL_RX_BCST_EN | PORT_CTRL_RX_MCST_EN | PORT_CTRL_RX_UCST_EN; - b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl); + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); } static void b53_enable_mib(struct b53_device *dev) @@ -816,12 +867,13 @@ static int b53_setup(struct dsa_switch *ds) if (ret) dev_err(ds->dev, "failed to apply configuration\n"); + /* Configure IMP/CPU port, disable unused ports. 
Enabled + * ports will be configured with .port_enable + */ for (port = 0; port < dev->num_ports; port++) { - if (BIT(port) & ds->enabled_port_mask) - b53_enable_port(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - b53_enable_cpu_port(dev); - else + if (dsa_is_cpu_port(ds, port)) + b53_enable_cpu_port(dev, port); + else if (!(BIT(port) & ds->enabled_port_mask)) b53_disable_port(ds, port, NULL); } @@ -832,6 +884,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct b53_device *dev = ds->priv; + struct ethtool_eee *p = &dev->ports[port].eee; u8 rgmii_ctrl = 0, reg = 0, off; if (!phy_is_pseudo_fixed_link(phydev)) @@ -953,6 +1006,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po); } } + + /* Re-negotiate EEE if it was enabled already */ + p->eee_enabled = b53_eee_init(ds, port, phydev); } int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) @@ -1280,7 +1336,7 @@ EXPORT_SYMBOL(b53_fdb_dump); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; - s8 cpu_port = ds->dst->cpu_dp->index; + s8 cpu_port = ds->ports[port].cpu_dp->index; u16 pvlan, reg; unsigned int i; @@ -1326,7 +1382,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; struct b53_vlan *vl = &dev->vlans[0]; - s8 cpu_port = ds->dst->cpu_dp->index; + s8 cpu_port = ds->ports[port].cpu_dp->index; unsigned int i; u16 pvlan, reg, pvid; @@ -1484,6 +1540,69 @@ void b53_mirror_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_mirror_del); +void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) +{ + struct b53_device *dev = ds->priv; + u16 reg; + + b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, ®); + if (enable) + reg |= BIT(port); + else + reg &= ~BIT(port); + b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); +} +EXPORT_SYMBOL(b53_eee_enable_set); + + +/* Returns 0 if EEE was not enabled, or 1 otherwise + */ +int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) +{ + int ret; + + ret = phy_init_eee(phy, 0); + if (ret) + return 0; + + b53_eee_enable_set(ds, port, true); + + return 1; +} +EXPORT_SYMBOL(b53_eee_init); + +int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) +{ + struct b53_device *dev = ds->priv; + struct ethtool_eee *p = &dev->ports[port].eee; + u16 reg; + + if (is5325(dev) || is5365(dev)) + return -EOPNOTSUPP; + + b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, ®); + e->eee_enabled = p->eee_enabled; + e->eee_active = !!(reg & BIT(port)); + + return 0; +} +EXPORT_SYMBOL(b53_get_mac_eee); + +int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) +{ + struct b53_device *dev = ds->priv; + struct ethtool_eee *p = &dev->ports[port].eee; + + if (is5325(dev) || is5365(dev)) + return -EOPNOTSUPP; + + p->eee_enabled = e->eee_enabled; + b53_eee_enable_set(ds, port, e->eee_enabled); + + return 0; +} +EXPORT_SYMBOL(b53_set_mac_eee); + static const struct dsa_switch_ops b53_switch_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, @@ -1495,6 +1614,8 @@ static const struct dsa_switch_ops b53_switch_ops = { .adjust_link = b53_adjust_link, .port_enable = b53_enable_port, .port_disable = b53_disable_port, + .get_mac_eee = b53_get_mac_eee, + .set_mac_eee = b53_set_mac_eee, .port_bridge_join = b53_br_join, .port_bridge_leave = b53_br_leave, .port_stp_state_set = 
b53_br_set_stp_state, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 01bd8cbe9a3f..603c66d240d8 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -70,6 +70,7 @@ enum { struct b53_port { u16 vlan_ctl_mask; + struct ethtool_eee eee; }; struct b53_vlan { @@ -186,11 +187,6 @@ static inline int is58xx(struct b53_device *dev) #define B53_CPU_PORT_25 5 #define B53_CPU_PORT 8 -static inline int is_cpu_port(struct b53_device *dev, int port) -{ - return dev->cpu_port; -} - struct b53_device *b53_switch_alloc(struct device *base, const struct b53_io_ops *ops, void *priv); @@ -204,119 +200,30 @@ static inline void b53_switch_remove(struct b53_device *dev) dsa_unregister_switch(dev->ds); } -static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read8(dev, page, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; +#define b53_build_op(type_op_size, val_type) \ +static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \ + u8 reg, val_type val) \ +{ \ + int ret; \ + \ + mutex_lock(&dev->reg_mutex); \ + ret = dev->ops->type_op_size(dev, page, reg, val); \ + mutex_unlock(&dev->reg_mutex); \ + \ + return ret; \ } -static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read16(dev, page, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read32(dev, page, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read48(dev, page, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read64(dev, page, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} +b53_build_op(read8, u8 *); +b53_build_op(read16, u16 *); +b53_build_op(read32, u32 *); +b53_build_op(read48, u64 *); +b53_build_op(read64, u64 *); -static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write8(dev, page, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg, - u16 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write16(dev, page, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg, - u32 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write32(dev, page, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg, - u64 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write48(dev, page, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg, - u64 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write64(dev, page, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} +b53_build_op(write8, u8); 
+b53_build_op(write16, u16); +b53_build_op(write32, u32); +b53_build_op(write48, u64); +b53_build_op(write64, u64); struct b53_arl_entry { u8 port; @@ -377,6 +284,7 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev) #endif /* Exported functions towards other drivers */ +void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port); void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_get_sset_count(struct dsa_switch *ds); @@ -403,5 +311,12 @@ int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); void b53_mirror_del(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); +int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); +void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy); +void b53_brcm_hdr_setup(struct dsa_switch *ds, int port); +void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable); +int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy); +int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); +int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); #endif diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index e5c86d44667a..2a9f421680aa 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -50,6 +50,9 @@ /* Jumbo Frame Registers */ #define B53_JUMBO_PAGE 0x40 +/* EEE Control Registers Page */ +#define B53_EEE_PAGE 0x92 + /* CFP Configuration Registers Page */ #define B53_CFP_PAGE 0xa1 @@ -210,6 +213,7 @@ #define B53_BRCM_HDR 0x03 #define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */ #define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */ +#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */ /* Mirror capture control register (16 bit) */ #define B53_MIR_CAP_CTL 0x10 @@ -249,6 +253,12 @@ /* Revision ID register (8 bit) */ #define B53_REV_ID 0x40 +/* Broadcom header RX control (16 bit) */ +#define B53_BRCM_HDR_RX_DIS 0x60 + +/* Broadcom header TX control (16 bit) */ +#define B53_BRCM_HDR_TX_DIS 0x62 + /************************************************************************* * ARL Access Page Registers *************************************************************************/ @@ -465,6 +475,44 @@ #define JMS_MAX_SIZE 9724 /************************************************************************* + * EEE Configuration Page Registers + *************************************************************************/ + +/* EEE Enable control register (16 bit) */ +#define B53_EEE_EN_CTRL 0x00 + +/* EEE LPI assert status register (16 bit) */ +#define B53_EEE_LPI_ASSERT_STS 0x02 + +/* EEE LPI indicate status register (16 bit) */ +#define B53_EEE_LPI_INDICATE 0x4 + +/* EEE Receiving idle symbols status register (16 bit) */ +#define B53_EEE_RX_IDLE_SYM_STS 0x6 + +/* EEE Pipeline timer register (32 bit) */ +#define B53_EEE_PIP_TIMER 0xC + +/* EEE Sleep timer Gig register (32 bit) */ +#define B53_EEE_SLEEP_TIMER_GIG(i) (0x10 + 4 * (i)) + +/* EEE Sleep timer FE register (32 bit) */ +#define B53_EEE_SLEEP_TIMER_FE(i) (0x34 + 4 * (i)) + +/* EEE Minimum LP timer Gig register (32 bit) */ +#define B53_EEE_MIN_LP_TIMER_GIG(i) (0x58 + 4 * (i)) + +/* EEE Minimum LP timer FE register (32 bit) */ +#define B53_EEE_MIN_LP_TIMER_FE(i) (0x7c + 4 * (i)) + +/* EEE Wake timer Gig register (16 bit) */ +#define 
B53_EEE_WAKE_TIMER_GIG(i) (0xa0 + 2 * (i)) + +/* EEE Wake timer FE register (16 bit) */ +#define B53_EEE_WAKE_TIMER_FE(i) (0xb2 + 2 * (i)) + + +/************************************************************************* * CFP Configuration Page Registers *************************************************************************/ diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d7b53d53c116..898d5642b516 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -40,66 +40,6 @@ static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds) return DSA_TAG_PROTO_BRCM; } -static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) -{ - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - unsigned int i; - u32 reg; - - /* Enable the IMP Port to be in the same VLAN as the other ports - * on a per-port basis such that we only have Port i and IMP in - * the same VLAN. - */ - for (i = 0; i < priv->hw_params.num_ports; i++) { - if (!((1 << i) & ds->enabled_port_mask)) - continue; - - reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); - reg |= (1 << cpu_port); - core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); - } -} - -static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port) -{ - u32 reg, val; - - /* Resolve which bit controls the Broadcom tag */ - switch (port) { - case 8: - val = BRCM_HDR_EN_P8; - break; - case 7: - val = BRCM_HDR_EN_P7; - break; - case 5: - val = BRCM_HDR_EN_P5; - break; - default: - val = 0; - break; - } - - /* Enable Broadcom tags for IMP port */ - reg = core_readl(priv, CORE_BRCM_HDR_CTRL); - reg |= val; - core_writel(priv, reg, CORE_BRCM_HDR_CTRL); - - /* Enable reception Broadcom tag for CPU TX (switch RX) to - * allow us to tag outgoing frames - */ - reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS); - reg &= ~(1 << port); - core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS); - - /* Enable transmission of Broadcom tags from the switch (CPU RX) to - * allow delivering frames to the per-port net_devices - */ - reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); - reg &= ~(1 << port); - core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); -} - static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); @@ -138,7 +78,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) reg |= i << (PRT_TO_QID_SHIFT * i); core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); - bcm_sf2_brcm_hdr_setup(priv, port); + b53_brcm_hdr_setup(ds, port); /* Force link status for IMP port */ reg = core_readl(priv, offset); @@ -146,19 +86,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) core_writel(priv, reg, offset); } -static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) -{ - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 reg; - - reg = core_readl(priv, CORE_EEE_EN_CTRL); - if (enable) - reg |= 1 << port; - else - reg &= ~(1 << port); - core_writel(priv, reg, CORE_EEE_EN_CTRL); -} - static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); @@ -236,7 +163,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - s8 cpu_port = ds->dst->cpu_dp->index; unsigned int i; u32 reg; @@ -247,7 +173,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, /* Enable Broadcom tags for that port if requested */ if (priv->brcm_tag_mask & BIT(port)) - bcm_sf2_brcm_hdr_setup(priv, 
port); + b53_brcm_hdr_setup(ds, port); /* Configure Traffic Class to QoS mapping, allow each priority to map * to a different queue number @@ -257,9 +183,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, reg |= i << (PRT_TO_QID_SHIFT * i); core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port)); - /* Clear the Rx and Tx disable bits and set to no spanning tree */ - core_writel(priv, 0, CORE_G_PCTL_PORT(port)); - /* Re-enable the GPHY and re-apply workarounds */ if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) { bcm_sf2_gphy_enable_set(ds, true); @@ -282,23 +205,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, if (port == priv->moca_port) bcm_sf2_port_intr_enable(priv, port); - /* Set this port, and only this one to be in the default VLAN, - * if member of a bridge, restore its membership prior to - * bringing down this port. - */ - reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); - reg &= ~PORT_VLAN_CTRL_MASK; - reg |= (1 << port); - reg |= priv->dev->ports[port].vlan_ctl_mask; - core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); - - bcm_sf2_imp_vlan_setup(ds, cpu_port); - - /* If EEE was enabled, restore it */ - if (priv->port_sts[port].eee.eee_enabled) - bcm_sf2_eee_enable_set(ds, port, true); - - return 0; + return b53_enable_port(ds, port, phy); } static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, @@ -321,9 +228,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, else off = CORE_G_PCTL_PORT(port); - reg = core_readl(priv, off); - reg |= RX_DIS | TX_DIS; - core_writel(priv, reg, off); + b53_disable_port(ds, port, phy); /* Power down the port memory */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); @@ -331,47 +236,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); } -/* Returns 0 if EEE was not enabled, or 1 otherwise - */ -static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - int ret; - - ret = phy_init_eee(phy, 0); - if (ret) - return 0; - - bcm_sf2_eee_enable_set(ds, port, true); - - return 1; -} - -static int bcm_sf2_sw_get_mac_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) -{ - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->port_sts[port].eee; - u32 reg; - - reg = core_readl(priv, CORE_EEE_LPI_INDICATE); - e->eee_enabled = p->eee_enabled; - e->eee_active = !!(reg & (1 << port)); - - return 0; -} - -static int bcm_sf2_sw_set_mac_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) -{ - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->port_sts[port].eee; - - p->eee_enabled = e->eee_enabled; - bcm_sf2_eee_enable_set(ds, port, e->eee_enabled); - - return 0; -} static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr, int regnum, u16 val) @@ -606,7 +470,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->port_sts[port].eee; + struct ethtool_eee *p = &priv->dev->ports[port].eee; u32 id_mode_dis = 0, port_mode; const char *str = NULL; u32 reg, offset; @@ -688,7 +552,7 @@ force_link: core_writel(priv, reg, offset); if (!phydev->is_pseudo_fixed_link) - p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); + p->eee_enabled = b53_eee_init(ds, port, phydev); } static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, @@ -797,7 +661,7 @@ 
static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst->cpu_dp->netdev; + struct net_device *p = ds->ports[port].cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol; @@ -820,9 +684,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst->cpu_dp->netdev; + struct net_device *p = ds->ports[port].cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - s8 cpu_port = ds->dst->cpu_dp->index; + s8 cpu_port = ds->ports[port].cpu_dp->index; struct ethtool_wolinfo pwol; p->ethtool_ops->get_wol(p, &pwol); @@ -890,14 +754,11 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; - /* Enable all valid ports and disable those unused */ + /* Disable unused ports and configure IMP port */ for (port = 0; port < priv->hw_params.num_ports; port++) { - /* IMP port receives special treatment */ - if ((1 << port) & ds->enabled_port_mask) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) + if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); - else + else if (!((1 << port) & ds->enabled_port_mask)) bcm_sf2_port_disable(ds, port, NULL); } @@ -1020,8 +881,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_wol = bcm_sf2_sw_set_wol, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, - .get_mac_eee = bcm_sf2_sw_get_mac_eee, - .set_mac_eee = bcm_sf2_sw_set_mac_eee, + .get_mac_eee = b53_get_mac_eee, + .set_mac_eee = b53_set_mac_eee, .port_bridge_join = b53_br_join, .port_bridge_leave = b53_br_leave, .port_stp_state_set = b53_br_set_stp_state, diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 02c499f9c56b..1922e027ff59 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -48,8 +48,6 @@ struct bcm_sf2_hw_params { struct bcm_sf2_port_status { unsigned int link; - - struct ethtool_eee eee; }; struct bcm_sf2_cfp_priv { diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 8a1da7e67707..94649e1481ec 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -144,7 +144,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, * destination port is enabled and that we are within the * number of ports supported by the switch */ - port_num = fs->ring_cookie / 8; + port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES; if (fs->ring_cookie == RX_CLS_FLOW_DISC || !(BIT(port_num) & ds->enabled_port_mask) || @@ -280,7 +280,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, * We have a small oddity where Port 6 just does not have a * valid bit here (so we subtract by one). 
*/ - queue_num = fs->ring_cookie % 8; + queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES; if (port_num >= 7) port_num -= 1; @@ -401,7 +401,7 @@ static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, /* There is no Port 6, so we compensate for that here */ if (nfc->fs.ring_cookie >= 6) nfc->fs.ring_cookie++; - nfc->fs.ring_cookie *= 8; + nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES; /* Extract the destination queue */ queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 49695fcc2ea8..d8b8074a47b9 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -205,16 +205,8 @@ enum bcm_sf2_reg_offs { #define CORE_IMP0_PRT_ID 0x0804 -#define CORE_BRCM_HDR_CTRL 0x0080c -#define BRCM_HDR_EN_P8 (1 << 0) -#define BRCM_HDR_EN_P5 (1 << 1) -#define BRCM_HDR_EN_P7 (1 << 2) - #define CORE_RST_MIB_CNT_EN 0x0950 -#define CORE_BRCM_HDR_RX_DIS 0x0980 -#define CORE_BRCM_HDR_TX_DIS 0x0988 - #define CORE_ARLA_VTBL_RWCTRL 0x1600 #define ARLA_VTBL_CMD_WRITE 0 #define ARLA_VTBL_CMD_READ 1 @@ -252,9 +244,6 @@ enum bcm_sf2_reg_offs { #define CORE_JOIN_ALL_VLAN_EN 0xd140 -#define CORE_EEE_EN_CTRL 0x24800 -#define CORE_EEE_LPI_INDICATE 0x24810 - #define CORE_CFP_ACC 0x28000 #define OP_STR_DONE (1 << 0) #define OP_SEL_SHIFT 1 diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index b471413d3df9..07355db2ad81 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -17,6 +17,7 @@ #include <linux/regmap.h> #include <linux/mutex.h> #include <linux/mii.h> +#include <linux/phy.h> #include "lan9303.h" @@ -57,6 +58,7 @@ #define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16)) #define LAN9303_VIRT_PHY_BASE 0x70 #define LAN9303_VIRT_SPECIAL_CTRL 0x77 +#define LAN9303_VIRT_SPECIAL_TURBO BIT(10) /*Turbo MII Enable*/ /*13.4 Switch Fabric Control and Status Registers * Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA. 
@@ -760,6 +762,43 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, return chip->ops->phy_write(chip, phy, regnum, val); } +static void lan9303_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct lan9303 *chip = ds->priv; + int ctl, res; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + + ctl = lan9303_phy_read(ds, port, MII_BMCR); + + ctl &= ~BMCR_ANENABLE; + + if (phydev->speed == SPEED_100) + ctl |= BMCR_SPEED100; + else if (phydev->speed == SPEED_10) + ctl &= ~BMCR_SPEED100; + else + dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed); + + if (phydev->duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + else + ctl &= ~BMCR_FULLDPLX; + + res = lan9303_phy_write(ds, port, MII_BMCR, ctl); + + if (port == chip->phy_addr_sel_strap) { + /* Virtual Phy: Remove Turbo 200Mbit mode */ + lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl); + + ctl &= ~LAN9303_VIRT_SPECIAL_TURBO; + res = regmap_write(chip->regmap, + LAN9303_VIRT_SPECIAL_CTRL, ctl); + } +} + static int lan9303_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) { @@ -803,6 +842,7 @@ static const struct dsa_switch_ops lan9303_switch_ops = { .get_strings = lan9303_get_strings, .phy_read = lan9303_phy_read, .phy_write = lan9303_phy_write, + .adjust_link = lan9303_adjust_link, .get_ethtool_stats = lan9303_get_ethtool_stats, .get_sset_count = lan9303_get_sset_count, .port_enable = lan9303_port_enable, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index c142b97add2c..faa3b88d2206 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -928,11 +928,11 @@ mt7530_setup(struct dsa_switch *ds) struct device_node *dn; struct mt7530_dummy_poll p; - /* The parent node of cpu_dp->netdev which holds the common system + /* The parent node of master netdev which holds the common system * controller also is the container for two GMACs nodes representing * as two netdev instances. */ - dn = ds->dst->cpu_dp->netdev->dev.of_node->parent; + dn = ds->ports[MT7530_CPU_PORT].netdev->dev.of_node->parent; priv->ethernet = syscon_node_to_regmap(dn); if (IS_ERR(priv->ethernet)) return PTR_ERR(priv->ethernet); diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index dce7fa57eb55..621cdc46ad81 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -176,7 +176,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p) ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) | (dsa_is_cpu_port(ds, p) ? ds->enabled_port_mask : - BIT(ds->dst->cpu_dp->index))); + BIT(ds->ports[p].cpu_dp->index))); /* Port Association Vector: when learning source addresses * of packets, add the address to the address database using diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 5ada7a41449c..82f09711ac1a 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -506,7 +506,7 @@ qca8k_setup(struct dsa_switch *ds) pr_warn("regmap initialization failed"); /* Initialize CPU port pad mode (xMII type, delays...) 
*/ - phy_mode = of_get_phy_mode(ds->dst->cpu_dp->dn); + phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); if (phy_mode < 0) { pr_err("Can't find phy-mode for master device\n"); return phy_mode; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d0a1f9ce3168..e31ab3b94c6f 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -388,7 +388,7 @@ static int __init dummy_init_one(void) int err; dev_dummy = alloc_netdev(sizeof(struct dummy_priv), - "dummy%d", NET_NAME_UNKNOWN, dummy_setup); + "dummy%d", NET_NAME_ENUM, dummy_setup); if (!dev_dummy) return -ENOMEM; diff --git a/drivers/net/eql.c b/drivers/net/eql.c index fe13bfea30ac..fccce4b47778 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -178,10 +178,8 @@ static void __init eql_setup(struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); - init_timer(&eql->timer); - eql->timer.data = (unsigned long) eql; + setup_timer(&eql->timer, eql_timer, (unsigned long)eql); eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; - eql->timer.function = eql_timer; spin_lock_init(&eql->queue.lock); INIT_LIST_HEAD(&eql->queue.all_slaves); diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a251de8d9a91..0658cde1586a 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1650,9 +1650,8 @@ static int bfin_mac_probe(struct platform_device *pdev) ndev->netdev_ops = &bfin_mac_netdev_ops; ndev->ethtool_ops = &bfin_mac_ethtool_ops; - init_timer(&lp->tx_reclaim_timer); - lp->tx_reclaim_timer.data = (unsigned long)lp; - lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout; + setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, + (unsigned long)lp); lp->flags = 0; netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM); diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 54eff90e2f02..658e92f79d36 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3624,11 +3624,10 @@ static int et131x_open(struct net_device *netdev) int result; /* Start the timer to track NIC errors */ - init_timer(&adapter->error_timer); + setup_timer(&adapter->error_timer, et131x_error_timer_handler, + (unsigned long)adapter); adapter->error_timer.expires = jiffies + msecs_to_jiffies(TX_ERROR_PERIOD); - adapter->error_timer.function = et131x_error_timer_handler; - adapter->error_timer.data = (unsigned long)adapter; add_timer(&adapter->error_timer); result = request_irq(irq, et131x_isr, diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index e22f976a0d18..998d30e050a6 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -733,10 +733,9 @@ static int a2065_init_one(struct zorro_dev *z, dev->watchdog_timeo = 5*HZ; dev->dma = 0; - init_timer(&priv->multicast_timer); - priv->multicast_timer.data = (unsigned long) dev; - priv->multicast_timer.function = - (void (*)(unsigned long))lance_set_multicast; + setup_timer(&priv->multicast_timer, + (void(*)(unsigned long))lance_set_multicast, + (unsigned long)dev); err = register_netdev(dev); if (err) { diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index b11e910850f7..0612dbee00d2 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -728,9 +728,7 @@ static int am79c961_probe(struct platform_device *pdev) am79c961_banner(); spin_lock_init(&priv->chip_lock); - init_timer(&priv->timer); 
- priv->timer.data = (unsigned long)dev; - priv->timer.function = am79c961_timer; + setup_timer(&priv->timer, am79c961_timer, (unsigned long)dev); if (am79c961_hw_init(dev)) goto release; diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 7b5df562f30f..7f22af6e37e0 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1883,9 +1883,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* Initialize software ipg timer */ if(lp->options & OPTION_DYN_IPG_ENABLE){ - init_timer(&lp->ipg_data.ipg_timer); - lp->ipg_data.ipg_timer.data = (unsigned long) dev; - lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg; + setup_timer(&lp->ipg_data.ipg_timer, + (void *)&amd8111e_config_ipg, (unsigned long)dev); lp->ipg_data.ipg_timer.expires = jiffies + IPG_CONVERGE_JIFFIES; lp->ipg_data.ipg = DEFAULT_IPG; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 82cc81385033..9bdf81c2cd00 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -1246,9 +1246,9 @@ static int dec_lance_probe(struct device *bdev, const int type) * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. -DaveM */ - init_timer(&lp->multicast_timer); - lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = lance_set_multicast_retry; + setup_timer(&lp->multicast_timer, lance_set_multicast_retry, + (unsigned long)dev); + ret = register_netdev(dev); if (ret) { diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 7f60d17819ce..e46153654016 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1970,9 +1970,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->options |= PCNET32_PORT_MII; } - init_timer(&lp->watchdog_timer); - lp->watchdog_timer.data = (unsigned long)dev; - lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + setup_timer(&lp->watchdog_timer, (void *)&pcnet32_watchdog, + (unsigned long)dev); /* The PCNET32-specific entries in the device structure. */ dev->netdev_ops = &pcnet32_netdev_ops; diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 291ca5187f12..0183ffb9d3ba 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1459,9 +1459,8 @@ no_link_test: * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. 
-DaveM */ - init_timer(&lp->multicast_timer); - lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = lance_set_multicast_retry; + setup_timer(&lp->multicast_timer, lance_set_multicast_retry, + (unsigned long)dev); if (register_netdev(dev)) { printk(KERN_ERR "SunLance: Cannot register device.\n"); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index a1125d10c825..42e44fc03a18 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1474,10 +1474,8 @@ static int b44_open(struct net_device *dev) goto out; } - init_timer(&bp->timer); + setup_timer(&bp->timer, b44_timer, (unsigned long)bp); bp->timer.expires = jiffies + HZ; - bp->timer.data = (unsigned long) bp; - bp->timer.function = b44_timer; add_timer(&bp->timer); b44_enable_ints(bp); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 4f3845a58126..f8bbbbfca06e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1857,9 +1857,8 @@ static int bcm_enet_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_lock); /* init rx timeout (used for oom) */ - init_timer(&priv->rx_timeout); - priv->rx_timeout.function = bcm_enet_refill_rx_timer; - priv->rx_timeout.data = (unsigned long)dev; + setup_timer(&priv->rx_timeout, bcm_enet_refill_rx_timer, + (unsigned long)dev); /* init the mib update lock&work */ mutex_init(&priv->mib_update_lock); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e3af1f3cb61f..b3055a76dfbf 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8462,10 +8462,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bnx2_set_default_link(bp); bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; - init_timer(&bp->timer); + setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp); bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2_timer; #ifdef BCM_CNIC if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c12b4d3e946e..54d1571384a0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12414,10 +12414,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 
5*HZ : HZ; - init_timer(&bp->timer); + setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp); bp->timer.expires = jiffies + bp->current_interval; - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2x_timer; if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aacec8bc19d5..c25f5b555adf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7190,9 +7190,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; - init_timer(&bp->timer); - bp->timer.data = (unsigned long)bp; - bp->timer.function = bnxt_timer; + setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp); bp->current_interval = BNXT_TIMER_INTERVAL; clear_bit(BNXT_STATE_OPEN, &bp->state); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 656e6af70f0a..d8d5f207c759 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11087,9 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp) tp->asf_multiplier = (HZ / tp->timer_offset) * TG3_FW_UPDATE_FREQ_SEC; - init_timer(&tp->timer); - tp->timer.data = (unsigned long) tp; - tp->timer.function = tg3_timer; + setup_timer(&tp->timer, tg3_timer, (unsigned long)tp); } static void tg3_timer_start(struct tg3 *tp) diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 0f13a7f7c1d3..75e439918700 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -2075,9 +2075,8 @@ struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) goto nomem_port; } - init_timer(&sge->tx_reclaim_timer); - sge->tx_reclaim_timer.data = (unsigned long)sge; - sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; + setup_timer(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, + (unsigned long)sge); if (is_T2(sge->adapter)) { init_timer(&sge->espibug_timer); diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 817212702f0a..fecd7aab673b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,9 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o cxgb4_ptp.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ + cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ + cxgb4_ptp.o cxgb4_tc_flower.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ea72d2d2e1b4..0db3ab6ad094 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -549,6 +549,7 @@ enum { /* adapter flags */ MASTER_PF = (1 << 7), FW_OFLD_CONN = (1 << 9), ROOT_NO_RELAXED_ORDERING = (1 << 10), + SHUTTING_DOWN = (1 << 11), }; enum { @@ -904,6 +905,10 @@ struct adapter { /* TC u32 offload */ struct cxgb4_tc_u32_table *tc_u32; struct chcr_stats_debug chcr_stats; + + /* TC flower offload */ + DECLARE_HASHTABLE(flower_anymatch_tbl, 9); + struct timer_list flower_stats_timer; }; /* Support for "sched-class" command to allow a TX 
Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 45b5853ca2f1..15361ca2857c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -148,6 +148,106 @@ static int get_filter_steerq(struct net_device *dev, return iq; } +static int get_filter_count(struct adapter *adapter, unsigned int fidx, + u64 *pkts, u64 *bytes) +{ + unsigned int tcb_base, tcbaddr; + unsigned int word_offset; + struct filter_entry *f; + __be64 be64_byte_count; + int ret; + + tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A); + if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids - 1)) && + fidx >= adapter->tids.nftids) + return -E2BIG; + + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + + tcbaddr = tcb_base + f->tid * TCB_SIZE; + + spin_lock(&adapter->win0_lock); + if (is_t4(adapter->params.chip)) { + __be64 be64_count; + + /* T4 doesn't maintain byte counts in hw */ + *bytes = 0; + + /* Get pkts */ + word_offset = 4; + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + (word_offset * sizeof(__be32)), + sizeof(be64_count), + (__be32 *)&be64_count, + T4_MEMORY_READ); + if (ret < 0) + goto out; + *pkts = be64_to_cpu(be64_count); + } else { + __be32 be32_count; + + /* Get bytes */ + word_offset = 4; + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + (word_offset * sizeof(__be32)), + sizeof(be64_byte_count), + &be64_byte_count, + T4_MEMORY_READ); + if (ret < 0) + goto out; + *bytes = be64_to_cpu(be64_byte_count); + + /* Get pkts */ + word_offset = 6; + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + (word_offset * sizeof(__be32)), + sizeof(be32_count), + &be32_count, + T4_MEMORY_READ); + if (ret < 0) + goto out; + *pkts = (u64)be32_to_cpu(be32_count); + } + +out: + spin_unlock(&adapter->win0_lock); + return ret; +} + +int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, + u64 *hitcnt, u64 *bytecnt) +{ + struct adapter *adapter = netdev2adap(dev); + + return get_filter_count(adapter, fidx, hitcnt, bytecnt); +} + +int cxgb4_get_free_ftid(struct net_device *dev, int family) +{ + struct adapter *adap = netdev2adap(dev); + struct tid_info *t = &adap->tids; + int ftid; + + spin_lock_bh(&t->ftid_lock); + if (family == PF_INET) { + ftid = find_first_zero_bit(t->ftid_bmap, t->nftids); + if (ftid >= t->nftids) + ftid = -1; + } else { + ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2); + if (ftid < 0) + goto out_unlock; + + /* this is only a lookup, keep the found region unallocated */ + bitmap_release_region(t->ftid_bmap, ftid, 2); + } +out_unlock: + spin_unlock_bh(&t->ftid_lock); + return ftid; +} + static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family) { spin_lock_bh(&t->ftid_lock); @@ -191,7 +291,8 @@ static int del_filter_wr(struct adapter *adapter, int fidx) return -ENOMEM; fwr = __skb_put(skb, len); - t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id); + t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1 + : adapter->sge.fw_evtq.abs_id); /* Mark the filter as "pending" and ship off the Filter Work Request. * When we get the Work Request Reply we'll clear the pending status. 
@@ -636,6 +737,10 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id) struct filter_ctx ctx; int ret; + /* If we are shutting down the adapter do not wait for completion */ + if (netdev2adap(dev)->flags & SHUTTING_DOWN) + return __cxgb4_del_filter(dev, filter_id, NULL); + init_completion(&ctx.completion); ret = __cxgb4_del_filter(dev, filter_id, &ctx); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 92d9d795d874..aa93ae95d3b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -79,6 +79,7 @@ #include "l2t.h" #include "sched.h" #include "cxgb4_tc_u32.h" +#include "cxgb4_tc_flower.h" #include "cxgb4_ptp.h" char cxgb4_driver_name[] = KBUILD_MODNAME; @@ -2873,6 +2874,25 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static int cxgb_setup_tc_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) +{ + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return cxgb4_tc_flower_replace(dev, cls_flower); + case TC_CLSFLOWER_DESTROY: + return cxgb4_tc_flower_destroy(dev, cls_flower); + case TC_CLSFLOWER_STATS: + return cxgb4_tc_flower_stats(dev, cls_flower); + default: + return -EOPNOTSUPP; + } +} + static int cxgb_setup_tc_cls_u32(struct net_device *dev, struct tc_cls_u32_offload *cls_u32) { @@ -2907,6 +2927,8 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_CLSU32: return cxgb_setup_tc_cls_u32(dev, type_data); + case TC_SETUP_CLSFLOWER: + return cxgb_setup_tc_flower(dev, type_data); default: return -EOPNOTSUPP; } @@ -4615,6 +4637,7 @@ static void free_some_resources(struct adapter *adapter) kvfree(adapter->l2t); t4_cleanup_sched(adapter); kvfree(adapter->tids.tid_tab); + cxgb4_cleanup_tc_flower(adapter); cxgb4_cleanup_tc_u32(adapter); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); @@ -5083,6 +5106,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!adapter->tc_u32) dev_warn(&pdev->dev, "could not offload tc u32, continuing\n"); + + cxgb4_init_tc_flower(adapter); } if (is_offload(adapter)) { @@ -5254,6 +5279,8 @@ static void remove_one(struct pci_dev *pdev) return; } + adapter->flags |= SHUTTING_DOWN; + if (adapter->pf == 4) { int i; @@ -5339,6 +5366,8 @@ static void shutdown_one(struct pci_dev *pdev) return; } + adapter->flags |= SHUTTING_DOWN; + if (adapter->pf == 4) { int i; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c new file mode 100644 index 000000000000..a36bd66d2834 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -0,0 +1,452 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <net/tc_act/tc_gact.h> +#include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_vlan.h> + +#include "cxgb4.h" +#include "cxgb4_tc_flower.h" + +#define STATS_CHECK_PERIOD (HZ / 2) + +static struct ch_tc_flower_entry *allocate_flower_entry(void) +{ + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); + spin_lock_init(&new->lock); + return new; +} + +/* Must be called with either RTNL or rcu_read_lock */ +static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, + unsigned long flower_cookie) +{ + struct ch_tc_flower_entry *flower_entry; + + hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry, + link, flower_cookie) + if (flower_entry->tc_flower_cookie == flower_cookie) + return flower_entry; + return NULL; +} + +static void cxgb4_process_flow_match(struct net_device *dev, + struct tc_cls_flower_offload *cls, + struct ch_filter_specification *fs) +{ + u16 addr_type = 0; + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + cls->key); + + addr_type = key->addr_type; + } + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_BASIC, + cls->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_BASIC, + cls->mask); + u16 ethtype_key = ntohs(key->n_proto); + u16 ethtype_mask = ntohs(mask->n_proto); + + if (ethtype_key == ETH_P_ALL) { + ethtype_key = 0; + ethtype_mask = 0; + } + + fs->val.ethtype = ethtype_key; + fs->mask.ethtype = ethtype_mask; + fs->val.proto = key->ip_proto; + fs->mask.proto = mask->ip_proto; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + cls->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + cls->mask); + fs->type = 0; + memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst)); + memcpy(&fs->val.fip[0], &key->src, sizeof(key->src)); + memcpy(&fs->mask.lip[0], 
&mask->dst, sizeof(mask->dst)); + memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src)); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + cls->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + cls->mask); + + fs->type = 1; + memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst)); + memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src)); + memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst)); + memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src)); + } + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key, *mask; + + key = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_PORTS, + cls->key); + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_PORTS, + cls->mask); + fs->val.lport = cpu_to_be16(key->dst); + fs->mask.lport = cpu_to_be16(mask->dst); + fs->val.fport = cpu_to_be16(key->src); + fs->mask.fport = cpu_to_be16(mask->src); + } + + /* Match only packets coming from the ingress port where this + * filter will be created. + */ + fs->val.iport = netdev2pinfo(dev)->port_id; + fs->mask.iport = ~0; +} + +static int cxgb4_validate_flow_match(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + if (cls->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + netdev_warn(dev, "Unsupported key used: 0x%x\n", + cls->dissector->used_keys); + return -EOPNOTSUPP; + } + return 0; +} + +static void cxgb4_process_flow_actions(struct net_device *in, + struct tc_cls_flower_offload *cls, + struct ch_filter_specification *fs) +{ + const struct tc_action *a; + LIST_HEAD(actions); + + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + fs->action = FILTER_DROP; + } else if (is_tcf_mirred_egress_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out = __dev_get_by_index(dev_net(in), + ifindex); + struct port_info *pi = netdev_priv(out); + + fs->action = FILTER_SWITCH; + fs->eport = pi->port_id; + } else if (is_tcf_vlan(a)) { + u32 vlan_action = tcf_vlan_action(a); + u8 prio = tcf_vlan_push_prio(a); + u16 vid = tcf_vlan_push_vid(a); + u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid; + + switch (vlan_action) { + case TCA_VLAN_ACT_POP: + fs->newvlan |= VLAN_REMOVE; + break; + case TCA_VLAN_ACT_PUSH: + fs->newvlan |= VLAN_INSERT; + fs->vlan = vlan_tci; + break; + case TCA_VLAN_ACT_MODIFY: + fs->newvlan |= VLAN_REWRITE; + fs->vlan = vlan_tci; + break; + default: + break; + } + } + } +} + +static int cxgb4_validate_flow_actions(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + const struct tc_action *a; + LIST_HEAD(actions); + + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + /* Do nothing */ + } else if (is_tcf_mirred_egress_redirect(a)) { + struct adapter *adap = netdev2adap(dev); + struct net_device *n_dev; + unsigned int i, ifindex; + bool found = false; + + ifindex = tcf_mirred_ifindex(a); + for_each_port(adap, i) { + n_dev = adap->port[i]; + if (ifindex == n_dev->ifindex) { + found = true; + break; + } + } + + /* If interface doesn't belong to our hw, 
then + * the provided output port is not valid + */ + if (!found) { + netdev_err(dev, "%s: Out port invalid\n", + __func__); + return -EINVAL; + } + } else if (is_tcf_vlan(a)) { + u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); + u32 vlan_action = tcf_vlan_action(a); + + switch (vlan_action) { + case TCA_VLAN_ACT_POP: + break; + case TCA_VLAN_ACT_PUSH: + case TCA_VLAN_ACT_MODIFY: + if (proto != ETH_P_8021Q) { + netdev_err(dev, "%s: Unsupported vlan proto\n", + __func__); + return -EOPNOTSUPP; + } + break; + default: + netdev_err(dev, "%s: Unsupported vlan action\n", + __func__); + return -EOPNOTSUPP; + } + } else { + netdev_err(dev, "%s: Unsupported action\n", __func__); + return -EOPNOTSUPP; + } + } + return 0; +} + +int cxgb4_tc_flower_replace(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_entry *ch_flower; + struct ch_filter_specification *fs; + struct filter_ctx ctx; + int fidx; + int ret; + + if (cxgb4_validate_flow_actions(dev, cls)) + return -EOPNOTSUPP; + + if (cxgb4_validate_flow_match(dev, cls)) + return -EOPNOTSUPP; + + ch_flower = allocate_flower_entry(); + if (!ch_flower) { + netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__); + return -ENOMEM; + } + + fs = &ch_flower->fs; + fs->hitcnts = 1; + cxgb4_process_flow_actions(dev, cls, fs); + cxgb4_process_flow_match(dev, cls, fs); + + fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); + if (fidx < 0) { + netdev_err(dev, "%s: No fidx for offload.\n", __func__); + ret = -ENOMEM; + goto free_entry; + } + + init_completion(&ctx.completion); + ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); + if (ret) { + netdev_err(dev, "%s: filter creation err %d\n", + __func__, ret); + goto free_entry; + } + + /* Wait for reply */ + ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ); + if (!ret) { + ret = -ETIMEDOUT; + goto free_entry; + } + + ret = ctx.result; + /* Check if hw returned error for filter creation */ + if (ret) { + netdev_err(dev, "%s: filter creation err %d\n", + __func__, ret); + goto free_entry; + } + + INIT_HLIST_NODE(&ch_flower->link); + ch_flower->tc_flower_cookie = cls->cookie; + ch_flower->filter_id = ctx.tid; + hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie); + + return ret; + +free_entry: + kfree(ch_flower); + return ret; +} + +int cxgb4_tc_flower_destroy(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_entry *ch_flower; + int ret; + + ch_flower = ch_flower_lookup(adap, cls->cookie); + if (!ch_flower) + return -ENOENT; + + ret = cxgb4_del_filter(dev, ch_flower->filter_id); + if (ret) + goto err; + + hash_del_rcu(&ch_flower->link); + kfree_rcu(ch_flower, rcu); + +err: + return ret; +} + +void ch_flower_stats_cb(unsigned long data) +{ + struct adapter *adap = (struct adapter *)data; + struct ch_tc_flower_entry *flower_entry; + struct ch_tc_flower_stats *ofld_stats; + unsigned int i; + u64 packets; + u64 bytes; + int ret; + + rcu_read_lock(); + hash_for_each_rcu(adap->flower_anymatch_tbl, i, flower_entry, link) { + ret = cxgb4_get_filter_counters(adap->port[0], + flower_entry->filter_id, + &packets, &bytes); + if (!ret) { + spin_lock(&flower_entry->lock); + ofld_stats = &flower_entry->stats; + + if (ofld_stats->prev_packet_count != packets) { + ofld_stats->prev_packet_count = packets; + ofld_stats->last_used = jiffies; + } + spin_unlock(&flower_entry->lock); + } + } + rcu_read_unlock(); + 
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); +} + +int cxgb4_tc_flower_stats(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_stats *ofld_stats; + struct ch_tc_flower_entry *ch_flower; + u64 packets; + u64 bytes; + int ret; + + ch_flower = ch_flower_lookup(adap, cls->cookie); + if (!ch_flower) { + ret = -ENOENT; + goto err; + } + + ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id, + &packets, &bytes); + if (ret < 0) + goto err; + + spin_lock_bh(&ch_flower->lock); + ofld_stats = &ch_flower->stats; + if (ofld_stats->packet_count != packets) { + if (ofld_stats->prev_packet_count != packets) + ofld_stats->last_used = jiffies; + tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count, + packets - ofld_stats->packet_count, + ofld_stats->last_used); + + ofld_stats->packet_count = packets; + ofld_stats->byte_count = bytes; + ofld_stats->prev_packet_count = packets; + } + spin_unlock_bh(&ch_flower->lock); + return 0; + +err: + return ret; +} + +void cxgb4_init_tc_flower(struct adapter *adap) +{ + hash_init(adap->flower_anymatch_tbl); + setup_timer(&adap->flower_stats_timer, ch_flower_stats_cb, + (unsigned long)adap); + mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); +} + +void cxgb4_cleanup_tc_flower(struct adapter *adap) +{ + if (adap->flower_stats_timer.function) + del_timer_sync(&adap->flower_stats_timer); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h new file mode 100644 index 000000000000..604feffc752e --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -0,0 +1,66 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_TC_FLOWER_H +#define __CXGB4_TC_FLOWER_H + +#include <net/pkt_cls.h> + +struct ch_tc_flower_stats { + u64 prev_packet_count; + u64 packet_count; + u64 byte_count; + u64 last_used; +}; + +struct ch_tc_flower_entry { + struct ch_filter_specification fs; + struct ch_tc_flower_stats stats; + unsigned long tc_flower_cookie; + struct hlist_node link; + struct rcu_head rcu; + spinlock_t lock; /* lock for stats */ + u32 filter_id; +}; + +int cxgb4_tc_flower_replace(struct net_device *dev, + struct tc_cls_flower_offload *cls); +int cxgb4_tc_flower_destroy(struct net_device *dev, + struct tc_cls_flower_offload *cls); +int cxgb4_tc_flower_stats(struct net_device *dev, + struct tc_cls_flower_offload *cls); + +void cxgb4_init_tc_flower(struct adapter *adap); +void cxgb4_cleanup_tc_flower(struct adapter *adap); +#endif /* __CXGB4_TC_FLOWER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 84541fce94c5..52324c77a4fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -212,6 +212,7 @@ struct filter_ctx { struct ch_filter_specification; +int cxgb4_get_free_ftid(struct net_device *dev, int family); int __cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs, struct filter_ctx *ctx); @@ -220,6 +221,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id, int cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs); int cxgb4_del_filter(struct net_device *dev, int filter_id); +int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, + u64 *hitcnt, u64 *bytecnt); static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index aa28299aef5f..37d90d63e4a3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -176,6 +176,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */ + CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */ + CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h index 6aa9f89d073b..4bfbf25f9ddc 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.h +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h @@ -19,9 +19,8 @@ void enic_flow_may_expire(unsigned long data); static inline void enic_rfs_timer_start(struct enic *enic) { - init_timer(&enic->rfs_h.rfs_may_expire); - enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire; - enic->rfs_h.rfs_may_expire.data = (unsigned long)enic; + setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, + (unsigned long)enic); mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d24ee1ad3be1..4a11baffe02d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2846,9 +2846,8 @@ static int 
enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup notification timer, HW reset task, and wq locks */ - init_timer(&enic->notify_timer); - enic->notify_timer.function = enic_notify_timer; - enic->notify_timer.data = (unsigned long)enic; + setup_timer(&enic->notify_timer, enic_notify_timer, + (unsigned long)enic); enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 0affee9c8aa2..299812e92db7 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1147,9 +1147,8 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) lp->timeout = -1; lp->gendev = gendev; spin_lock_init(&lp->lock); - init_timer(&lp->timer); - lp->timer.function = (void (*)(unsigned long))de4x5_ast; - lp->timer.data = (unsigned long)dev; + setup_timer(&lp->timer, (void (*)(unsigned long))de4x5_ast, + (unsigned long)dev); de4x5_parse_params(dev); /* diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 07e10a45beaa..6585f737d08b 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -596,10 +596,8 @@ static int dmfe_open(struct net_device *dev) netif_wake_queue(dev); /* set and active a timer process */ - init_timer(&db->timer); + setup_timer(&db->timer, dmfe_timer, (unsigned long)dev); db->timer.expires = DMFE_TIMER_WUT + HZ * 2; - db->timer.data = (unsigned long)dev; - db->timer.function = dmfe_timer; add_timer(&db->timer); return 0; diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 7fc248efc4ba..5fbbc0caba99 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -491,10 +491,8 @@ static int uli526x_open(struct net_device *dev) netif_wake_queue(dev); /* set and active a timer process */ - init_timer(&db->timer); + setup_timer(&db->timer, uli526x_timer, (unsigned long)dev); db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; - db->timer.data = (unsigned long)dev; - db->timer.function = uli526x_timer; add_timer(&db->timer); return 0; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 32d7229544fa..6f88d687b6d2 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -655,10 +655,8 @@ static int netdev_open(struct net_device *dev) netdev_dbg(dev, "Done netdev_open()\n"); /* Set the timer to check for link beat. */ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = jiffies + 1*HZ; - np->timer.data = (unsigned long)dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); return 0; out_err: diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 2704bcf023be..6ca9e981ad57 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -913,10 +913,8 @@ static int netdev_open(struct net_device *dev) ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); /* Set the timer to check for link beat. 
*/ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = jiffies + 3*HZ; - np->timer.data = (unsigned long)dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); /* Enable interrupts by setting the interrupt mask. */ diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index e92859dab7ae..c8982313d850 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -909,17 +909,13 @@ static int netdev_open(struct net_device *dev) printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); /* Set the timer to check for link beat. */ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = RUN_AT(3 * HZ); - np->timer.data = (unsigned long) dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); - init_timer(&np->reset_timer); - np->reset_timer.data = (unsigned long) dev; - np->reset_timer.function = reset_timer; + setup_timer(&np->reset_timer, reset_timer, (unsigned long)dev); np->reset_timer_armed = 0; return rc; } diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index f77ba9fa257b..a96b838cffce 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3857,8 +3857,9 @@ static int ucc_geth_probe(struct platform_device* ofdev) } if (netif_msg_probe(&debug)) - pr_info("UCC%1d at 0x%8x (irq = %d)\n", - ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + pr_info("UCC%1d at 0x%8llx (irq = %d)\n", + ug_info->uf_info.ucc_num + 1, + (u64)ug_info->uf_info.regs, ug_info->uf_info.irq); /* Create an ethernet device instance */ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 4d10270ddf8f..184f11242f56 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1910,11 +1910,10 @@ static int e100_alloc_cbs(struct nic *nic) nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; nic->cbs_avail = 0; - nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL, - &nic->cbs_dma_addr); + nic->cbs = pci_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); if (!nic->cbs) return -ENOMEM; - memset(nic->cbs, 0, count * sizeof(struct cb)); for (cb = nic->cbs, i = 0; i < count; cb++, i++) { cb->next = (i + 1 < count) ? 
cb + 1 : nic->cbs; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 327dfe5bedc0..8436c5f2c3e8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7252,13 +7252,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; - - init_timer(&adapter->phy_info_timer); - adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long)adapter; + setup_timer(&adapter->watchdog_timer, e1000_watchdog, + (unsigned long)adapter); + setup_timer(&adapter->phy_info_timer, e1000_update_phy_info, + (unsigned long)adapter); INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1825d956bb00..c243f9da95ae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2686,9 +2686,8 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = &i40evf_watchdog_timer; - adapter->watchdog_timer.data = (unsigned long)adapter; + setup_timer(&adapter->watchdog_timer, &i40evf_watchdog_timer, + (unsigned long)adapter); mod_timer(&adapter->watchdog_timer, jiffies + 1); adapter->tx_desc_count = I40EVF_DEFAULT_TXD; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5a713199653c..1e6ec2277d54 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -508,9 +508,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = ixgb_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; + setup_timer(&adapter->watchdog_timer, ixgb_watchdog, + (unsigned long)adapter); INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 3c0a6451273d..7cecd9dbc111 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -4,6 +4,7 @@ * Copyright 2004 IDT Inc. 
(rischelp@idt.com) * Copyright 2006 Felix Fietkau <nbd@openwrt.org> * Copyright 2008 Florian Fainelli <florian@openwrt.org> + * Copyright 2017 Roman Yeryomin <roman@advem.lv> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -64,9 +65,9 @@ #include <asm/mach-rc32434/eth.h> #include <asm/mach-rc32434/dma_v.h> -#define DRV_NAME "korina" -#define DRV_VERSION "0.10" -#define DRV_RELDATE "04Mar2008" +#define DRV_NAME "korina" +#define DRV_VERSION "0.20" +#define DRV_RELDATE "15Sep2017" #define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \ ((dev)->dev_addr[1])) @@ -75,7 +76,7 @@ ((dev)->dev_addr[4] << 8) | \ ((dev)->dev_addr[5])) -#define MII_CLOCK 1250000 /* no more than 2.5MHz */ +#define MII_CLOCK 1250000 /* no more than 2.5MHz */ /* the following must be powers of two */ #define KORINA_NUM_RDS 64 /* number of receive descriptors */ @@ -87,15 +88,19 @@ #define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */ #define KORINA_RDS_MASK (KORINA_NUM_RDS - 1) #define KORINA_TDS_MASK (KORINA_NUM_TDS - 1) -#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc)) +#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc)) #define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc)) -#define TX_TIMEOUT (6000 * HZ / 1000) +#define TX_TIMEOUT (6000 * HZ / 1000) -enum chain_status { desc_filled, desc_empty }; -#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0) -#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0) -#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT) +enum chain_status { + desc_filled, + desc_empty +}; + +#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0) +#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0) +#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT) /* Information that need to be kept for each board. 
*/ struct korina_private { @@ -122,10 +127,8 @@ struct korina_private { int rx_irq; int tx_irq; - int ovr_irq; - int und_irq; - spinlock_t lock; /* NIC xmit lock */ + spinlock_t lock; /* NIC xmit lock */ int dma_halt_cnt; int dma_run_cnt; @@ -148,17 +151,17 @@ static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr) static inline void korina_abort_dma(struct net_device *dev, struct dma_reg *ch) { - if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) { - writel(0x10, &ch->dmac); + if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) { + writel(0x10, &ch->dmac); - while (!(readl(&ch->dmas) & DMA_STAT_HALT)) - netif_trans_update(dev); + while (!(readl(&ch->dmas) & DMA_STAT_HALT)) + netif_trans_update(dev); - writel(0, &ch->dmas); - } + writel(0, &ch->dmas); + } - writel(0, &ch->dmadptr); - writel(0, &ch->dmandptr); + writel(0, &ch->dmadptr); + writel(0, &ch->dmandptr); } static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr) @@ -365,59 +368,60 @@ static int korina_rx(struct net_device *dev, int limit) if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0) break; - /* Update statistics counters */ - if (devcs & ETH_RX_CRC) - dev->stats.rx_crc_errors++; - if (devcs & ETH_RX_LOR) - dev->stats.rx_length_errors++; - if (devcs & ETH_RX_LE) - dev->stats.rx_length_errors++; - if (devcs & ETH_RX_OVR) - dev->stats.rx_fifo_errors++; - if (devcs & ETH_RX_CV) - dev->stats.rx_frame_errors++; - if (devcs & ETH_RX_CES) - dev->stats.rx_length_errors++; - if (devcs & ETH_RX_MP) - dev->stats.multicast++; + /* check that this is a whole packet + * WARNING: DMA_FD bit incorrectly set + * in Rc32434 (errata ref #077) */ + if (!(devcs & ETH_RX_LD)) + goto next; - if ((devcs & ETH_RX_LD) != ETH_RX_LD) { - /* check that this is a whole packet - * WARNING: DMA_FD bit incorrectly set - * in Rc32434 (errata ref #077) */ + if (!(devcs & ETH_RX_ROK)) { + /* Update statistics counters */ dev->stats.rx_errors++; dev->stats.rx_dropped++; - } else if ((devcs & ETH_RX_ROK)) { - pkt_len = RCVPKT_LENGTH(devcs); + if (devcs & ETH_RX_CRC) + dev->stats.rx_crc_errors++; + if (devcs & ETH_RX_LE) + dev->stats.rx_length_errors++; + if (devcs & ETH_RX_OVR) + dev->stats.rx_fifo_errors++; + if (devcs & ETH_RX_CV) + dev->stats.rx_frame_errors++; + if (devcs & ETH_RX_CES) + dev->stats.rx_frame_errors++; + + goto next; + } - /* must be the (first and) last - * descriptor then */ - pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; + pkt_len = RCVPKT_LENGTH(devcs); - /* invalidate the cache */ - dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); + /* must be the (first and) last + * descriptor then */ + pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; - /* Malloc up new buffer. */ - skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); + /* invalidate the cache */ + dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); - if (!skb_new) - break; - /* Do not count the CRC */ - skb_put(skb, pkt_len - 4); - skb->protocol = eth_type_trans(skb, dev); + /* Malloc up new buffer. 
*/ + skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); - /* Pass the packet to upper layers */ - netif_receive_skb(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; + if (!skb_new) + break; + /* Do not count the CRC */ + skb_put(skb, pkt_len - 4); + skb->protocol = eth_type_trans(skb, dev); - /* Update the mcast stats */ - if (devcs & ETH_RX_MP) - dev->stats.multicast++; + /* Pass the packet to upper layers */ + napi_gro_receive(&lp->napi, skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; - lp->rx_skb[lp->rx_next_done] = skb_new; - } + /* Update the mcast stats */ + if (devcs & ETH_RX_MP) + dev->stats.multicast++; + + lp->rx_skb[lp->rx_next_done] = skb_new; +next: rd->devcs = 0; /* Restore descriptor's curr_addr */ @@ -686,7 +690,7 @@ static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) /* ethtool helpers */ static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) + struct ethtool_drvinfo *info) { struct korina_private *lp = netdev_priv(dev); @@ -729,10 +733,10 @@ static u32 netdev_get_link(struct net_device *dev) } static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_link = netdev_get_link, - .get_link_ksettings = netdev_get_link_ksettings, - .set_link_ksettings = netdev_set_link_ksettings, + .get_drvinfo = netdev_get_drvinfo, + .get_link = netdev_get_link, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int korina_alloc_ring(struct net_device *dev) @@ -864,7 +868,7 @@ static int korina_init(struct net_device *dev) /* Management Clock Prescaler Divisor * Clock independent setting */ writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1, - &lp->eth_regs->ethmcp); + &lp->eth_regs->ethmcp); /* don't transmit until fifo contains 48b */ writel(48, &lp->eth_regs->ethfifott); @@ -891,8 +895,6 @@ static void korina_restart_task(struct work_struct *work) */ disable_irq(lp->rx_irq); disable_irq(lp->tx_irq); - disable_irq(lp->ovr_irq); - disable_irq(lp->und_irq); writel(readl(&lp->tx_dma_regs->dmasm) | DMA_STAT_FINI | DMA_STAT_ERR, @@ -911,40 +913,10 @@ static void korina_restart_task(struct work_struct *work) } korina_multicast_list(dev); - enable_irq(lp->und_irq); - enable_irq(lp->ovr_irq); enable_irq(lp->tx_irq); enable_irq(lp->rx_irq); } -static void korina_clear_and_restart(struct net_device *dev, u32 value) -{ - struct korina_private *lp = netdev_priv(dev); - - netif_stop_queue(dev); - writel(value, &lp->eth_regs->ethintfc); - schedule_work(&lp->restart_task); -} - -/* Ethernet Tx Underflow interrupt */ -static irqreturn_t korina_und_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct korina_private *lp = netdev_priv(dev); - unsigned int und; - - spin_lock(&lp->lock); - - und = readl(&lp->eth_regs->ethintfc); - - if (und & ETH_INT_FC_UND) - korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND); - - spin_unlock(&lp->lock); - - return IRQ_HANDLED; -} - static void korina_tx_timeout(struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); @@ -952,25 +924,6 @@ static void korina_tx_timeout(struct net_device *dev) schedule_work(&lp->restart_task); } -/* Ethernet Rx Overflow interrupt */ -static irqreturn_t -korina_ovr_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct korina_private *lp = netdev_priv(dev); - unsigned int ovr; - - spin_lock(&lp->lock); - ovr = readl(&lp->eth_regs->ethintfc); - - if (ovr & ETH_INT_FC_OVR) - 
korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR); - - spin_unlock(&lp->lock); - - return IRQ_HANDLED; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void korina_poll_controller(struct net_device *dev) { @@ -993,48 +946,26 @@ static int korina_open(struct net_device *dev) } /* Install the interrupt handler - * that handles the Done Finished - * Ovr and Und Events */ + * that handles the Done Finished */ ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt, 0, "Korina ethernet Rx", dev); if (ret < 0) { printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n", - dev->name, lp->rx_irq); + dev->name, lp->rx_irq); goto err_release; } ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt, 0, "Korina ethernet Tx", dev); if (ret < 0) { printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n", - dev->name, lp->tx_irq); + dev->name, lp->tx_irq); goto err_free_rx_irq; } - /* Install handler for overrun error. */ - ret = request_irq(lp->ovr_irq, korina_ovr_interrupt, - 0, "Ethernet Overflow", dev); - if (ret < 0) { - printk(KERN_ERR "%s: unable to get OVR IRQ %d\n", - dev->name, lp->ovr_irq); - goto err_free_tx_irq; - } - - /* Install handler for underflow error. */ - ret = request_irq(lp->und_irq, korina_und_interrupt, - 0, "Ethernet Underflow", dev); - if (ret < 0) { - printk(KERN_ERR "%s: unable to get UND IRQ %d\n", - dev->name, lp->und_irq); - goto err_free_ovr_irq; - } mod_timer(&lp->media_check_timer, jiffies + 1); out: return ret; -err_free_ovr_irq: - free_irq(lp->ovr_irq, dev); -err_free_tx_irq: - free_irq(lp->tx_irq, dev); err_free_rx_irq: free_irq(lp->rx_irq, dev); err_release: @@ -1052,8 +983,6 @@ static int korina_close(struct net_device *dev) /* Disable interrupts */ disable_irq(lp->rx_irq); disable_irq(lp->tx_irq); - disable_irq(lp->ovr_irq); - disable_irq(lp->und_irq); korina_abort_tx(dev); tmp = readl(&lp->tx_dma_regs->dmasm); @@ -1073,8 +1002,6 @@ static int korina_close(struct net_device *dev) free_irq(lp->rx_irq, dev); free_irq(lp->tx_irq, dev); - free_irq(lp->ovr_irq, dev); - free_irq(lp->und_irq, dev); return 0; } @@ -1113,8 +1040,6 @@ static int korina_probe(struct platform_device *pdev) lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx"); lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx"); - lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr"); - lp->und_irq = platform_get_irq_byname(pdev, "korina_und"); r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); dev->base_addr = r->start; @@ -1162,7 +1087,7 @@ static int korina_probe(struct platform_device *pdev) dev->netdev_ops = &korina_netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; - netif_napi_add(dev, &lp->napi, korina_poll, 64); + netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT); lp->phy_addr = (((lp->rx_irq == 0x2c? 
1:0) << 8) | 0x05); lp->mii_if.dev = dev; @@ -1226,5 +1151,6 @@ module_platform_driver(korina_driver); MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>"); MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_AUTHOR("Roman Yeryomin <roman@advem.lv>"); MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index dd0ee2691c86..8041d692db3c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -6452,7 +6452,6 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { tx_desc_unmap_put(port, txq, tx_desc); frags = 0; - goto out; } } diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 993724959a7c..91b1c154fd29 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1496,9 +1496,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); memset(&pep->timeout, 0, sizeof(struct timer_list)); - init_timer(&pep->timeout); - pep->timeout.function = rxq_refill_timer_wrapper; - pep->timeout.data = (unsigned long)pep; + setup_timer(&pep->timeout, rxq_refill_timer_wrapper, + (unsigned long)pep); pep->smi_bus = mdiobus_alloc(); if (!pep->smi_bus) { diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 53daa6ca5d83..de0f9e5e42ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) phys_addr_t addr; INIT_LIST_HEAD(&priv->catas_err.list); - init_timer(&priv->catas_err.timer); + setup_timer(&priv->catas_err.timer, poll_catas, (unsigned long)dev); priv->catas_err.map = NULL; if (!mlx4_is_slave(dev)) { @@ -293,8 +293,6 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) } } - priv->catas_err.timer.data = (unsigned long) dev; - priv->catas_err.timer.function = poll_catas; priv->catas_err.timer.expires = round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); add_timer(&priv->catas_err.timer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 8aea0a065e56..a89a68ce53ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -320,15 +320,13 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; - init_timer(&health->timer); + setup_timer(&health->timer, poll_health, (unsigned long)dev); health->sick = 0; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; - health->timer.data = (unsigned long)dev; - health->timer.function = poll_health; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 891ff418bb5e..4b88158173f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -17,7 +17,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ 
spectrum_kvdl.o spectrum_acl_tcam.o \ spectrum_acl.o spectrum_flower.o \ spectrum_cnt.o spectrum_fid.o \ - spectrum_ipip.o + spectrum_ipip.o spectrum_acl_flex_actions.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 5ae110172c22..ab3ffe7a8eda 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -712,7 +712,7 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_append_drop); -int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block) +int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id) { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPDISC_CODE, @@ -722,7 +722,7 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block) return -ENOBUFS; mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, - MLXSW_TRAP_ID_ACL0); + trap_id); return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_trap); @@ -891,3 +891,74 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set); + +/* MC Routing Action + * ----------------- + * The Multicast router action. Can be used by RMFT_V2 - Router Multicast + * Forwarding Table Version 2 Register. + */ + +#define MLXSW_AFA_MCROUTER_CODE 0x10 +#define MLXSW_AFA_MCROUTER_SIZE 2 + +enum mlxsw_afa_mcrouter_rpf_action { + MLXSW_AFA_MCROUTER_RPF_ACTION_NOP, + MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, + MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR, +}; + +/* afa_mcrouter_rpf_action */ +MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3); + +/* afa_mcrouter_expected_irif */ +MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16); + +/* afa_mcrouter_min_mtu */ +MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16); + +enum mlxsw_afa_mrouter_vrmid { + MLXSW_AFA_MCROUTER_VRMID_INVALID, + MLXSW_AFA_MCROUTER_VRMID_VALID +}; + +/* afa_mcrouter_vrmid + * Valid RMID: rigr_rmid_index is used as RMID + */ +MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1); + +/* afa_mcrouter_rigr_rmid_index + * When the vrmid field is set to invalid, the field is used as pointer to + * Router Interface Group (RIGR) Table in the KVD linear. + * When the vrmid is set to valid, the field is used as RMID index, ranged + * from 0 to max_mid - 1. The index is to the Port Group Table. 
+ */ +MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24); + +static inline void +mlxsw_afa_mcrouter_pack(char *payload, + enum mlxsw_afa_mcrouter_rpf_action rpf_action, + u16 expected_irif, u16 min_mtu, + enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index) + +{ + mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action); + mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif); + mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu); + mlxsw_afa_mcrouter_vrmid_set(payload, vrmid); + mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index); +} + +int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, + u16 expected_irif, u16 min_mtu, + bool rmid_valid, u32 kvdl_index) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_MCROUTER_CODE, + MLXSW_AFA_MCROUTER_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, + expected_irif, min_mtu, rmid_valid, kvdl_index); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index f99c341b2497..501819c790d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -60,7 +60,7 @@ u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); -int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block); +int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, @@ -68,5 +68,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, u32 counter_index); int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); +int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, + u16 expected_irif, u16 min_mtu, + bool rmid_valid, u32 kvdl_index); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index cc27c5de5a1d..17eba19100de 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2142,15 +2142,14 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); */ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); -#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8 +#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8 /* reg_pefa_flex_action_set * Action-set to perform when rule is matched. * Must be zero padded if action set is shorter. 
* Access: RW */ -MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, - MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); +MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN); static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, const char *flex_action_set) @@ -2243,7 +2242,7 @@ MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, * Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, - MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); + MLXSW_REG_FLEX_ACTION_SET_LEN); static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, enum mlxsw_reg_ptce2_op op, @@ -3682,12 +3681,15 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP, MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, + MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM, + MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST, MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, @@ -3992,6 +3994,12 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); */ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); +/* reg_ritr_ipv4_mc + * IPv4 multicast routing enable. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1); + enum mlxsw_reg_ritr_if_type { /* VLAN interface. */ MLXSW_REG_RITR_VLAN_IF, @@ -4049,6 +4057,14 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); */ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); +/* reg_ritr_ipv4_mc_fe + * IPv4 Multicast Forwarding Enable. + * When disabled, forwarding is blocked but local traffic (traps and IP to me) + * will be enabled. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1); + /* reg_ritr_lb_en * Loop-back filter enable for unicast packets. * If the flag is set then loop-back filter for unicast packets is @@ -4271,11 +4287,13 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, mlxsw_reg_ritr_enable_set(payload, enable); mlxsw_reg_ritr_ipv4_set(payload, 1); mlxsw_reg_ritr_ipv6_set(payload, 1); + mlxsw_reg_ritr_ipv4_mc_set(payload, 1); mlxsw_reg_ritr_type_set(payload, type); mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); mlxsw_reg_ritr_ipv6_fe_set(payload, 1); + mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); @@ -4311,6 +4329,57 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); } +/* RTAR - Router TCAM Allocation Register + * -------------------------------------- + * This register is used for allocation of regions in the TCAM table. + */ +#define MLXSW_REG_RTAR_ID 0x8004 +#define MLXSW_REG_RTAR_LEN 0x20 + +MLXSW_REG_DEFINE(rtar, MLXSW_REG_RTAR_ID, MLXSW_REG_RTAR_LEN); + +enum mlxsw_reg_rtar_op { + MLXSW_REG_RTAR_OP_ALLOCATE, + MLXSW_REG_RTAR_OP_RESIZE, + MLXSW_REG_RTAR_OP_DEALLOCATE, +}; + +/* reg_rtar_op + * Access: WO + */ +MLXSW_ITEM32(reg, rtar, op, 0x00, 28, 4); + +enum mlxsw_reg_rtar_key_type { + MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST = 1, + MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST = 3 +}; + +/* reg_rtar_key_type + * TCAM key type for the region. + * Access: WO + */ +MLXSW_ITEM32(reg, rtar, key_type, 0x00, 0, 8); + +/* reg_rtar_region_size + * TCAM region size. 
When allocating/resizing this is the requested + * size, the response is the actual size. + * Note: Actual size may be larger than requested. + * Reserved for op = Deallocate + * Access: WO + */ +MLXSW_ITEM32(reg, rtar, region_size, 0x04, 0, 16); + +static inline void mlxsw_reg_rtar_pack(char *payload, + enum mlxsw_reg_rtar_op op, + enum mlxsw_reg_rtar_key_type key_type, + u16 region_size) +{ + MLXSW_REG_ZERO(rtar, payload); + mlxsw_reg_rtar_op_set(payload, op); + mlxsw_reg_rtar_key_type_set(payload, key_type); + mlxsw_reg_rtar_region_size_set(payload, region_size); +} + /* RATR - Router Adjacency Table Register * -------------------------------------- * The RATR register is used to configure the Router Adjacency (next-hop) @@ -4630,6 +4699,65 @@ static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index, MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC); } +/* RRCR - Router Rules Copy Register Layout + * ---------------------------------------- + * This register is used for moving and copying route entry rules. + */ +#define MLXSW_REG_RRCR_ID 0x800F +#define MLXSW_REG_RRCR_LEN 0x24 + +MLXSW_REG_DEFINE(rrcr, MLXSW_REG_RRCR_ID, MLXSW_REG_RRCR_LEN); + +enum mlxsw_reg_rrcr_op { + /* Move rules */ + MLXSW_REG_RRCR_OP_MOVE, + /* Copy rules */ + MLXSW_REG_RRCR_OP_COPY, +}; + +/* reg_rrcr_op + * Access: WO + */ +MLXSW_ITEM32(reg, rrcr, op, 0x00, 28, 4); + +/* reg_rrcr_offset + * Offset within the region from which to copy/move. + * Access: Index + */ +MLXSW_ITEM32(reg, rrcr, offset, 0x00, 0, 16); + +/* reg_rrcr_size + * The number of rules to copy/move. + * Access: WO + */ +MLXSW_ITEM32(reg, rrcr, size, 0x04, 0, 16); + +/* reg_rrcr_table_id + * Identifier of the table on which to perform the operation. Encoding is the + * same as in RTAR.key_type + * Access: Index + */ +MLXSW_ITEM32(reg, rrcr, table_id, 0x10, 0, 4); + +/* reg_rrcr_dest_offset + * Offset within the region to which to copy/move + * Access: Index + */ +MLXSW_ITEM32(reg, rrcr, dest_offset, 0x20, 0, 16); + +static inline void mlxsw_reg_rrcr_pack(char *payload, enum mlxsw_reg_rrcr_op op, + u16 offset, u16 size, + enum mlxsw_reg_rtar_key_type table_id, + u16 dest_offset) +{ + MLXSW_REG_ZERO(rrcr, payload); + mlxsw_reg_rrcr_op_set(payload, op); + mlxsw_reg_rrcr_offset_set(payload, offset); + mlxsw_reg_rrcr_size_set(payload, size); + mlxsw_reg_rrcr_table_id_set(payload, table_id); + mlxsw_reg_rrcr_dest_offset_set(payload, dest_offset); +} + /* RALTA - Router Algorithmic LPM Tree Allocation Register * ------------------------------------------------------- * RALTA is used to allocate the LPM trees of the SHSPM method. @@ -5596,6 +5724,229 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +/* RIGR-V2 - Router Interface Group Register Version 2 + * --------------------------------------------------- + * The RIGR_V2 register is used to add, remove and query egress interface list + * of a multicast forwarding entry. + */ +#define MLXSW_REG_RIGR2_ID 0x8023 +#define MLXSW_REG_RIGR2_LEN 0xB0 + +#define MLXSW_REG_RIGR2_MAX_ERIFS 32 + +MLXSW_REG_DEFINE(rigr2, MLXSW_REG_RIGR2_ID, MLXSW_REG_RIGR2_LEN); + +/* reg_rigr2_rigr_index + * KVD Linear index. + * Access: Index + */ +MLXSW_ITEM32(reg, rigr2, rigr_index, 0x04, 0, 24); + +/* reg_rigr2_vnext + * Next RIGR Index is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, vnext, 0x08, 31, 1); + +/* reg_rigr2_next_rigr_index + * Next RIGR Index. The index is to the KVD linear. + * Reserved when vnxet = '0'. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, next_rigr_index, 0x08, 0, 24); + +/* reg_rigr2_vrmid + * RMID Index is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, vrmid, 0x20, 31, 1); + +/* reg_rigr2_rmid_index + * RMID Index. + * Range 0 .. max_mid - 1 + * Reserved when vrmid = '0'. + * The index is to the Port Group Table (PGT) + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, rmid_index, 0x20, 0, 16); + +/* reg_rigr2_erif_entry_v + * Egress Router Interface is valid. + * Note that low-entries must be set if high-entries are set. For + * example: if erif_entry[2].v is set then erif_entry[1].v and + * erif_entry[0].v must be set. + * Index can be from 0 to cap_mc_erif_list_entries-1 + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_v, 0x24, 31, 1, 4, 0, false); + +/* reg_rigr2_erif_entry_erif + * Egress Router Interface. + * Valid range is from 0 to cap_max_router_interfaces - 1 + * Index can be from 0 to MLXSW_REG_RIGR2_MAX_ERIFS - 1 + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_erif, 0x24, 0, 16, 4, 0, false); + +static inline void mlxsw_reg_rigr2_pack(char *payload, u32 rigr_index, + bool vnext, u32 next_rigr_index) +{ + MLXSW_REG_ZERO(rigr2, payload); + mlxsw_reg_rigr2_rigr_index_set(payload, rigr_index); + mlxsw_reg_rigr2_vnext_set(payload, vnext); + mlxsw_reg_rigr2_next_rigr_index_set(payload, next_rigr_index); + mlxsw_reg_rigr2_vrmid_set(payload, 0); + mlxsw_reg_rigr2_rmid_index_set(payload, 0); +} + +static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index, + bool v, u16 erif) +{ + mlxsw_reg_rigr2_erif_entry_v_set(payload, index, v); + mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif); +} + +/* RMFT-V2 - Router Multicast Forwarding Table Version 2 Register + * -------------------------------------------------------------- + * The RMFT_V2 register is used to configure and query the multicast table. + */ +#define MLXSW_REG_RMFT2_ID 0x8027 +#define MLXSW_REG_RMFT2_LEN 0x174 + +MLXSW_REG_DEFINE(rmft2, MLXSW_REG_RMFT2_ID, MLXSW_REG_RMFT2_LEN); + +/* reg_rmft2_v + * Valid + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, v, 0x00, 31, 1); + +enum mlxsw_reg_rmft2_type { + MLXSW_REG_RMFT2_TYPE_IPV4, + MLXSW_REG_RMFT2_TYPE_IPV6 +}; + +/* reg_rmft2_type + * Access: Index + */ +MLXSW_ITEM32(reg, rmft2, type, 0x00, 28, 2); + +enum mlxsw_sp_reg_rmft2_op { + /* For Write: + * Write operation. Used to write a new entry to the table. All RW + * fields are relevant for new entry. Activity bit is set for new + * entries - Note write with v (Valid) 0 will delete the entry. + * For Query: + * Read operation + */ + MLXSW_REG_RMFT2_OP_READ_WRITE, +}; + +/* reg_rmft2_op + * Operation. + * Access: OP + */ +MLXSW_ITEM32(reg, rmft2, op, 0x00, 20, 2); + +/* reg_rmft2_a + * Activity. Set for new entries. Set if a packet lookup has hit on the specific + * entry. + * Access: RO + */ +MLXSW_ITEM32(reg, rmft2, a, 0x00, 16, 1); + +/* reg_rmft2_offset + * Offset within the multicast forwarding table to write to. + * Access: Index + */ +MLXSW_ITEM32(reg, rmft2, offset, 0x00, 0, 16); + +/* reg_rmft2_virtual_router + * Virtual Router ID. Range from 0..cap_max_virtual_routers-1 + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, virtual_router, 0x04, 0, 16); + +enum mlxsw_reg_rmft2_irif_mask { + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, + MLXSW_REG_RMFT2_IRIF_MASK_COMPARE +}; + +/* reg_rmft2_irif_mask + * Ingress RIF mask. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1); + +/* reg_rmft2_irif + * Ingress RIF index. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16); + +/* reg_rmft2_dip4 + * Destination IPv4 address + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32); + +/* reg_rmft2_dip4_mask + * A bit that is set directs the TCAM to compare the corresponding bit in key. A + * bit that is clear directs the TCAM to ignore the corresponding bit in key. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32); + +/* reg_rmft2_sip4 + * Source IPv4 address + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32); + +/* reg_rmft2_sip4_mask + * A bit that is set directs the TCAM to compare the corresponding bit in key. A + * bit that is clear directs the TCAM to ignore the corresponding bit in key. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32); + +/* reg_rmft2_flexible_action_set + * ACL action set. The only supported action types in this field and in any + * action-set pointed from here are as follows: + * 00h: ACTION_NULL + * 01h: ACTION_MAC_TTL, only TTL configuration is supported. + * 03h: ACTION_TRAP + * 06h: ACTION_QOS + * 08h: ACTION_POLICING_MONITORING + * 10h: ACTION_ROUTER_MC + * Access: RW + */ +MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80, + MLXSW_REG_FLEX_ACTION_SET_LEN); + +static inline void +mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router, + enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif, + u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask, + const char *flexible_action_set) +{ + MLXSW_REG_ZERO(rmft2, payload); + mlxsw_reg_rmft2_v_set(payload, v); + mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4); + mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE); + mlxsw_reg_rmft2_offset_set(payload, offset); + mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router); + mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask); + mlxsw_reg_rmft2_irif_set(payload, irif); + mlxsw_reg_rmft2_dip4_set(payload, dip4); + mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask); + mlxsw_reg_rmft2_sip4_set(payload, sip4); + mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask); + if (flexible_action_set) + mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload, + flexible_action_set); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@ -6856,9 +7207,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(hpkt), MLXSW_REG(rgcr), MLXSW_REG(ritr), + MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), MLXSW_REG(ricnt), + MLXSW_REG(rrcr), MLXSW_REG(ralta), MLXSW_REG(ralst), MLXSW_REG(raltb), @@ -6866,6 +7219,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rauht), MLXSW_REG(raleu), MLXSW_REG(rauhtd), + MLXSW_REG(rigr2), + MLXSW_REG(rmft2), MLXSW_REG(mfcr), MLXSW_REG(mfsc), MLXSW_REG(mfsm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 9556d934714b..087aad52c195 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -63,6 +63,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, + MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MAX_LPM_TREES, /* Internal resources. 
@@ -100,6 +101,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, + [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 696b99e65a5a..e9b94430afed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -69,6 +69,7 @@ #include "txheader.h" #include "spectrum_cnt.h" #include "spectrum_dpipe.h" +#include "spectrum_acl_flex_actions.h" #include "../mlxfw/mlxfw.h" #define MLXSW_FWREV_MAJOR 13 @@ -3420,6 +3421,10 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { false, SP_IP2ME, DISCARD), /* ACL trap */ MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), + /* Multicast Router Traps */ + MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), + MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), + MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -3445,6 +3450,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: rate = 128; burst_size = 7; break; @@ -3460,6 +3467,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: rate = 1024; burst_size = 7; break; @@ -3505,6 +3513,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: priority = 5; tc = 5; break; @@ -3521,12 +3530,14 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: priority = 2; tc = 2; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: priority = 1; tc = 1; break; @@ -3693,6 +3704,18 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_switchdev_init; } + err = mlxsw_sp_counter_pool_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); + goto err_counter_pool_init; + } + + err = mlxsw_sp_afa_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); + goto err_afa_init; + } + err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3711,12 +3734,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_acl_init; } - err = mlxsw_sp_counter_pool_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); - goto err_counter_pool_init; - } - err = mlxsw_sp_dpipe_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); @@ -3734,14 +3751,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, err_ports_create: 
mlxsw_sp_dpipe_fini(mlxsw_sp); err_dpipe_init: - mlxsw_sp_counter_pool_fini(mlxsw_sp); -err_counter_pool_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_afa_fini(mlxsw_sp); +err_afa_init: + mlxsw_sp_counter_pool_fini(mlxsw_sp); +err_counter_pool_init: mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: mlxsw_sp_lag_fini(mlxsw_sp); @@ -3760,10 +3779,11 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_ports_remove(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); - mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_afa_fini(mlxsw_sp); + mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 84ce83acdc19..e907ec446a73 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -94,7 +94,8 @@ struct mlxsw_sp_mid { unsigned char addr[ETH_ALEN]; u16 fid; u16 mid; - unsigned int ref_count; + bool in_hw; + unsigned long *ports_in_mid; /* bits array */ }; enum mlxsw_sp_span_type { @@ -152,6 +153,7 @@ struct mlxsw_sp { struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; struct mlxsw_sp_router *router; + struct mlxsw_afa *afa; struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; struct { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 4b2455e3e079..eede75fbd585 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -52,7 +52,6 @@ struct mlxsw_sp_acl { struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; - struct mlxsw_afa *afa; struct mlxsw_sp_fid *dummy_fid; const struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; @@ -333,7 +332,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) return NULL; - rulei->act_block = mlxsw_afa_block_create(acl->afa); + rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa); if (IS_ERR(rulei->act_block)) { err = PTR_ERR(rulei->act_block); goto err_afa_block_create; @@ -397,7 +396,8 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei) { - return mlxsw_afa_block_append_trap(rulei->act_block); + return mlxsw_afa_block_append_trap(rulei->act_block, + MLXSW_TRAP_ID_ACL0); } int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, @@ -653,85 +653,6 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, return 0; } -#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 - -static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, - char *enc_actions, bool is_first) -{ - struct mlxsw_sp *mlxsw_sp = priv; - char pefa_pl[MLXSW_REG_PEFA_LEN]; - u32 kvdl_index; - int err; - - /* The first action set of a TCAM entry is stored directly in TCAM, - * not KVD linear area. 
- */ - if (is_first) - return 0; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE, - &kvdl_index); - if (err) - return err; - mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); - if (err) - goto err_pefa_write; - *p_kvdl_index = kvdl_index; - return 0; - -err_pefa_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); - return err; -} - -static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, - bool is_first) -{ - struct mlxsw_sp *mlxsw_sp = priv; - - if (is_first) - return; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); -} - -static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, - u8 local_port) -{ - struct mlxsw_sp *mlxsw_sp = priv; - char ppbs_pl[MLXSW_REG_PPBS_LEN]; - u32 kvdl_index; - int err; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); - if (err) - return err; - mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); - if (err) - goto err_ppbs_write; - *p_kvdl_index = kvdl_index; - return 0; - -err_ppbs_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); - return err; -} - -static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) -{ - struct mlxsw_sp *mlxsw_sp = priv; - - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); -} - -static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { - .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, - .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, - .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, - .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, -}; - int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) { const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; @@ -753,14 +674,6 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) goto err_afk_create; } - acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, - ACL_ACTIONS_PER_SET), - &mlxsw_sp_act_afa_ops, mlxsw_sp); - if (IS_ERR(acl->afa)) { - err = PTR_ERR(acl->afa); - goto err_afa_create; - } - err = rhashtable_init(&acl->ruleset_ht, &mlxsw_sp_acl_ruleset_ht_params); if (err) @@ -792,8 +705,6 @@ err_acl_ops_init: err_fid_get: rhashtable_destroy(&acl->ruleset_ht); err_rhashtable_init: - mlxsw_afa_destroy(acl->afa); -err_afa_create: mlxsw_afk_destroy(acl->afk); err_afk_create: kfree(acl); @@ -810,7 +721,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); - mlxsw_afa_destroy(acl->afa); mlxsw_afk_destroy(acl->afk); kfree(acl); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c new file mode 100644 index 000000000000..4d3340ed0291 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -0,0 +1,129 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spectrum_acl_flex_actions.h" +#include "core_acl_flex_actions.h" + +#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 + +static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + u32 kvdl_index; + int err; + + /* The first action set of a TCAM entry is stored directly in TCAM, + * not KVD linear area. 
+ */ + if (is_first) + return 0; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE, + &kvdl_index); + if (err) + return err; + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_pefa_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, + bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + if (is_first) + return; + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, + u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char ppbs_pl[MLXSW_REG_PPBS_LEN]; + u32 kvdl_index; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + if (err) + return err; + mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); + if (err) + goto err_ppbs_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_ppbs_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, +}; + +int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_ACTIONS_PER_SET), + &mlxsw_sp_act_afa_ops, mlxsw_sp); + return PTR_ERR_OR_ZERO(mlxsw_sp->afa); +} + +void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_afa_destroy(mlxsw_sp->afa); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h new file mode 100644 index 000000000000..2726192836ad --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h @@ -0,0 +1,44 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H + +#include "spectrum.h" + +int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2cfb3f5d092d..0bd93dc88ffa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5049,6 +5049,11 @@ int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) return rif->dev->ifindex; } +const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) +{ + return rif->dev; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 345fcc4f38e9..ae4c99b3f2fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -69,6 +69,7 @@ u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); +const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d39ffbfcc436..0f9eac5f4ebf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -67,7 +67,6 @@ struct mlxsw_sp_bridge { u32 ageing_time; bool vlan_enabled_exists; struct list_head bridges_list; - struct list_head mids_list; DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; @@ -77,6 +76,7 @@ struct mlxsw_sp_bridge_device { struct net_device *dev; struct list_head list; struct list_head ports_list; + struct list_head mids_list; u8 vlan_enabled:1, multicast_enabled:1; const struct mlxsw_sp_bridge_ops *ops; @@ -121,6 +121,20 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, u16 fid_index); +static void +mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port); + +static void 
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_device + *bridge_device); + +static void +mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + bool add); + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, const struct net_device *br_dev) @@ -161,6 +175,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, } else { bridge_device->ops = bridge->bridge_8021d_ops; } + INIT_LIST_HEAD(&bridge_device->mids_list); list_add(&bridge_device->list, &bridge->bridges_list); return bridge_device; @@ -174,6 +189,7 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; WARN_ON(!list_empty(&bridge_device->ports_list)); + WARN_ON(!list_empty(&bridge_device->mids_list)); kfree(bridge_device); } @@ -249,7 +265,8 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, bridge_port->dev = brport_dev; bridge_port->bridge_device = bridge_device; bridge_port->stp_state = BR_STATE_DISABLED; - bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC; + bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | + BR_MCAST_FLOOD; INIT_LIST_HEAD(&bridge_port->vlans_list); list_add(&bridge_port->list, &bridge_device->ports_list); bridge_port->ref_count = 1; @@ -455,7 +472,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev, &attr->u.brport_flags); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD; + attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD | + BR_MCAST_FLOOD; break; default: return -EOPNOTSUPP; @@ -640,8 +658,18 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; - memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); + if (bridge_port->bridge_device->multicast_enabled) + goto out; + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, + MLXSW_SP_FLOOD_TYPE_MC, + brport_flags & + BR_MCAST_FLOOD); + if (err) + return err; + +out: + memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); return 0; } @@ -699,10 +727,10 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } -static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, - struct net_device *orig_dev, - bool is_port_mc_router) +static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool is_port_mrouter) { struct mlxsw_sp_bridge_port *bridge_port; int err; @@ -720,15 +748,26 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, MLXSW_SP_FLOOD_TYPE_MC, - is_port_mc_router); + is_port_mrouter); if (err) return err; + mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port, + is_port_mrouter); out: - bridge_port->mrouter = is_port_mc_router; + bridge_port->mrouter = is_port_mrouter; return 0; } +static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) +{ + const struct mlxsw_sp_bridge_device *bridge_device; + + bridge_device = bridge_port->bridge_device; + return bridge_device->multicast_enabled ? 
bridge_port->mrouter : + bridge_port->flags & BR_MCAST_FLOOD; +} + static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, struct net_device *orig_dev, @@ -749,9 +788,15 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_device) return 0; + if (bridge_device->multicast_enabled != !mc_disabled) { + bridge_device->multicast_enabled = !mc_disabled; + mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port, + bridge_device); + } + list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC; - bool member = mc_disabled ? true : bridge_port->mrouter; + bool member = mlxsw_sp_mc_flood(bridge_port); err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, @@ -793,9 +838,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->u.vlan_filtering); break; case SWITCHDEV_ATTR_ID_PORT_MROUTER: - err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, - attr->orig_dev, - attr->u.mrouter); + err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.mrouter); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, @@ -810,14 +855,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } -static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) -{ - const struct mlxsw_sp_bridge_device *bridge_device; - - bridge_device = bridge_port->bridge_device; - return !bridge_device->multicast_enabled ? true : bridge_port->mrouter; -} - static int mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, struct mlxsw_sp_bridge_port *bridge_port) @@ -955,24 +992,28 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) struct mlxsw_sp_bridge_vlan *bridge_vlan; struct mlxsw_sp_bridge_port *bridge_port; u16 vid = mlxsw_sp_port_vlan->vid; - bool last; + bool last_port, last_vlan; if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q && mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D)) return; bridge_port = mlxsw_sp_port_vlan->bridge_port; + last_vlan = list_is_singular(&bridge_port->vlans_list); bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); - last = list_is_singular(&bridge_vlan->port_vlan_list); + last_port = list_is_singular(&bridge_vlan->port_vlan_list); list_del(&mlxsw_sp_port_vlan->bridge_vlan_node); mlxsw_sp_bridge_vlan_put(bridge_vlan); mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); - if (last) + if (last_port) mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp, bridge_port, mlxsw_sp_fid_index(fid)); + if (last_vlan) + mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port); + mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port); @@ -1182,7 +1223,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, - u16 fid, u16 mid, bool adding) + u16 fid, u16 mid_idx, bool adding) { char *sfd_pl; int err; @@ -1193,16 +1234,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, - MLXSW_REG_SFD_REC_ACTION_NOP, mid); + MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); err = mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; } -static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, - bool add, bool clear_all_ports) +static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, + long *ports_bitmap) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *smid_pl; int err, i; @@ -1210,66 +1250,201 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, if (!smid_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); - if (clear_all_ports) { - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) - if (mlxsw_sp->ports[i]) - mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); + for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { + if (mlxsw_sp->ports[i]) + mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); } + + for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) + mlxsw_reg_smid_port_set(smid_pl, i, 1); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); kfree(smid_pl); return err; } -static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, - const unsigned char *addr, - u16 fid) +static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 mid_idx, bool add) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *smid_pl; + int err; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static struct +mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, + const unsigned char *addr, + u16 fid) { struct mlxsw_sp_mid *mid; - list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) { + list_for_each_entry(mid, &bridge_device->mids_list, list) { if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) return mid; } return NULL; } -static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, - const unsigned char *addr, - u16 fid) +static void +mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_port *bridge_port, + unsigned long *ports_bitmap) { - struct mlxsw_sp_mid *mid; + struct mlxsw_sp_port *mlxsw_sp_port; + u64 max_lag_members, i; + int lag_id; + + if (!bridge_port->lagged) { + set_bit(bridge_port->system_port, ports_bitmap); + } else { + max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_LAG_MEMBERS); + lag_id = bridge_port->lag_id; + for (i = 0; i < max_lag_members; i++) { + mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, + lag_id, i); + if (mlxsw_sp_port) + set_bit(mlxsw_sp_port->local_port, + ports_bitmap); + } + } +} + +static void +mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, + struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_bridge_port *bridge_port; + + list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { + if (bridge_port->mrouter) { + mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp, + bridge_port, + flood_bitmap); + } + } +} + +static bool +mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid, + struct mlxsw_sp_bridge_device *bridge_device) +{ + long *flood_bitmap; + int num_of_ports; + int alloc_size; u16 mid_idx; + int err; mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, MLXSW_SP_MID_MAX); if 
(mid_idx == MLXSW_SP_MID_MAX) - return NULL; + return false; + + num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); + alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); + flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); + if (!flood_bitmap) + return false; + + bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); + mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); + + mid->mid = mid_idx; + err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap); + kfree(flood_bitmap); + if (err) + return false; + + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx, + true); + if (err) + return false; + + set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); + mid->in_hw = true; + return true; +} + +static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + if (!mid->in_hw) + return 0; + + clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); + mid->in_hw = false; + return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid, + false); +} + +static struct +mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + const unsigned char *addr, + u16 fid) +{ + struct mlxsw_sp_mid *mid; + size_t alloc_size; mid = kzalloc(sizeof(*mid), GFP_KERNEL); if (!mid) return NULL; - set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); + alloc_size = sizeof(unsigned long) * + BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); + + mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); + if (!mid->ports_in_mid) + goto err_ports_in_mid_alloc; + ether_addr_copy(mid->addr, addr); mid->fid = fid; - mid->mid = mid_idx; - mid->ref_count = 0; - list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list); + mid->in_hw = false; + + if (!bridge_device->multicast_enabled) + goto out; + + if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device)) + goto err_write_mdb_entry; +out: + list_add_tail(&mid->list, &bridge_device->mids_list); return mid; + +err_write_mdb_entry: + kfree(mid->ports_in_mid); +err_ports_in_mid_alloc: + kfree(mid); + return NULL; } -static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mid *mid) +static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mid *mid) { - if (--mid->ref_count == 0) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + int err = 0; + + clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); + if (bitmap_empty(mid->ports_in_mid, + mlxsw_core_max_ports(mlxsw_sp->core))) { + err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); list_del(&mid->list); - clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); + kfree(mid->ports_in_mid); kfree(mid); - return 1; } - return 0; + return err; } static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -1302,39 +1477,72 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index); + mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); if (!mid) { - mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index); + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr, + fid_index); if (!mid) { netdev_err(dev, "Unable to allocate MC group\n"); return -ENOMEM; } } - mid->ref_count++; + set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); + + if (!bridge_device->multicast_enabled) + return 0; + + if (bridge_port->mrouter) + return 0; - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, 
true, - mid->ref_count == 1); + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); if (err) { netdev_err(dev, "Unable to set SMID\n"); goto err_out; } - if (mid->ref_count == 1) { - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, - mid->mid, true); - if (err) { - netdev_err(dev, "Unable to set MC SFD\n"); - goto err_out; - } - } - return 0; err_out: - __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid); + mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); return err; } +static void +mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_device + *bridge_device) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_mid *mid; + bool mc_enabled; + + mc_enabled = bridge_device->multicast_enabled; + + list_for_each_entry(mid, &bridge_device->mids_list, list) { + if (mc_enabled) + mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, + bridge_device); + else + mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); + } +} + +static void +mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + bool add) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_mid *mid; + + bridge_device = bridge_port->bridge_device; + + list_for_each_entry(mid, &bridge_device->mids_list, list) { + if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) + mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add); + } +} + static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) @@ -1399,6 +1607,30 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int +__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_mid *mid) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + if (bridge_port->bridge_device->multicast_enabled) { + if (bridge_port->bridge_device->multicast_enabled) { + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, + false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + } + } + + err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); + if (err) + netdev_err(dev, "Unable to remove MC SFD\n"); + + return err; +} + static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_mdb *mdb) { @@ -1410,8 +1642,6 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_mid *mid; u16 fid_index; - u16 mid_idx; - int err = 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (!bridge_port) @@ -1426,25 +1656,33 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index); + mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); if (!mid) { netdev_err(dev, "Unable to remove port from MC DB\n"); return -EINVAL; } - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false); - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); + return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); +} - mid_idx = mid->mid; - if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, - mid_idx, false); - if (err) - netdev_err(dev, "Unable to remove MC SFD\n"); - } +static void +mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, + struct 
mlxsw_sp_bridge_port *bridge_port) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_mid *mid, *tmp; - return err; + bridge_device = bridge_port->bridge_device; + + list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { + if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) { + __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, + mid); + } else if (bridge_device->multicast_enabled && + bridge_port->mrouter) { + mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); + } + } } static int mlxsw_sp_port_obj_del(struct net_device *dev, @@ -1981,17 +2219,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) } -static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_mid *mid, *tmp; - - list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { - list_del(&mid->list); - clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); - kfree(mid); - } -} - int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge; @@ -2003,7 +2230,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) bridge->mlxsw_sp = mlxsw_sp; INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); - INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list); bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; @@ -2014,7 +2240,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp_fdb_fini(mlxsw_sp); - mlxsw_sp_mids_fini(mlxsw_sp); WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); kfree(mlxsw_sp->bridge); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index f396a1fef633..a98103539f6b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -62,6 +62,8 @@ enum { MLXSW_TRAP_ID_TTLERROR = 0x53, MLXSW_TRAP_ID_LBERROR = 0x54, MLXSW_TRAP_ID_IPV4_OSPF = 0x55, + MLXSW_TRAP_ID_IPV4_PIM = 0x58, + MLXSW_TRAP_ID_RPF = 0x5C, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61, @@ -89,6 +91,8 @@ enum { MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, + /* Multicast trap used for routes with trap action */ + MLXSW_TRAP_ID_ACL1 = 0x1C1, MLXSW_TRAP_ID_MAX = 0x1FF }; diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 18af2a23a933..dedeacd0bbca 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -1571,10 +1571,8 @@ static int netdev_open(struct net_device *dev) dev->name, (int)readl(ioaddr + ChipCmd)); /* Set the timer to check for link beat. 
*/ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); - np->timer.data = (unsigned long)dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); return 0; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 729095db3e08..99d3c7884a4a 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1652,9 +1652,7 @@ static int ns83820_open(struct net_device *ndev) writel(0, dev->base + TXDP_HI); writel(desc, dev->base + TXDP); - init_timer(&dev->tx_watchdog); - dev->tx_watchdog.data = (unsigned long)ndev; - dev->tx_watchdog.function = ns83820_tx_watch; + setup_timer(&dev->tx_watchdog, ns83820_tx_watch, (unsigned long)ndev); mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); netif_start_queue(ndev); /* FIXME: wait for phy to come up */ diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 994a83a1f0a5..b605b94f4567 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1813,12 +1813,12 @@ static int nv_alloc_rx(struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); if (skb) { np->put_rx_ctx->skb = skb; - np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, skb_tailroom(skb), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_rx_ctx->dma)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_rx_ctx->dma)) { kfree_skb(skb); goto packet_dropped; } @@ -1854,12 +1854,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); if (skb) { np->put_rx_ctx->skb = skb; - np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, skb_tailroom(skb), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_rx_ctx->dma)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_rx_ctx->dma)) { kfree_skb(skb); goto packet_dropped; } @@ -1977,9 +1977,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) { if (tx_skb->dma) { if (tx_skb->dma_single) - pci_unmap_single(np->pci_dev, tx_skb->dma, + dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, tx_skb->dma_len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else pci_unmap_page(np->pci_dev, tx_skb->dma, tx_skb->dma_len, @@ -2047,10 +2047,10 @@ static void nv_drain_rx(struct net_device *dev) } wmb(); if (np->rx_skb[i].skb) { - pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, + dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, (skb_end_pointer(np->rx_skb[i].skb) - - np->rx_skb[i].skb->data), - PCI_DMA_FROMDEVICE); + np->rx_skb[i].skb->data), + DMA_FROM_DEVICE); dev_kfree_skb(np->rx_skb[i].skb); np->rx_skb[i].skb = NULL; } @@ -2224,10 +2224,11 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; - np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_tx_ctx->dma)) { + np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, + skb->data + offset, bcnt, + DMA_TO_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma)) { /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); @@ -2373,10 +2374,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; - np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_tx_ctx->dma)) { + np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, + skb->data + offset, bcnt, + DMA_TO_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma)) { /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); @@ -2810,9 +2812,9 @@ static int nv_rx_process(struct net_device *dev, int limit) * TODO: check if a prefetch of the first cacheline improves * the performance. */ - pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, - np->get_rx_ctx->dma_len, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + DMA_FROM_DEVICE); skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL; @@ -2916,9 +2918,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) * TODO: check if a prefetch of the first cacheline improves * the performance. */ - pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, - np->get_rx_ctx->dma_len, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + DMA_FROM_DEVICE); skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL; @@ -5070,11 +5072,11 @@ static int nv_loopback_test(struct net_device *dev) ret = 0; goto out; } - test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, + test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, skb_tailroom(tx_skb), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(np->pci_dev, - test_dma_addr)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + test_dma_addr)) { dev_kfree_skb_any(tx_skb); goto out; } @@ -5129,9 +5131,9 @@ static int nv_loopback_test(struct net_device *dev) } } - pci_unmap_single(np->pci_dev, test_dma_addr, - (skb_end_pointer(tx_skb) - tx_skb->data), - PCI_DMA_TODEVICE); + dma_unmap_single(&np->pci_dev->dev, test_dma_addr, + (skb_end_pointer(tx_skb) - tx_skb->data), + DMA_TO_DEVICE); dev_kfree_skb_any(tx_skb); out: /* stop engines */ diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 482b85e4d665..77bc7cca8980 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -979,10 +979,8 @@ static int hamachi_open(struct net_device *dev) dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus)); } /* Set the timer to check for link beat. */ - init_timer(&hmp->timer); + setup_timer(&hmp->timer, hamachi_timer, (unsigned long)dev); hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. 
*/ - hmp->timer.data = (unsigned long)dev; - hmp->timer.function = hamachi_timer; /* timer handler */ add_timer(&hmp->timer); return 0; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index fa7770da6ef8..33c241f52a71 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -632,10 +632,8 @@ static int yellowfin_open(struct net_device *dev) } /* Set the timer to check for link beat. */ - init_timer(&yp->timer); + setup_timer(&yp->timer, yellowfin_timer, (unsigned long)dev); yp->timer.expires = jiffies + 3*HZ; - yp->timer.data = (unsigned long)dev; - yp->timer.function = yellowfin_timer; /* timer handler */ add_timer(&yp->timer); out: return rc; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 2991179c2fd0..05479d435469 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3891,10 +3891,8 @@ static int ql3xxx_probe(struct pci_dev *pdev, INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); - init_timer(&qdev->adapter_timer); - qdev->adapter_timer.function = ql3xxx_timer; + setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev); qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ - qdev->adapter_timer.data = (unsigned long)qdev; if (!cards_found) { pr_alert("%s\n", DRV_STRING); diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index bed34684994f..bdc3833fab7e 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -438,10 +438,8 @@ static int net_open(struct net_device *dev) hardware_init(dev); - init_timer(&lp->timer); + setup_timer(&lp->timer, atp_timed_checker, (unsigned long)dev); lp->timer.expires = jiffies + TIMED_CHECKER; - lp->timer.data = (unsigned long)dev; - lp->timer.function = atp_timed_checker; /* timer handler */ add_timer(&lp->timer); netif_start_queue(dev); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 40bd88362e3d..cb61247b0526 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1065,10 +1065,8 @@ sis900_open(struct net_device *net_dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ - init_timer(&sis_priv->timer); + setup_timer(&sis_priv->timer, sis900_timer, (unsigned long)net_dev); sis_priv->timer.expires = jiffies + HZ; - sis_priv->timer.data = (unsigned long)net_dev; - sis_priv->timer.function = sis900_timer; add_timer(&sis_priv->timer); return 0; diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 6a0e1d4b597c..2a9724898fcf 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -739,10 +739,8 @@ static int epic_open(struct net_device *dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. 
*/ - init_timer(&ep->timer); + setup_timer(&ep->timer, epic_timer, (unsigned long)dev); ep->timer.expires = jiffies + 3*HZ; - ep->timer.data = (unsigned long)dev; - ep->timer.function = epic_timer; /* timer handler */ add_timer(&ep->timer); return rc; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1763e48c84e2..f41661a04f23 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2217,10 +2217,8 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) { priv->tx_coal_frames = STMMAC_TX_FRAMES; priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - init_timer(&priv->txtimer); + setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv); priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); - priv->txtimer.data = (unsigned long)priv; - priv->txtimer.function = stmmac_tx_timer; add_timer(&priv->txtimer); } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 382993c1561c..a74d78f64af9 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5039,10 +5039,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&cp->stat_lock[N_TX_RINGS]); mutex_init(&cp->pm_mutex); - init_timer(&cp->link_timer); - cp->link_timer.function = cas_link_timer; - cp->link_timer.data = (unsigned long) cp; - + setup_timer(&cp->link_timer, cas_link_timer, (unsigned long)cp); #if 1 /* Just in case the implementation of atomic operations * change so that an explicit initialization is necessary. diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..bde19b307d0d 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6123,10 +6123,8 @@ static int niu_open(struct net_device *dev) err = niu_init_hw(np); if (!err) { - init_timer(&np->timer); + setup_timer(&np->timer, niu_timer, (unsigned long)np); np->timer.expires = jiffies + HZ; - np->timer.data = (unsigned long) np; - np->timer.function = niu_timer; err = niu_enable_interrupts(np, 1); if (err) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index fa607d062cb3..b75ab8f44968 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2910,9 +2910,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) gp->msg_enable = DEFAULT_MSG; - init_timer(&gp->link_timer); - gp->link_timer.function = gem_link_timer; - gp->link_timer.data = (unsigned long) gp; + setup_timer(&gp->link_timer, gem_link_timer, (unsigned long)gp); INIT_WORK(&gp->reset_task, gem_reset_task); diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index ddd43e09111e..cd1185e66133 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -859,9 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale) cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); - init_timer(&ale->timer); - ale->timer.data = (unsigned long)ale; - ale->timer.function = cpsw_ale_timer; + setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale); if (ale->ageout) { ale->timer.expires = jiffies + ale->ageout; add_timer(&ale->timer); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 28cb38af1a34..4ad821655e51 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ 
b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3616,9 +3616,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, } spin_unlock_bh(&gbe_dev->hw_stats_lock); - init_timer(&gbe_dev->timer); - gbe_dev->timer.data = (unsigned long)gbe_dev; - gbe_dev->timer.function = netcp_ethss_timer; + setup_timer(&gbe_dev->timer, netcp_ethss_timer, + (unsigned long)gbe_dev); gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; add_timer(&gbe_dev->timer); *inst_priv = gbe_dev; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index cec9e70ab995..a913538d3213 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2256,16 +2256,14 @@ spider_net_setup_netdev(struct spider_net_card *card) pci_set_drvdata(card->pdev, netdev); - init_timer(&card->tx_timer); - card->tx_timer.function = - (void (*)(unsigned long)) spider_net_cleanup_tx_ring; - card->tx_timer.data = (unsigned long) card; + setup_timer(&card->tx_timer, + (void(*)(unsigned long))spider_net_cleanup_tx_ring, + (unsigned long)card); netdev->irq = card->pdev->irq; card->aneg_count = 0; - init_timer(&card->aneg_timer); - card->aneg_timer.function = spider_net_link_phy; - card->aneg_timer.data = (unsigned long) card; + setup_timer(&card->aneg_timer, spider_net_link_phy, + (unsigned long)card); netif_napi_add(netdev, &card->napi, spider_net_poll, SPIDER_NET_NAPI_WEIGHT); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 021a8ec411ab..97fe8dfb602d 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -623,9 +623,7 @@ static int sixpack_open(struct tty_struct *tty) netif_start_queue(dev); - init_timer(&sp->tx_t); - sp->tx_t.function = sp_xmit_on_air; - sp->tx_t.data = (unsigned long) sp; + setup_timer(&sp->tx_t, sp_xmit_on_air, (unsigned long)sp); init_timer(&sp->resync_t); diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 1503f10122f7..1e62d00732f2 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -840,6 +840,7 @@ static int epp_open(struct net_device *dev) unsigned char tmp[128]; unsigned char stat; unsigned long tstart; + struct pardev_cb par_cb; if (!pp) { printk(KERN_ERR "%s: parport at 0x%lx unknown\n", bc_drvname, dev->base_addr); @@ -859,8 +860,21 @@ static int epp_open(struct net_device *dev) return -EIO; } memset(&bc->modem, 0, sizeof(bc->modem)); - bc->pdev = parport_register_device(pp, dev->name, NULL, epp_wakeup, - NULL, PARPORT_DEV_EXCL, dev); + memset(&par_cb, 0, sizeof(par_cb)); + par_cb.wakeup = epp_wakeup; + par_cb.private = (void *)dev; + par_cb.flags = PARPORT_DEV_EXCL; + for (i = 0; i < NR_PORTS; i++) + if (baycom_device[i] == dev) + break; + + if (i == NR_PORTS) { + pr_err("%s: no device found\n", bc_drvname); + parport_put_port(pp); + return -ENODEV; + } + + bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i); parport_put_port(pp); if (!bc->pdev) { printk(KERN_ERR "%s: cannot register parport at 0x%lx\n", bc_drvname, pp->base); @@ -1185,6 +1199,23 @@ MODULE_LICENSE("GPL"); /* --------------------------------------------------------------------- */ +static int baycom_epp_par_probe(struct pardevice *par_dev) +{ + struct device_driver *drv = par_dev->dev.driver; + int len = strlen(drv->name); + + if (strncmp(par_dev->name, drv->name, len)) + return -ENODEV; + + return 0; +} + +static struct parport_driver baycom_epp_par_driver = { + .name = "bce", + .probe = 
baycom_epp_par_probe, + .devmodel = true, +}; + static void __init baycom_epp_dev_setup(struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); @@ -1204,10 +1235,15 @@ static void __init baycom_epp_dev_setup(struct net_device *dev) static int __init init_baycomepp(void) { - int i, found = 0; + int i, found = 0, ret; char set_hw = 1; printk(bc_drvinfo); + + ret = parport_register_driver(&baycom_epp_par_driver); + if (ret) + return ret; + /* * register net devices */ @@ -1241,7 +1277,12 @@ static int __init init_baycomepp(void) found++; } - return found ? 0 : -ENXIO; + if (found == 0) { + parport_unregister_driver(&baycom_epp_par_driver); + return -ENXIO; + } + + return 0; } static void __exit cleanup_baycomepp(void) @@ -1260,6 +1301,7 @@ static void __exit cleanup_baycomepp(void) printk(paranoia_str, "cleanup_module"); } } + parport_unregister_driver(&baycom_epp_par_driver); } module_init(init_baycomepp); diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 71ddadbf2368..76cc140774a2 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1229,10 +1229,8 @@ static int rr_open(struct net_device *dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ - init_timer(&rrpriv->timer); + setup_timer(&rrpriv->timer, rr_timer, (unsigned long)dev); rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */ - rrpriv->timer.data = (unsigned long)dev; - rrpriv->timer.function = rr_timer; /* timer handler */ add_timer(&rrpriv->timer); netif_start_queue(dev); diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index ef688518ad77..115fa3f37a86 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -21,6 +21,9 @@ * * USB initialization is * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com> + * + * Busware HUL support is + * Copyright (c) 2017 Josef Filzmaier <j.filzmaier@gmx.at> */ #include <linux/kernel.h> @@ -42,9 +45,12 @@ #define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */ #define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */ +struct atusb_chip_data; + struct atusb { struct ieee802154_hw *hw; struct usb_device *usb_dev; + struct atusb_chip_data *data; int shutdown; /* non-zero if shutting down */ int err; /* set by first error */ @@ -65,6 +71,14 @@ struct atusb { unsigned char fw_hw_type; /* Firmware hardware type */ }; +struct atusb_chip_data { + u16 t_channel_switch; + int rssi_base_val; + + int (*set_channel)(struct ieee802154_hw*, u8, u8); + int (*set_txpower)(struct ieee802154_hw*, s32); +}; + /* ----- USB commands without data ----------------------------------------- */ /* To reduce the number of error checks in the code, we record the first error @@ -163,6 +177,18 @@ static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, return ret; } +static int atusb_read_subreg(struct atusb *lp, + unsigned int addr, unsigned int mask, + unsigned int shift) +{ + int rc; + + rc = atusb_read_reg(lp, addr); + rc = (rc & mask) >> shift; + + return rc; +} + static int atusb_get_and_clear_error(struct atusb *atusb) { int err = atusb->err; @@ -379,18 +405,6 @@ static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) return ret; } -static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel) -{ - struct atusb *atusb = hw->priv; - int ret; - - ret = atusb_write_subreg(atusb, SR_CHANNEL, channel); - if (ret < 0) - return ret; - msleep(1); /* @@@ ugly synchronization */ - return 0; -} - 
static int atusb_ed(struct ieee802154_hw *hw, u8 *level) { BUG_ON(!level); @@ -475,6 +489,17 @@ static const s32 atusb_powers[ATUSB_MAX_TX_POWERS + 1] = { }; static int +atusb_txpower(struct ieee802154_hw *hw, s32 mbm) +{ + struct atusb *atusb = hw->priv; + + if (atusb->data) + return atusb->data->set_txpower(hw, mbm); + else + return -ENOTSUPP; +} + +static int atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct atusb *atusb = hw->priv; @@ -488,12 +513,43 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) return -EINVAL; } +static int +hulusb_set_txpower(struct ieee802154_hw *hw, s32 mbm) +{ + u32 i; + + for (i = 0; i < hw->phy->supported.tx_powers_size; i++) { + if (hw->phy->supported.tx_powers[i] == mbm) + return atusb_write_subreg(hw->priv, SR_TX_PWR_212, i); + } + + return -EINVAL; +} + #define ATUSB_MAX_ED_LEVELS 0xF static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = { -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300, -7100, -6900, -6700, -6500, -6300, -6100, }; +#define AT86RF212_MAX_TX_POWERS 0x1F +static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = { + 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, + -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, + -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, +}; + +#define AT86RF2XX_MAX_ED_LEVELS 0xF +static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, + -8000, -7800, -7600, -7400, -7200, -7000, +}; + +static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, + -7800, -7600, -7400, -7200, -7000, -6800, +}; + static int atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) { @@ -527,6 +583,30 @@ atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca) return atusb_write_subreg(atusb, SR_CCA_MODE, val); } +static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val) +{ + unsigned int cca_ed_thres; + + cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES); + + switch (rssi_base_val) { + case -98: + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98); + lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres]; + break; + case -100: + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); + lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres]; + break; + default: + WARN_ON(1); + } + + return 0; +} + static int atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { @@ -541,6 +621,92 @@ atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) return -EINVAL; } +static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +{ + struct atusb *atusb = hw->priv; + int ret = -ENOTSUPP; + + if (atusb->data) { + ret = atusb->data->set_channel(hw, page, channel); + /* @@@ ugly synchronization */ + msleep(atusb->data->t_channel_switch); + } + + return ret; +} + +static int atusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +{ + struct atusb *atusb = hw->priv; + int ret; + + ret = atusb_write_subreg(atusb, SR_CHANNEL, channel); + if (ret < 0) + return ret; + return 0; +} + +static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +{ + int rc; + int rssi_base_val; + 
+ struct atusb *lp = hw->priv; + + if (channel == 0) + rc = atusb_write_subreg(lp, SR_SUB_MODE, 0); + else + rc = atusb_write_subreg(lp, SR_SUB_MODE, 1); + if (rc < 0) + return rc; + + if (page == 0) { + rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 0); + rssi_base_val = -100; + } else { + rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 1); + rssi_base_val = -98; + } + if (rc < 0) + return rc; + + rc = hulusb_set_cca_ed_level(lp, rssi_base_val); + if (rc < 0) + return rc; + + /* This sets the symbol_duration according frequency on the 212. + * TODO move this handling while set channel and page in cfg802154. + * We can do that, this timings are according 802.15.4 standard. + * If we do that in cfg802154, this is a more generic calculation. + * + * This should also protected from ifs_timer. Means cancel timer and + * init with a new value. For now, this is okay. + */ + if (channel == 0) { + if (page == 0) { + /* SUB:0 and BPSK:0 -> BPSK-20 */ + lp->hw->phy->symbol_duration = 50; + } else { + /* SUB:1 and BPSK:0 -> BPSK-40 */ + lp->hw->phy->symbol_duration = 25; + } + } else { + if (page == 0) + /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */ + lp->hw->phy->symbol_duration = 40; + else + /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */ + lp->hw->phy->symbol_duration = 16; + } + + lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD * + lp->hw->phy->symbol_duration; + lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD * + lp->hw->phy->symbol_duration; + + return atusb_write_subreg(lp, SR_CHANNEL, channel); +} + static int atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries) { @@ -559,6 +725,14 @@ atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries } static int +hulusb_set_lbt(struct ieee802154_hw *hw, bool on) +{ + struct atusb *atusb = hw->priv; + + return atusb_write_subreg(atusb, SR_CSMA_LBT_MODE, on); +} + +static int atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; @@ -593,6 +767,20 @@ atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) return 0; } +struct atusb_chip_data atusb_chip_data = { + .t_channel_switch = 1, + .rssi_base_val = -91, + .set_txpower = atusb_set_txpower, + .set_channel = atusb_set_channel, +}; + +struct atusb_chip_data hulusb_chip_data = { + .t_channel_switch = 11, + .rssi_base_val = -100, + .set_txpower = hulusb_set_txpower, + .set_channel = hulusb_set_channel, +}; + static const struct ieee802154_ops atusb_ops = { .owner = THIS_MODULE, .xmit_async = atusb_xmit, @@ -601,7 +789,8 @@ static const struct ieee802154_ops atusb_ops = { .start = atusb_start, .stop = atusb_stop, .set_hw_addr_filt = atusb_set_hw_addr_filt, - .set_txpower = atusb_set_txpower, + .set_txpower = atusb_txpower, + .set_lbt = hulusb_set_lbt, .set_cca_mode = atusb_set_cca_mode, .set_cca_ed_level = atusb_set_cca_ed_level, .set_csma_params = atusb_set_csma_params, @@ -614,6 +803,7 @@ static const struct ieee802154_ops atusb_ops = { static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; + char *hw_name; unsigned char *buffer; int ret; @@ -630,9 +820,31 @@ static int atusb_get_and_show_revision(struct atusb *atusb) atusb->fw_ver_min = buffer[1]; atusb->fw_hw_type = buffer[2]; + switch (atusb->fw_hw_type) { + case ATUSB_HW_TYPE_100813: + case ATUSB_HW_TYPE_101216: + case ATUSB_HW_TYPE_110131: + hw_name = "ATUSB"; + atusb->data = &atusb_chip_data; + break; + case ATUSB_HW_TYPE_RZUSB: + hw_name = "RZUSB"; + atusb->data = &atusb_chip_data; + break; + case 
ATUSB_HW_TYPE_HULUSB: + hw_name = "HULUSB"; + atusb->data = &hulusb_chip_data; + break; + default: + hw_name = "UNKNOWN"; + atusb->err = -ENOTSUPP; + ret = -ENOTSUPP; + break; + } + dev_info(&usb_dev->dev, - "Firmware: major: %u, minor: %u, hardware type: %u\n", - atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type); + "Firmware: major: %u, minor: %u, hardware type: %s (%d)\n", + atusb->fw_ver_maj, atusb->fw_ver_min, hw_name, atusb->fw_hw_type); } if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) { dev_info(&usb_dev->dev, @@ -667,11 +879,12 @@ static int atusb_get_and_show_build(struct atusb *atusb) return ret; } -static int atusb_get_and_show_chip(struct atusb *atusb) +static int atusb_get_and_conf_chip(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; uint8_t man_id_0, man_id_1, part_num, version_num; const char *chip; + struct ieee802154_hw *hw = atusb->hw; man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0); man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1); @@ -681,6 +894,22 @@ static int atusb_get_and_show_chip(struct atusb *atusb) if (atusb->err) return atusb->err; + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; + + hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; + + hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | + BIT(NL802154_CCA_CARRIER) | + BIT(NL802154_CCA_ENERGY_CARRIER); + hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | + BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); + + hw->phy->cca.mode = NL802154_CCA_ENERGY; + + hw->phy->current_page = 0; + if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) { dev_err(&usb_dev->dev, "non-Atmel transceiver xxxx%02x%02x\n", @@ -691,9 +920,36 @@ static int atusb_get_and_show_chip(struct atusb *atusb) switch (part_num) { case 2: chip = "AT86RF230"; + atusb->hw->phy->supported.channels[0] = 0x7FFF800; + atusb->hw->phy->current_channel = 11; /* reset default */ + atusb->hw->phy->symbol_duration = 16; + atusb->hw->phy->supported.tx_powers = atusb_powers; + atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); + hw->phy->supported.cca_ed_levels = atusb_ed_levels; + hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels); break; case 3: chip = "AT86RF231"; + atusb->hw->phy->supported.channels[0] = 0x7FFF800; + atusb->hw->phy->current_channel = 11; /* reset default */ + atusb->hw->phy->symbol_duration = 16; + atusb->hw->phy->supported.tx_powers = atusb_powers; + atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); + hw->phy->supported.cca_ed_levels = atusb_ed_levels; + hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels); + break; + case 7: + chip = "AT86RF212"; + atusb->hw->flags |= IEEE802154_HW_LBT; + atusb->hw->phy->supported.channels[0] = 0x00007FF; + atusb->hw->phy->supported.channels[2] = 0x00007FF; + atusb->hw->phy->current_channel = 5; + atusb->hw->phy->symbol_duration = 25; + atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; + atusb->hw->phy->supported.tx_powers = at86rf212_powers; + atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); + atusb->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; + atusb->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); break; default: dev_err(&usb_dev->dev, @@ -702,6 +958,9 @@ static int atusb_get_and_show_chip(struct atusb *atusb) goto fail; } + hw->phy->transmit_power = hw->phy->supported.tx_powers[0]; + 
hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7]; + dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num); return 0; @@ -794,32 +1053,9 @@ static int atusb_probe(struct usb_interface *interface, goto fail; hw->parent = &usb_dev->dev; - hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | - IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; - - hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | - WPAN_PHY_FLAG_CCA_MODE; - - hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | - BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); - hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | - BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); - - hw->phy->supported.cca_ed_levels = atusb_ed_levels; - hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels); - - hw->phy->cca.mode = NL802154_CCA_ENERGY; - - hw->phy->current_page = 0; - hw->phy->current_channel = 11; /* reset default */ - hw->phy->supported.channels[0] = 0x7FFF800; - hw->phy->supported.tx_powers = atusb_powers; - hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers); - hw->phy->transmit_power = hw->phy->supported.tx_powers[0]; - hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7]; atusb_command(atusb, ATUSB_RF_RESET, 0); - atusb_get_and_show_chip(atusb); + atusb_get_and_conf_chip(atusb); atusb_get_and_show_revision(atusb); atusb_get_and_show_build(atusb); atusb_set_extended_addr(atusb); @@ -941,5 +1177,6 @@ MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>"); MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>"); MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>"); MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>"); +MODULE_AUTHOR("Josef Filzmaier <j.filzmaier@gmx.at>"); MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h index b22bbaa77590..555d14bf14a3 100644 --- a/drivers/net/ieee802154/atusb.h +++ b/drivers/net/ieee802154/atusb.h @@ -50,6 +50,14 @@ enum atusb_requests { ATUSB_EUI64_READ, }; +enum { + ATUSB_HW_TYPE_100813, /* 2010-08-13 */ + ATUSB_HW_TYPE_101216, /* 2010-12-16 */ + ATUSB_HW_TYPE_110131, /* 2011-01-31, ATmega32U2-based */ + ATUSB_HW_TYPE_RZUSB, /* Atmel Raven USB dongle with at86rf230 */ + ATUSB_HW_TYPE_HULUSB, /* Busware HUL USB dongle with at86rf212 */ +}; + /* * Direction bRequest wValue wIndex wLength * diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d2aea961e0f4..1ffe77e95d46 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1231,11 +1231,14 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } - if (data && data[IFLA_MACVLAN_FLAGS] && + if (!data) + return 0; + + if (data[IFLA_MACVLAN_FLAGS] && nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) return -EINVAL; - if (data && data[IFLA_MACVLAN_MODE]) { + if (data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: case MACVLAN_MODE_VEPA: @@ -1248,7 +1251,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], } } - if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { + if (data[IFLA_MACVLAN_MACADDR_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) { case MACVLAN_MACADDR_ADD: case MACVLAN_MACADDR_DEL: @@ -1260,7 +1263,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], } } - if (data && data[IFLA_MACVLAN_MACADDR]) { + if 
(data[IFLA_MACVLAN_MACADDR]) { if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN) return -EINVAL; @@ -1268,7 +1271,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } - if (data && data[IFLA_MACVLAN_MACADDR_COUNT]) + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; return 0; diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 9cbe645e3d89..d4670ecdb1ba 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -22,17 +22,29 @@ #define RTL821x_INER 0x12 #define RTL821x_INER_INIT 0x6400 #define RTL821x_INSR 0x13 +#define RTL821x_PAGE_SELECT 0x1f #define RTL8211E_INER_LINK_STATUS 0x400 #define RTL8211F_INER_LINK_STATUS 0x0010 #define RTL8211F_INSR 0x1d -#define RTL8211F_PAGE_SELECT 0x1f #define RTL8211F_TX_DELAY 0x100 +#define RTL8201F_ISR 0x1e +#define RTL8201F_IER 0x13 + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); +static int rtl8201_ack_interrupt(struct phy_device *phydev) +{ + int err; + + err = phy_read(phydev, RTL8201F_ISR); + + return (err < 0) ? err : 0; +} + static int rtl821x_ack_interrupt(struct phy_device *phydev) { int err; @@ -46,14 +58,33 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev) { int err; - phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43); + phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43); err = phy_read(phydev, RTL8211F_INSR); /* restore to default page 0 */ - phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); + phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); return (err < 0) ? err : 0; } +static int rtl8201_config_intr(struct phy_device *phydev) +{ + int err; + + /* switch to page 7 */ + phy_write(phydev, RTL821x_PAGE_SELECT, 0x7); + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + err = phy_write(phydev, RTL8201F_IER, + BIT(13) | BIT(12) | BIT(11)); + else + err = phy_write(phydev, RTL8201F_IER, 0); + + /* restore to default page 0 */ + phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); + + return err; +} + static int rtl8211b_config_intr(struct phy_device *phydev) { int err; @@ -102,7 +133,7 @@ static int rtl8211f_config_init(struct phy_device *phydev) if (ret < 0) return ret; - phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); + phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08); reg = phy_read(phydev, 0x11); /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */ @@ -114,7 +145,7 @@ static int rtl8211f_config_init(struct phy_device *phydev) phy_write(phydev, 0x11, reg); /* restore to default page 0 */ - phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); + phy_write(phydev, RTL821x_PAGE_SELECT, 0x0); return 0; } @@ -129,6 +160,18 @@ static struct phy_driver realtek_drvs[] = { .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, }, { + .phy_id = 0x001cc816, + .name = "RTL8201F 10/100Mbps Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &rtl8201_ack_interrupt, + .config_intr = &rtl8201_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { .phy_id = 0x001cc912, .name = "RTL8211B Gigabit Ethernet", .phy_id_mask = 0x001fffff, @@ -181,6 +224,7 @@ static struct phy_driver realtek_drvs[] = { module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { + { 0x001cc816, 0x001fffff }, { 0x001cc912, 0x001fffff }, { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, diff --git a/drivers/net/slip/slip.c 
b/drivers/net/slip/slip.c index 436dd78c396a..eb8a18991d8c 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -763,12 +763,8 @@ static struct slip *sl_alloc(dev_t line) sl->mode = SL_MODE_DEFAULT; #ifdef CONFIG_SLIP_SMART /* initialize timer_list struct */ - init_timer(&sl->keepalive_timer); - sl->keepalive_timer.data = (unsigned long)sl; - sl->keepalive_timer.function = sl_keepalive; - init_timer(&sl->outfill_timer); - sl->outfill_timer.data = (unsigned long)sl; - sl->outfill_timer.function = sl_outfill; + setup_timer(&sl->keepalive_timer, sl_keepalive, (unsigned long)sl); + setup_timer(&sl->outfill_timer, sl_outfill, (unsigned long)sl); #endif slip_devs[i] = dev; return sl; diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index 1468ddf424cc..a5ef97010eb3 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -137,7 +137,13 @@ static struct team_port *lb_htpm_select_tx_port(struct team *team, struct sk_buff *skb, unsigned char hash) { - return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash)); + struct team_port *port; + + port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash)); + if (likely(port)) + return port; + /* If no valid port in the table, fall back to simple hash */ + return lb_hash_select_tx_port(team, lb_priv, skb, hash); } struct lb_select_tx_port { diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 522d2900cd1d..f4d7362eb325 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -245,7 +245,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, * - We are allowed to put 4 bytes at tail if skb_cloned() * is false (and if we have 4 bytes of tailroom) * - * TCP packets for example are cloned, but skb_header_release() + * TCP packets for example are cloned, but __skb_header_release() * was called in tcp stack, allowing us to use headroom for our needs. */ if (!skb_header_cloned(skb) && diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index dbc90313f472..aeb62e17d19d 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -805,9 +805,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); - init_timer(&catc->timer); - catc->timer.data = (long) catc; - catc->timer.function = catc_stats_timer; + setup_timer(&catc->timer, catc_stats_timer, (long)catc); catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); diff --git a/drivers/net/virtio_net. b/drivers/net/virtio_net. new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/drivers/net/virtio_net. 
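Several hunks in this series (slip and catc above, and the WAN and wireless drivers further down) make the same mechanical change: the open-coded init_timer()/.function/.data sequence becomes a single setup_timer() call. The following sketch is illustrative only and is not part of the patch; struct foo_priv, foo_poll() and foo_start_polling() are invented names. It assumes the pre-timer_setup() API used throughout this diff, in which the callback receives an unsigned long cookie.

/*
 * Illustrative sketch only -- foo_priv, foo_poll() and foo_start_polling()
 * are made-up names, not taken from any driver in this patch.
 */
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/timer.h>

struct foo_priv {
	struct timer_list poll_timer;
};

static void foo_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct foo_priv *priv = netdev_priv(dev);

	/* ... service the hardware here ... */

	/* re-arm for the next poll interval */
	mod_timer(&priv->poll_timer, jiffies + HZ);
}

static void foo_start_polling(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/*
	 * Old idiom removed by this series:
	 *	init_timer(&priv->poll_timer);
	 *	priv->poll_timer.function = foo_poll;
	 *	priv->poll_timer.data = (unsigned long)dev;
	 */
	setup_timer(&priv->poll_timer, foo_poll, (unsigned long)dev);
	priv->poll_timer.expires = jiffies + HZ;
	add_timer(&priv->poll_timer);
}

The change is behaviour-neutral: setup_timer() simply packs the callback and its cookie into one call, so the timer's wiring is visible at a single site instead of three separate assignments.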
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 511f8339fa96..dd14a4547932 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/cpu.h> #include <linux/average.h> +#include <linux/filter.h> #include <net/route.h> static int napi_weight = NAPI_POLL_WEIGHT; @@ -372,9 +373,20 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static bool virtnet_xdp_xmit(struct virtnet_info *vi, - struct receive_queue *rq, - struct xdp_buff *xdp) +static void virtnet_xdp_flush(struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct send_queue *sq; + unsigned int qp; + + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + sq = &vi->sq[qp]; + + virtqueue_kick(sq->vq); +} + +static bool __virtnet_xdp_xmit(struct virtnet_info *vi, + struct xdp_buff *xdp) { struct virtio_net_hdr_mrg_rxbuf *hdr; unsigned int len; @@ -408,10 +420,19 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi, return false; } - virtqueue_kick(sq->vq); return true; } +static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct virtnet_info *vi = netdev_priv(dev); + bool sent = __virtnet_xdp_xmit(vi, xdp); + + if (!sent) + return -ENOSPC; + return 0; +} + static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; @@ -484,7 +505,8 @@ static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, void *ctx, - unsigned int len) + unsigned int len, + bool *xdp_xmit) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -494,7 +516,7 @@ static struct sk_buff *receive_small(struct net_device *dev, unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); struct page *page = virt_to_head_page(buf); - unsigned int delta = 0; + unsigned int delta = 0, err; struct page *xdp_page; len -= vi->hdr_len; @@ -542,8 +564,16 @@ static struct sk_buff *receive_small(struct net_device *dev, delta = orig_data - xdp.data; break; case XDP_TX: - if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) + if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) trace_xdp_exception(vi->dev, xdp_prog, act); + else + *xdp_xmit = true; + rcu_read_unlock(); + goto xdp_xmit; + case XDP_REDIRECT: + err = xdp_do_redirect(dev, &xdp, xdp_prog); + if (!err) + *xdp_xmit = true; rcu_read_unlock(); goto xdp_xmit; default: @@ -604,7 +634,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct receive_queue *rq, void *buf, void *ctx, - unsigned int len) + unsigned int len, + bool *xdp_xmit) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf; u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); @@ -614,6 +645,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct bpf_prog *xdp_prog; unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); + int err; head_skb = NULL; @@ -657,6 +689,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.data_end = xdp.data + (len - vi->hdr_len); act = bpf_prog_run_xdp(xdp_prog, &xdp); + if (act != XDP_PASS) + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); + switch (act) { case XDP_PASS: /* recalculate offset to account for any header @@ -672,18 +707,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, put_page(page); head_skb = page_to_skb(vi, rq, xdp_page, offset, len, PAGE_SIZE); - 
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); return head_skb; } break; case XDP_TX: - if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp))) + if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) trace_xdp_exception(vi->dev, xdp_prog, act); - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); + else + *xdp_xmit = true; if (unlikely(xdp_page != page)) goto err_xdp; rcu_read_unlock(); goto xdp_xmit; + case XDP_REDIRECT: + err = xdp_do_redirect(dev, &xdp, xdp_prog); + if (!err) + *xdp_xmit = true; + rcu_read_unlock(); + goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: @@ -691,7 +732,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); - ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); goto err_xdp; } } @@ -789,7 +829,7 @@ xdp_xmit: } static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len, void **ctx) + void *buf, unsigned int len, void **ctx, bool *xdp_xmit) { struct net_device *dev = vi->dev; struct sk_buff *skb; @@ -810,11 +850,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, } if (vi->mergeable_rx_bufs) - skb = receive_mergeable(dev, vi, rq, buf, ctx, len); + skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit); else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len); else - skb = receive_small(dev, vi, rq, buf, ctx, len); + skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit); if (unlikely(!skb)) return 0; @@ -1072,7 +1112,7 @@ static void refill_work(struct work_struct *work) } } -static int virtnet_receive(struct receive_queue *rq, int budget) +static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int len, received = 0, bytes = 0; @@ -1084,13 +1124,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget) while (received < budget && (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { - bytes += receive_buf(vi, rq, buf, len, ctx); + bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); received++; } } else { while (received < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - bytes += receive_buf(vi, rq, buf, len, NULL); + bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); received++; } } @@ -1162,15 +1202,19 @@ static int virtnet_poll(struct napi_struct *napi, int budget) struct receive_queue *rq = container_of(napi, struct receive_queue, napi); unsigned int received; + bool xdp_xmit = false; virtnet_poll_cleantx(rq); - received = virtnet_receive(rq, budget); + received = virtnet_receive(rq, budget, &xdp_xmit); /* Out of packets? 
*/ if (received < budget) virtqueue_napi_complete(napi, rq->vq, received); + if (xdp_xmit) + xdp_do_flush_map(); + return received; } @@ -2070,6 +2114,8 @@ static const struct net_device_ops virtnet_netdev = { .ndo_poll_controller = virtnet_netpoll, #endif .ndo_xdp = virtnet_xdp, + .ndo_xdp_xmit = virtnet_xdp_xmit, + .ndo_xdp_flush = virtnet_xdp_flush, .ndo_features_check = passthru_features_check, }; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 9b243e6f3008..cc18b7b11612 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -132,7 +132,6 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, skb_orphan(skb); skb_dst_set(skb, dst); - skb_dst_force(skb); /* set pkt_type to avoid skb hitting packet taps twice - * once on Tx and again in Rx processing diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index a043fb1367bd..64f176496da4 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -1127,10 +1127,8 @@ static int dscc4_open(struct net_device *dev) done: netif_start_queue(dev); - init_timer(&dpriv->timer); + setup_timer(&dpriv->timer, dscc4_timer, (unsigned long)dev); dpriv->timer.expires = jiffies + 10*HZ; - dpriv->timer.data = (unsigned long)dev; - dpriv->timer.function = dscc4_timer; add_timer(&dpriv->timer); netif_carrier_on(dev); diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index a408abc25512..c696d42f4502 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -293,10 +293,8 @@ static void cisco_start(struct net_device *dev) st->up = st->txseq = st->rxseq = 0; spin_unlock_irqrestore(&st->lock, flags); - init_timer(&st->timer); + setup_timer(&st->timer, cisco_timer, (unsigned long)dev); st->timer.expires = jiffies + HZ; /* First poll after 1 s */ - st->timer.function = cisco_timer; - st->timer.data = (unsigned long)dev; add_timer(&st->timer); } diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 78596e42a3f3..425a47ffed25 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1003,11 +1003,10 @@ static void fr_start(struct net_device *dev) state(hdlc)->n391cnt = 0; state(hdlc)->txseq = state(hdlc)->rxseq = 0; - init_timer(&state(hdlc)->timer); + setup_timer(&state(hdlc)->timer, fr_timer, + (unsigned long)dev); /* First poll after 1 s */ state(hdlc)->timer.expires = jiffies + HZ; - state(hdlc)->timer.function = fr_timer; - state(hdlc)->timer.data = (unsigned long)dev; add_timer(&state(hdlc)->timer); } else fr_set_link_state(1, dev); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0d2e00ece804..c7721c729541 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -610,9 +610,7 @@ static void ppp_start(struct net_device *dev) for (i = 0; i < IDX_COUNT; i++) { struct proto *proto = &ppp->protos[i]; proto->dev = dev; - init_timer(&proto->timer); - proto->timer.function = ppp_timer; - proto->timer.data = (unsigned long)proto; + setup_timer(&proto->timer, ppp_timer, (unsigned long)proto); proto->state = CLOSED; } ppp->protos[IDX_LCP].pid = PID_LCP; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 4698450c77d1..ae69d65158e6 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1084,10 +1084,8 @@ static int lmc_open(struct net_device *dev) * Setup a timer for the watchdog on probe, and start it running. * Since lmc_ok == 0, it will be a NOP for now. 
*/ - init_timer (&sc->timer); + setup_timer(&sc->timer, lmc_watchdog, (unsigned long)dev); sc->timer.expires = jiffies + HZ; - sc->timer.data = (unsigned long) dev; - sc->timer.function = lmc_watchdog; add_timer (&sc->timer); lmc_trace(dev, "lmc_open out"); diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 236c62538036..0cc48902dbb9 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -1617,10 +1617,8 @@ static void setup_sdla(struct net_device *dev) flp->deassoc = sdla_deassoc; flp->dlci_conf = sdla_dlci_conf; - init_timer(&flp->timer); + setup_timer(&flp->timer, sdla_poll, (unsigned long)dev); flp->timer.expires = 1; - flp->timer.data = (unsigned long) dev; - flp->timer.function = sdla_poll; } static struct net_device *sdla; diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index e6b2517e6334..4e5cc2b7045a 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -1753,9 +1753,7 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, aggr_conn->aggr_sz = AGGR_SZ_DEFAULT; aggr_conn->dev = vif->ndev; - init_timer(&aggr_conn->timer); - aggr_conn->timer.function = aggr_timeout; - aggr_conn->timer.data = (unsigned long) aggr_conn; + setup_timer(&aggr_conn->timer, aggr_timeout, (unsigned long)aggr_conn); aggr_conn->aggr_info = aggr_info; aggr_conn->timer_scheduled = false; diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c index b68436b23a63..e816d53c2c05 100644 --- a/drivers/net/wireless/atmel/atmel.c +++ b/drivers/net/wireless/atmel/atmel.c @@ -1579,11 +1579,10 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, priv->default_beacon_period = priv->beacon_period = 100; priv->listen_interval = 1; - init_timer(&priv->management_timer); + setup_timer(&priv->management_timer, atmel_management_timer, + (unsigned long)dev); spin_lock_init(&priv->irqlock); spin_lock_init(&priv->timerlock); - priv->management_timer.function = atmel_management_timer; - priv->management_timer.data = (unsigned long) dev; dev->netdev_ops = &atmel_netdev_ops; dev->wireless_handlers = &atmel_handler_def; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index aaed4ab503ad..ab3f22353154 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3260,9 +3260,8 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg) brcmf_cfg80211_escan_handler); cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; /* Init scan_timeout timer */ - init_timer(&cfg->escan_timeout); - cfg->escan_timeout.data = (unsigned long) cfg; - cfg->escan_timeout.function = brcmf_escan_timeout; + setup_timer(&cfg->escan_timeout, brcmf_escan_timeout, + (unsigned long)cfg); INIT_WORK(&cfg->escan_timeout_work, brcmf_cfg80211_escan_timeout_worker); } diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 070dfd68bb83..7ad286d6e9a7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -411,9 +411,8 @@ static void bl_cmd_timeout(unsigned long priv) static int bl_start_cmd_timer(struct rsi_hw *adapter, u32 timeout) { - init_timer(&adapter->bl_cmd_timer); - adapter->bl_cmd_timer.data = (unsigned long)adapter; - adapter->bl_cmd_timer.function = (void *)&bl_cmd_timeout; + setup_timer(&adapter->bl_cmd_timer, (void 
*)&bl_cmd_timeout, + (unsigned long)adapter); adapter->bl_cmd_timer.expires = (msecs_to_jiffies(timeout) + jiffies); adapter->blcmd_timer_expired = false; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index ee8ed9da00ad..dcfcb153918c 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -520,8 +520,7 @@ int xenvif_init_queue(struct xenvif_queue *queue) queue->credit_bytes = queue->remaining_credit = ~0UL; queue->credit_usec = 0UL; - init_timer(&queue->credit_timeout); - queue->credit_timeout.function = xenvif_tx_credit_callback; + setup_timer(&queue->credit_timeout, xenvif_tx_credit_callback, 0UL); queue->credit_window_start = get_jiffies_64(); queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 59e09854c4f7..91fcadbede80 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -182,6 +182,21 @@ struct qeth_sbp_info { __u32 reflect_promisc_primary:1; }; +struct qeth_vnicc_info { + /* supported/currently configured VNICCs; updated in IPA exchanges */ + u32 sup_chars; + u32 cur_chars; + /* supported commands: bitmasks which VNICCs support respective cmd */ + u32 set_char_sup; + u32 getset_timeout_sup; + /* timeout value for the learning characteristic */ + u32 learning_timeout; + /* characteristics wanted/configured by user */ + u32 wanted_chars; + /* has user explicitly enabled rx_bcast while online? */ + bool rx_bcast_enabled; +}; + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) { @@ -673,6 +688,7 @@ struct qeth_card_options { struct qeth_routing_info route6; struct qeth_ipa_info ipa6; struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ + struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; int layer2; int performance_stats; @@ -946,13 +962,13 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); -int qeth_do_send_packet_fast(struct qeth_card *card, - struct qeth_qdio_out_q *queue, struct sk_buff *skb, +int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int hd_len, unsigned int offset, int elements); + unsigned int offset, unsigned int hd_len, + int elements_needed); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct net_device *, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bae7440abc01..68e118f1202e 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4040,35 +4040,23 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, return flush_cnt; } -int qeth_do_send_packet_fast(struct qeth_card *card, - struct qeth_qdio_out_q *queue, struct sk_buff *skb, +int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len) { - struct qeth_qdio_out_buffer *buffer; - int index; + int index = queue->next_buf_to_fill; + struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; - /* 
spin until we get the queue ... */ - while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, - QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); - /* ... now we've got the queue */ - index = queue->next_buf_to_fill; - buffer = queue->bufs[queue->next_buf_to_fill]; /* * check if buffer is empty to make sure that we do not 'overtake' * ourselves and try to fill a buffer that is already primed */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) - goto out; - queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % - QDIO_MAX_BUFFERS_PER_Q; - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + return -EBUSY; + queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q; qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); qeth_flush_buffers(queue, index, 1); return 0; -out: - atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); - return -EBUSY; } EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); @@ -4923,7 +4911,6 @@ static void qeth_qdio_establish_cq(struct qeth_card *card, if (card->options.cq == QETH_CQ_ENABLED) { int offset = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); - i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { in_sbal_ptrs[offset + i] = (struct qdio_buffer *) virt_to_phys(card->qdio.c_q->bufs[i].buffer); diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index 6dd7d05e5693..5f8a2b834d39 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -167,7 +167,7 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = { {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"}, {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"}, - {IPA_RC_TRACE_ALREADY_ACTIVE, "trace already active"}, + {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"}, {IPA_RC_INVALID_FORMAT, "invalid format or length"}, {IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"}, {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"}, @@ -193,6 +193,7 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = { {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"}, {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"}, {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"}, + {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"}, {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"}, {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"}, {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, @@ -253,6 +254,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = { {IPA_CMD_DELGMAC, "delgmac"}, {IPA_CMD_SETVLAN, "setvlan"}, {IPA_CMD_DELVLAN, "delvlan"}, + {IPA_CMD_VNICC, "vnic_characteristics"}, {IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"}, {IPA_CMD_SETCCID, "setccid"}, {IPA_CMD_DELCCID, "delccid"}, diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 912e0107de8f..6a7654df6e78 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -90,6 +90,7 @@ enum qeth_ipa_cmds { IPA_CMD_DELGMAC = 0x24, IPA_CMD_SETVLAN = 0x25, IPA_CMD_DELVLAN = 0x26, + IPA_CMD_VNICC = 0x2a, IPA_CMD_SETBRIDGEPORT_OSA = 0x2b, IPA_CMD_SETCCID = 0x41, IPA_CMD_DELCCID = 0x42, @@ -165,6 +166,8 @@ enum qeth_ipa_return_codes { IPA_RC_L2_INVALID_VLAN_ID = 0x2015, IPA_RC_L2_DUP_VLAN_ID = 0x2016, IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017, + IPA_RC_L2_VLAN_ID_NOT_ALLOWED = 0x2050, + IPA_RC_VNICC_VNICBP = 0x20B0, IPA_RC_SBP_OSA_NOT_CONFIGURED = 0x2B0C, IPA_RC_SBP_OSA_OS_MISMATCH = 0x2B10, 
IPA_RC_SBP_OSA_ANO_DEV_PRIMARY = 0x2B14, @@ -197,6 +200,9 @@ enum qeth_ipa_return_codes { IPA_RC_ENOMEM = 0xfffe, IPA_RC_FFFF = 0xffff }; +/* for VNIC Characteristics */ +#define IPA_RC_VNICC_OOSEQ 0x0005 + /* for SET_DIAGNOSTIC_ASSIST */ #define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL #define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR @@ -551,6 +557,71 @@ struct qeth_ipacmd_diagass { __u8 cdata[64]; } __attribute__ ((packed)); +/* VNIC Characteristics IPA Command: *****************************************/ +/* IPA commands/sub commands for VNICC */ +#define IPA_VNICC_QUERY_CHARS 0x00000000L +#define IPA_VNICC_QUERY_CMDS 0x00000001L +#define IPA_VNICC_ENABLE 0x00000002L +#define IPA_VNICC_DISABLE 0x00000004L +#define IPA_VNICC_SET_TIMEOUT 0x00000008L +#define IPA_VNICC_GET_TIMEOUT 0x00000010L + +/* VNICC flags */ +#define QETH_VNICC_FLOODING 0x80000000 +#define QETH_VNICC_MCAST_FLOODING 0x40000000 +#define QETH_VNICC_LEARNING 0x20000000 +#define QETH_VNICC_TAKEOVER_SETVMAC 0x10000000 +#define QETH_VNICC_TAKEOVER_LEARNING 0x08000000 +#define QETH_VNICC_BRIDGE_INVISIBLE 0x04000000 +#define QETH_VNICC_RX_BCAST 0x02000000 + +/* VNICC default values */ +#define QETH_VNICC_ALL 0xff000000 +#define QETH_VNICC_DEFAULT QETH_VNICC_RX_BCAST +/* default VNICC timeout in seconds */ +#define QETH_VNICC_DEFAULT_TIMEOUT 600 + +/* VNICC header */ +struct qeth_ipacmd_vnicc_hdr { + u32 sup; + u32 cur; +}; + +/* VNICC sub command header */ +struct qeth_vnicc_sub_hdr { + u16 data_length; + u16 reserved; + u32 sub_command; +}; + +/* query supported commands for VNIC characteristic */ +struct qeth_vnicc_query_cmds { + u32 vnic_char; + u32 sup_cmds; +}; + +/* enable/disable VNIC characteristic */ +struct qeth_vnicc_set_char { + u32 vnic_char; +}; + +/* get/set timeout for VNIC characteristic */ +struct qeth_vnicc_getset_timeout { + u32 vnic_char; + u32 timeout; +}; + +/* complete VNICC IPA command message */ +struct qeth_ipacmd_vnicc { + struct qeth_ipacmd_vnicc_hdr hdr; + struct qeth_vnicc_sub_hdr sub_hdr; + union { + struct qeth_vnicc_query_cmds query_cmds; + struct qeth_vnicc_set_char set_char; + struct qeth_vnicc_getset_timeout getset_timeout; + }; +}; + /* SETBRIDGEPORT IPA Command: *********************************************/ enum qeth_ipa_sbp_cmd { IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L, @@ -692,6 +763,7 @@ struct qeth_ipa_cmd { struct qeth_ipacmd_diagass diagass; struct qeth_ipacmd_setbridgeport sbp; struct qeth_ipacmd_addr_change addrchange; + struct qeth_ipacmd_vnicc vnicc; } data; } __attribute__ ((packed)); diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index 0d59f9a45ea9..241df6b98ab4 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -14,6 +14,12 @@ int qeth_l2_create_device_attributes(struct device *); void qeth_l2_remove_device_attributes(struct device *); void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state); +int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state); +int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout); +int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout); +bool qeth_l2_vnicc_is_in_use(struct qeth_card *card); + struct qeth_mac { u8 mac_addr[OSA_ADDR_LEN]; u8 is_uc:1; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 760b023eae95..7b61c2ef4c74 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -33,6 +33,10 @@ static 
void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); static void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd); +static void qeth_l2_vnicc_set_defaults(struct qeth_card *card); +static void qeth_l2_vnicc_init(struct qeth_card *card); +static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, + u32 *timeout); static int qeth_l2_verify_dev(struct net_device *dev) { @@ -74,7 +78,7 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) return ndev; } -static int qeth_setdel_makerc(struct qeth_card *card, int retcode) +static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) { int rc; @@ -124,8 +128,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); - return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob, - NULL, NULL)); + return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob, + NULL, NULL)); } static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) @@ -285,17 +289,40 @@ static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, } } +static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode) +{ + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + + switch (retcode) { + case IPA_RC_SUCCESS: + return 0; + case IPA_RC_L2_INVALID_VLAN_ID: + return -EINVAL; + case IPA_RC_L2_DUP_VLAN_ID: + return -EEXIST; + case IPA_RC_L2_VLAN_ID_NOT_FOUND: + return -ENOENT; + case IPA_RC_L2_VLAN_ID_NOT_ALLOWED: + return -EPERM; + case -ENOMEM: + return -ENOMEM; + default: + return -EIO; + } +} + static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) + struct qeth_reply *reply, + unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 2, "L2sdvcb"); - cmd = (struct qeth_ipa_cmd *) data; if (cmd->hdr.return_code) { - QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. 
" - "Continuing\n", cmd->data.setdelvlan.vlan_id, - QETH_CARD_IFNAME(card), cmd->hdr.return_code); + QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n", + cmd->data.setdelvlan.vlan_id, + QETH_CARD_IFNAME(card), cmd->hdr.return_code); QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); } @@ -303,7 +330,7 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, } static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, - enum qeth_ipa_cmds ipacmd) + enum qeth_ipa_cmds ipacmd) { struct qeth_ipa_cmd *cmd; struct qeth_cmd_buffer *iob; @@ -314,8 +341,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, return -ENOMEM; cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); cmd->data.setdelvlan.vlan_id = i; - return qeth_send_ipa_cmd(card, iob, - qeth_l2_send_setdelvlan_cb, NULL); + return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob, + qeth_l2_send_setdelvlan_cb, NULL)); } static void qeth_l2_process_vlans(struct qeth_card *card) @@ -694,7 +721,7 @@ static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, rc = -E2BIG; goto out; } - rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset, + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, sizeof(*hdr) + data_offset); out: if (rc) @@ -919,6 +946,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) hash_init(card->mac_htable); card->options.layer2 = 1; card->info.hwtrap = 0; + qeth_l2_vnicc_set_defaults(card); return 0; } @@ -1045,9 +1073,14 @@ static int qeth_l2_start_ipassists(struct qeth_card *card) static void qeth_l2_trace_features(struct qeth_card *card) { - QETH_CARD_TEXT(card, 2, "l2featur"); + /* Set BridgePort features */ + QETH_CARD_TEXT(card, 2, "featuSBP"); QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs, sizeof(card->options.sbp.supported_funcs)); + /* VNIC Characteristics features */ + QETH_CARD_TEXT(card, 2, "feaVNICC"); + QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars, + sizeof(card->options.vnicc.sup_chars)); } static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) @@ -1072,8 +1105,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (card->options.sbp.supported_funcs) dev_info(&card->gdev->dev, "The device represents a Bridge Capable Port\n"); - qeth_trace_features(card); - qeth_l2_trace_features(card); if (!card->dev && qeth_l2_setup_netdev(card)) { rc = -ENODEV; @@ -1090,6 +1121,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) } else card->info.hwtrap = 0; + /* for the rx_bcast characteristic, init VNICC after setmac */ + qeth_l2_vnicc_init(card); + + qeth_trace_features(card); + qeth_l2_trace_features(card); + qeth_l2_setup_bridgeport_attrs(card); card->state = CARD_STATE_HARDSETUP; @@ -2039,6 +2076,454 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable) } EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set); +static bool qeth_bridgeport_is_in_use(struct qeth_card *card) +{ + return (card->options.sbp.role || card->options.sbp.reflect_promisc || + card->options.sbp.hostnotification); +} + +/* VNIC Characteristics support */ + +/* handle VNICC IPA command return codes; convert to error codes */ +static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc) +{ + int rc; + + switch (ipa_rc) { + case IPA_RC_SUCCESS: + return ipa_rc; + case IPA_RC_L2_UNSUPPORTED_CMD: + case IPA_RC_NOTSUPP: + rc = -EOPNOTSUPP; + break; + case 
IPA_RC_VNICC_OOSEQ: + rc = -EALREADY; + break; + case IPA_RC_VNICC_VNICBP: + rc = -EBUSY; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EACCES; + break; + default: + rc = -EIO; + } + + QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc); + return rc; +} + +/* generic VNICC request call back control */ +struct _qeth_l2_vnicc_request_cbctl { + u32 sub_cmd; + struct { + u32 vnic_char; + u32 timeout; + } param; + struct { + union{ + u32 *sup_cmds; + u32 *timeout; + }; + } result; +}; + +/* generic VNICC request call back */ +static int qeth_l2_vnicc_request_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct _qeth_l2_vnicc_request_cbctl *cbctl = + (struct _qeth_l2_vnicc_request_cbctl *) reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; + + QETH_CARD_TEXT(card, 2, "vniccrcb"); + if (cmd->hdr.return_code) + return 0; + /* return results to caller */ + card->options.vnicc.sup_chars = rep->hdr.sup; + card->options.vnicc.cur_chars = rep->hdr.cur; + + if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS) + *cbctl->result.sup_cmds = rep->query_cmds.sup_cmds; + + if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT) + *cbctl->result.timeout = rep->getset_timeout.timeout; + + return 0; +} + +/* generic VNICC request */ +static int qeth_l2_vnicc_request(struct qeth_card *card, + struct _qeth_l2_vnicc_request_cbctl *cbctl) +{ + struct qeth_ipacmd_vnicc *req; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + int rc; + + QETH_CARD_TEXT(card, 2, "vniccreq"); + + /* get new buffer for request */ + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_VNICC, 0); + if (!iob) + return -ENOMEM; + + /* create header for request */ + cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); + req = &cmd->data.vnicc; + + /* create sub command header for request */ + req->sub_hdr.data_length = sizeof(req->sub_hdr); + req->sub_hdr.sub_command = cbctl->sub_cmd; + + /* create sub command specific request fields */ + switch (cbctl->sub_cmd) { + case IPA_VNICC_QUERY_CHARS: + break; + case IPA_VNICC_QUERY_CMDS: + req->sub_hdr.data_length += sizeof(req->query_cmds); + req->query_cmds.vnic_char = cbctl->param.vnic_char; + break; + case IPA_VNICC_ENABLE: + case IPA_VNICC_DISABLE: + req->sub_hdr.data_length += sizeof(req->set_char); + req->set_char.vnic_char = cbctl->param.vnic_char; + break; + case IPA_VNICC_SET_TIMEOUT: + req->getset_timeout.timeout = cbctl->param.timeout; + /* fallthrough */ + case IPA_VNICC_GET_TIMEOUT: + req->sub_hdr.data_length += sizeof(req->getset_timeout); + req->getset_timeout.vnic_char = cbctl->param.vnic_char; + break; + default: + qeth_release_buffer(iob->channel, iob); + return -EOPNOTSUPP; + } + + /* send request */ + rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, + (void *) cbctl); + + return qeth_l2_vnicc_makerc(card, rc); +} + +/* VNICC query VNIC characteristics request */ +static int qeth_l2_vnicc_query_chars(struct qeth_card *card) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS; + + QETH_CARD_TEXT(card, 2, "vniccqch"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* VNICC query sub commands request */ +static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, + u32 *sup_cmds) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS; + cbctl.param.vnic_char = 
vnic_char; + cbctl.result.sup_cmds = sup_cmds; + + QETH_CARD_TEXT(card, 2, "vniccqcm"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* VNICC enable/disable characteristic request */ +static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, + u32 cmd) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = cmd; + cbctl.param.vnic_char = vnic_char; + + QETH_CARD_TEXT(card, 2, "vniccedc"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* VNICC get/set timeout for characteristic request */ +static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc, + u32 cmd, u32 *timeout) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = cmd; + cbctl.param.vnic_char = vnicc; + if (cmd == IPA_VNICC_SET_TIMEOUT) + cbctl.param.timeout = *timeout; + if (cmd == IPA_VNICC_GET_TIMEOUT) + cbctl.result.timeout = timeout; + + QETH_CARD_TEXT(card, 2, "vniccgst"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* set current VNICC flag state; called from sysfs store function */ +int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state) +{ + int rc = 0; + u32 cmd; + + QETH_CARD_TEXT(card, 2, "vniccsch"); + + /* do not change anything if BridgePort is enabled */ + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* check if characteristic and enable/disable are supported */ + if (!(card->options.vnicc.sup_chars & vnicc) || + !(card->options.vnicc.set_char_sup & vnicc)) + return -EOPNOTSUPP; + + /* set enable/disable command and store wanted characteristic */ + if (state) { + cmd = IPA_VNICC_ENABLE; + card->options.vnicc.wanted_chars |= vnicc; + } else { + cmd = IPA_VNICC_DISABLE; + card->options.vnicc.wanted_chars &= ~vnicc; + } + + /* do we need to do anything? */ + if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars) + return rc; + + /* if card is not ready, simply stop here */ + if (!qeth_card_hw_is_reachable(card)) { + if (state) + card->options.vnicc.cur_chars |= vnicc; + else + card->options.vnicc.cur_chars &= ~vnicc; + return rc; + } + + rc = qeth_l2_vnicc_set_char(card, vnicc, cmd); + if (rc) + card->options.vnicc.wanted_chars = + card->options.vnicc.cur_chars; + else { + /* successful online VNICC change; handle special cases */ + if (state && vnicc == QETH_VNICC_RX_BCAST) + card->options.vnicc.rx_bcast_enabled = true; + if (!state && vnicc == QETH_VNICC_LEARNING) + qeth_l2_vnicc_recover_timeout(card, vnicc, + &card->options.vnicc.learning_timeout); + } + + return rc; +} + +/* get current VNICC flag state; called from sysfs show function */ +int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccgch"); + + /* do not get anything if BridgePort is enabled */ + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* check if characteristic is supported */ + if (!(card->options.vnicc.sup_chars & vnicc)) + return -EOPNOTSUPP; + + /* if card is ready, query current VNICC state */ + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l2_vnicc_query_chars(card); + + *state = (card->options.vnicc.cur_chars & vnicc) ? true : false; + return rc; +} + +/* set VNICC timeout; called from sysfs store function. 
Currently, only learning + * supports timeout + */ +int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccsto"); + + /* do not change anything if BridgePort is enabled */ + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* check if characteristic and set_timeout are supported */ + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) + return -EOPNOTSUPP; + + /* do we need to do anything? */ + if (card->options.vnicc.learning_timeout == timeout) + return rc; + + /* if card is not ready, simply store the value internally and return */ + if (!qeth_card_hw_is_reachable(card)) { + card->options.vnicc.learning_timeout = timeout; + return rc; + } + + /* send timeout value to card; if successful, store value internally */ + rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING, + IPA_VNICC_SET_TIMEOUT, &timeout); + if (!rc) + card->options.vnicc.learning_timeout = timeout; + + return rc; +} + +/* get current VNICC timeout; called from sysfs show function. Currently, only + * learning supports timeout + */ +int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccgto"); + + /* do not get anything if BridgePort is enabled */ + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* check if characteristic and get_timeout are supported */ + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) + return -EOPNOTSUPP; + /* if card is ready, get timeout. Otherwise, just return stored value */ + *timeout = card->options.vnicc.learning_timeout; + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING, + IPA_VNICC_GET_TIMEOUT, + timeout); + + return rc; +} + +/* check if VNICC is currently enabled */ +bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) +{ + /* if everything is turned off, VNICC is not active */ + if (!card->options.vnicc.cur_chars) + return false; + /* default values are only OK if rx_bcast was not enabled by user + * or the card is offline. + */ + if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) { + if (!card->options.vnicc.rx_bcast_enabled || + !qeth_card_hw_is_reachable(card)) + return false; + } + return true; +} + +/* recover user timeout setting */ +static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, + u32 *timeout) +{ + if (card->options.vnicc.sup_chars & vnicc && + card->options.vnicc.getset_timeout_sup & vnicc && + !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT, + timeout)) + return false; + *timeout = QETH_VNICC_DEFAULT_TIMEOUT; + return true; +} + +/* recover user characteristic setting */ +static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc, + bool enable) +{ + u32 cmd = enable ? 
IPA_VNICC_ENABLE : IPA_VNICC_DISABLE; + + if (card->options.vnicc.sup_chars & vnicc && + card->options.vnicc.set_char_sup & vnicc && + !qeth_l2_vnicc_set_char(card, vnicc, cmd)) + return false; + card->options.vnicc.wanted_chars &= ~vnicc; + card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc; + return true; +} + +/* (re-)initialize VNICC */ +static void qeth_l2_vnicc_init(struct qeth_card *card) +{ + u32 *timeout = &card->options.vnicc.learning_timeout; + unsigned int chars_len, i; + unsigned long chars_tmp; + u32 sup_cmds, vnicc; + bool enable, error; + + QETH_CARD_TEXT(card, 2, "vniccini"); + /* reset rx_bcast */ + card->options.vnicc.rx_bcast_enabled = 0; + /* initial query and storage of VNIC characteristics */ + if (qeth_l2_vnicc_query_chars(card)) { + if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT || + *timeout != QETH_VNICC_DEFAULT_TIMEOUT) + dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n"); + /* fail quietly if user didn't change the default config */ + card->options.vnicc.sup_chars = 0; + card->options.vnicc.cur_chars = 0; + card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT; + return; + } + /* get supported commands for each supported characteristic */ + chars_tmp = card->options.vnicc.sup_chars; + chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE; + for_each_set_bit(i, &chars_tmp, chars_len) { + vnicc = BIT(i); + qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds); + if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) || + !(sup_cmds & IPA_VNICC_GET_TIMEOUT)) + card->options.vnicc.getset_timeout_sup &= ~vnicc; + if (!(sup_cmds & IPA_VNICC_ENABLE) || + !(sup_cmds & IPA_VNICC_DISABLE)) + card->options.vnicc.set_char_sup &= ~vnicc; + } + /* enforce assumed default values and recover settings, if changed */ + error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, + timeout); + chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; + chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; + chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; + for_each_set_bit(i, &chars_tmp, chars_len) { + vnicc = BIT(i); + enable = card->options.vnicc.wanted_chars & vnicc; + error |= qeth_l2_vnicc_recover_char(card, vnicc, enable); + } + if (error) + dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n"); +} + +/* configure default values of VNIC characteristics */ +static void qeth_l2_vnicc_set_defaults(struct qeth_card *card) +{ + /* characteristics values */ + card->options.vnicc.sup_chars = QETH_VNICC_ALL; + card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT; + card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT; + /* supported commands */ + card->options.vnicc.set_char_sup = QETH_VNICC_ALL; + card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING; + /* settings wanted by users */ + card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT; +} + module_init(qeth_l2_init); module_exit(qeth_l2_exit); MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index 9696baa49e2d..4608daedb204 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -20,6 +20,9 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev, if (!card) return -EINVAL; + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + if (qeth_card_hw_is_reachable(card) && card->options.sbp.supported_funcs) rc = qeth_bridgeport_query_ports(card, @@ -60,6 +63,11 @@ 
static ssize_t qeth_bridge_port_role_state_show(struct device *dev, static ssize_t qeth_bridge_port_role_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct qeth_card *card = dev_get_drvdata(dev); + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + return qeth_bridge_port_role_state_show(dev, attr, buf, 0); } @@ -83,7 +91,10 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev, mutex_lock(&card->conf_mutex); - if (card->options.sbp.reflect_promisc) /* Forbid direct manipulation */ + if (qeth_l2_vnicc_is_in_use(card)) + rc = -EBUSY; + else if (card->options.sbp.reflect_promisc) + /* Forbid direct manipulation */ rc = -EPERM; else if (qeth_card_hw_is_reachable(card)) { rc = qeth_bridgeport_setrole(card, role); @@ -103,6 +114,11 @@ static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show, static ssize_t qeth_bridge_port_state_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct qeth_card *card = dev_get_drvdata(dev); + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + return qeth_bridge_port_role_state_show(dev, attr, buf, 1); } @@ -118,6 +134,9 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev, if (!card) return -EINVAL; + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + enabled = card->options.sbp.hostnotification; return sprintf(buf, "%d\n", enabled); @@ -142,7 +161,9 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, mutex_lock(&card->conf_mutex); - if (qeth_card_hw_is_reachable(card)) { + if (qeth_l2_vnicc_is_in_use(card)) + rc = -EBUSY; + else if (qeth_card_hw_is_reachable(card)) { rc = qeth_bridgeport_an_set(card, enable); if (!rc) card->options.sbp.hostnotification = enable; @@ -167,6 +188,9 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev, if (!card) return -EINVAL; + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + if (card->options.sbp.reflect_promisc) { if (card->options.sbp.reflect_promisc_primary) state = "primary"; @@ -202,7 +226,9 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev, mutex_lock(&card->conf_mutex); - if (card->options.sbp.role != QETH_SBP_ROLE_NONE) + if (qeth_l2_vnicc_is_in_use(card)) + rc = -EBUSY; + else if (card->options.sbp.role != QETH_SBP_ROLE_NONE) rc = -EPERM; else { card->options.sbp.reflect_promisc = enable; @@ -231,16 +257,6 @@ static struct attribute_group qeth_l2_bridgeport_attr_group = { .attrs = qeth_l2_bridgeport_attrs, }; -int qeth_l2_create_device_attributes(struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &qeth_l2_bridgeport_attr_group); -} - -void qeth_l2_remove_device_attributes(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &qeth_l2_bridgeport_attr_group); -} - /** * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online. 
@@ -270,10 +286,168 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 	qeth_bridgeport_an_set(card, 0);
 }
 
+/* VNIC CHARS support */
+
+/* convert sysfs attr name to VNIC characteristic */
+static u32 qeth_l2_vnicc_sysfs_attr_to_char(const char *attr_name)
+{
+	if (sysfs_streq(attr_name, "flooding"))
+		return QETH_VNICC_FLOODING;
+	else if (sysfs_streq(attr_name, "mcast_flooding"))
+		return QETH_VNICC_MCAST_FLOODING;
+	else if (sysfs_streq(attr_name, "learning"))
+		return QETH_VNICC_LEARNING;
+	else if (sysfs_streq(attr_name, "takeover_setvmac"))
+		return QETH_VNICC_TAKEOVER_SETVMAC;
+	else if (sysfs_streq(attr_name, "takeover_learning"))
+		return QETH_VNICC_TAKEOVER_LEARNING;
+	else if (sysfs_streq(attr_name, "bridge_invisible"))
+		return QETH_VNICC_BRIDGE_INVISIBLE;
+	else if (sysfs_streq(attr_name, "rx_bcast"))
+		return QETH_VNICC_RX_BCAST;
+
+	return 0;
+}
+
+/* get current timeout setting */
+static ssize_t qeth_vnicc_timeout_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	u32 timeout;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	rc = qeth_l2_vnicc_get_timeout(card, &timeout);
+	if (rc == -EBUSY)
+		return sprintf(buf, "n/a (BridgePort)\n");
+	if (rc == -EOPNOTSUPP)
+		return sprintf(buf, "n/a\n");
+	return rc ? rc : sprintf(buf, "%d\n", timeout);
+}
+
+/* change timeout setting */
+static ssize_t qeth_vnicc_timeout_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	u32 timeout;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	rc = kstrtou32(buf, 10, &timeout);
+	if (rc)
+		return rc;
+
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l2_vnicc_set_timeout(card, timeout);
+	mutex_unlock(&card->conf_mutex);
+	return rc ? rc : count;
+}
+
+/* get current setting of characteristic */
+static ssize_t qeth_vnicc_char_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool state;
+	u32 vnicc;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
+	rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
+
+	if (rc == -EBUSY)
+		return sprintf(buf, "n/a (BridgePort)\n");
+	if (rc == -EOPNOTSUPP)
+		return sprintf(buf, "n/a\n");
+	return rc ? rc : sprintf(buf, "%d\n", state);
+}
+
+/* change setting of characteristic */
+static ssize_t qeth_vnicc_char_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct qeth_card *card = dev_get_drvdata(dev);
+	bool state;
+	u32 vnicc;
+	int rc;
+
+	if (!card)
+		return -EINVAL;
+
+	if (kstrtobool(buf, &state))
+		return -EINVAL;
+
+	vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
+	mutex_lock(&card->conf_mutex);
+	rc = qeth_l2_vnicc_set_state(card, vnicc, state);
+	mutex_unlock(&card->conf_mutex);
+
+	return rc ? rc : count;
+}
+
+static DEVICE_ATTR(flooding, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+static DEVICE_ATTR(mcast_flooding, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(learning, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+static DEVICE_ATTR(learning_timeout, 0644, qeth_vnicc_timeout_show,
+		   qeth_vnicc_timeout_store);
+static DEVICE_ATTR(takeover_setvmac, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(takeover_learning, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(bridge_invisible, 0644, qeth_vnicc_char_show,
+		   qeth_vnicc_char_store);
+static DEVICE_ATTR(rx_bcast, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+
+static struct attribute *qeth_l2_vnicc_attrs[] = {
+	&dev_attr_flooding.attr,
+	&dev_attr_mcast_flooding.attr,
+	&dev_attr_learning.attr,
+	&dev_attr_learning_timeout.attr,
+	&dev_attr_takeover_setvmac.attr,
+	&dev_attr_takeover_learning.attr,
+	&dev_attr_bridge_invisible.attr,
+	&dev_attr_rx_bcast.attr,
+	NULL,
+};
+
+static struct attribute_group qeth_l2_vnicc_attr_group = {
+	.attrs = qeth_l2_vnicc_attrs,
+	.name = "vnicc",
+};
+
+static const struct attribute_group *qeth_l2_only_attr_groups[] = {
+	&qeth_l2_bridgeport_attr_group,
+	&qeth_l2_vnicc_attr_group,
+	NULL,
+};
+
+int qeth_l2_create_device_attributes(struct device *dev)
+{
+	return sysfs_create_groups(&dev->kobj, qeth_l2_only_attr_groups);
+}
+
+void qeth_l2_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_groups(&dev->kobj, qeth_l2_only_attr_groups);
+}
+
 const struct attribute_group *qeth_l2_attr_groups[] = {
 	&qeth_device_attr_group,
 	&qeth_device_blkt_group,
-	/* l2 specific, see l2_{create,remove}_device_attributes(): */
+	/* l2 specific, see qeth_l2_only_attr_groups: */
 	&qeth_l2_bridgeport_attr_group,
+	&qeth_l2_vnicc_attr_group,
 	NULL,
 };
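
In the block above, eight sysfs files share one show/store pair: the callback maps attr->attr.name back to a characteristic bit with sysfs_streq(), and the group's .name = "vnicc" places the files in a vnicc/ subdirectory of the device. A condensed, read-only sketch of that dispatch with an invented two-bit state word (demo_enabled_chars); it is not qeth code:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static u32 demo_enabled_chars = BIT(0);	/* invented state for the sketch */

static u32 demo_attr_to_bit(const char *name)
{
	if (sysfs_streq(name, "learning"))
		return BIT(0);
	if (sysfs_streq(name, "flooding"))
		return BIT(1);
	return 0;
}

/* one callback serves several files; the attribute name selects the bit */
static ssize_t demo_char_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u32 bit = demo_attr_to_bit(attr->attr.name);

	return sprintf(buf, "%d\n", !!(demo_enabled_chars & bit));
}

static DEVICE_ATTR(learning, 0444, demo_char_show, NULL);
static DEVICE_ATTR(flooding, 0444, demo_char_show, NULL);

static struct attribute *demo_char_attrs[] = {
	&dev_attr_learning.attr,
	&dev_attr_flooding.attr,
	NULL,
};

static const struct attribute_group demo_char_attr_group = {
	.name = "vnicc",	/* files appear under <device>/vnicc/ */
	.attrs = demo_char_attrs,
};

With a group like this registered on the device, each characteristic reads back as a 0/1 file in the vnicc/ subdirectory, which matches how the qeth attributes above present their state.
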
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ab661a431f7c..270ac9515f6b 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1646,13 +1646,12 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
 	return 0;
 }
 
-static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
-			       struct qeth_hdr *hdr, unsigned short *vlan_id)
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+				struct qeth_hdr *hdr)
 {
 	__u16 prot;
 	struct iphdr *ip_hdr;
 	unsigned char tg_addr[MAX_ADDR_LEN];
-	int is_vlan = 0;
 
 	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
 		prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
@@ -1706,11 +1705,14 @@ static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 
 	skb->protocol = eth_type_trans(skb, card->dev);
 
-	if (hdr->hdr.l3.ext_flags &
-	    (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
-		*vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
-		 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
-		is_vlan = 1;
+	/* copy VLAN tag from hdr into skb */
+	if (!card->options.sniffer &&
+	    (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+				      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+		u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+				hdr->hdr.l3.vlan_id :
+				*((u16 *)&hdr->hdr.l3.dest_addr[12]);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
 	}
 
 	if (card->dev->features & NETIF_F_RXCSUM) {
@@ -1724,7 +1726,6 @@ static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 			skb->ip_summed = CHECKSUM_NONE;
 	} else
 		skb->ip_summed = CHECKSUM_NONE;
-	return is_vlan;
 }
 
 static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
@@ -1733,8 +1734,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 	int work_done = 0;
 	struct sk_buff *skb;
 	struct qeth_hdr *hdr;
-	__u16 vlan_tag = 0;
-	int is_vlan;
 	unsigned int len;
 	__u16 magic;
 
@@ -1764,12 +1763,8 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 					card->dev->addr_len);
 			netif_receive_skb(skb);
 		} else {
-			is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
-						      &vlan_tag);
+			qeth_l3_rebuild_skb(card, skb, hdr);
 			len = skb->len;
-			if (is_vlan && !card->options.sniffer)
-				__vlan_hwaccel_put_tag(skb,
-					htons(ETH_P_8021Q), vlan_tag);
 			napi_gro_receive(&card->napi, skb);
 		}
 		break;
@@ -2771,8 +2766,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 			rc = qeth_do_send_packet(card, queue, new_skb, hdr,
 						 hd_len, hd_len, elements);
 	} else
-		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-					      data_offset, hd_len);
+		rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
+					      hd_len);
 
 	if (!rc) {
 		card->stats.tx_packets++;
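
The qeth_l3_rebuild_skb() change above stops passing the VLAN tag back through an out-parameter and instead attaches it to the skb where it is parsed. A minimal sketch of that receive-side pattern; demo_rx_one() and the has_tag/tci parameters are invented stand-ins for what the hardware descriptor (here, the qeth header) reports, while eth_type_trans(), __vlan_hwaccel_put_tag() and napi_gro_receive() are the kernel APIs the driver itself uses:

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* hand one received frame to the stack */
static void demo_rx_one(struct napi_struct *napi, struct sk_buff *skb,
			bool has_tag, u16 tci)
{
	skb->protocol = eth_type_trans(skb, napi->dev);

	/* record the tag in the skb instead of returning it to the caller */
	if (has_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);

	napi_gro_receive(napi, skb);
}
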
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index e8bcc314cc5f..0ad0f7f4b23f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -1028,52 +1028,31 @@ static const struct attribute_group qeth_device_rxip_group = {
 	.attrs = qeth_rxip_device_attrs,
 };
 
+static const struct attribute_group *qeth_l3_only_attr_groups[] = {
+	&qeth_l3_device_attr_group,
+	&qeth_device_ipato_group,
+	&qeth_device_vipa_group,
+	&qeth_device_rxip_group,
+	NULL,
+};
+
 int qeth_l3_create_device_attributes(struct device *dev)
 {
-	int ret;
-
-	ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
-	if (ret)
-		return ret;
-
-	ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
-	if (ret) {
-		sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
-		return ret;
-	}
-
-	ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
-	if (ret) {
-		sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
-		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
-		return ret;
-	}
-
-	ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
-	if (ret) {
-		sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
-		sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
-		sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
-		return ret;
-	}
-	return 0;
+	return sysfs_create_groups(&dev->kobj, qeth_l3_only_attr_groups);
 }
 
 void qeth_l3_remove_device_attributes(struct device *dev)
 {
-	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
-	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
-	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
-	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
+	sysfs_remove_groups(&dev->kobj, qeth_l3_only_attr_groups);
 }
 
 const struct attribute_group *qeth_l3_attr_groups[] = {
 	&qeth_device_attr_group,
 	&qeth_device_blkt_group,
-	/* l3 specific, see l3_{create,remove}_device_attributes(): */
+	/* l3 specific, see qeth_l3_only_attr_groups: */
 	&qeth_l3_device_attr_group,
 	&qeth_device_ipato_group,
 	&qeth_device_vipa_group,
 	&qeth_device_rxip_group,
-NULL,
+	NULL,
 };
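
Both this file and qeth_l2_sys.c above replace a chain of sysfs_create_group() calls, each with hand-written rollback, by one NULL-terminated array handed to sysfs_create_groups(), which removes any groups it already created if a later one fails. A minimal sketch with two invented, empty groups; not qeth code:

#include <linux/device.h>
#include <linux/sysfs.h>

/* two invented groups, left empty to keep the sketch short */
static struct attribute *demo_basic_attrs[] = { NULL };
static const struct attribute_group demo_basic_group = {
	.attrs = demo_basic_attrs,
};

static struct attribute *demo_extra_attrs[] = { NULL };
static const struct attribute_group demo_extra_group = {
	.name = "extra",		/* optional subdirectory */
	.attrs = demo_extra_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_basic_group,
	&demo_extra_group,
	NULL,				/* the array must be NULL-terminated */
};

static int demo_create_device_attributes(struct device *dev)
{
	/* creates all groups; partial failures are unwound internally */
	return sysfs_create_groups(&dev->kobj, demo_groups);
}

static void demo_remove_device_attributes(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, demo_groups);
}
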