Diffstat (limited to 'drivers/net')
254 files changed, 24944 insertions, 7401 deletions
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 71f0e791355b..b3d02759c226 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -600,7 +600,6 @@ static int ems_usb_start(struct ems_usb *dev) /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -752,10 +751,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); + if (!urb) goto nomem; - } buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { @@ -1007,10 +1004,8 @@ static int ems_usb_probe(struct usb_interface *intf, dev->tx_contexts[i].echo_index = MAX_TX_URBS; dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->intr_urb) { - dev_err(&intf->dev, "Couldn't alloc intr URB\n"); + if (!dev->intr_urb) goto cleanup_candev; - } dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); if (!dev->intr_in_buffer) diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 784a9002fbb9..be928ce62d32 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -558,8 +558,6 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - dev_warn(dev->udev->dev.parent, - "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -730,7 +728,6 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; dev_kfree_skb(skb); goto nourbmem; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 6f0cbc38782e..77e3cc06a30c 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -493,10 +493,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URB\n"); + if (!urb) goto nomem_urb; - } hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC, &urb->transfer_dma); @@ -600,11 +598,8 @@ static int gs_can_open(struct net_device *netdev) /* alloc rx urb */ urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - netdev_err(netdev, - "No memory left for URB\n"); + if (!urb) return -ENOMEM; - } /* alloc rx buffer */ buf = usb_alloc_coherent(dev->udev, diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 6f1f3b675ff5..d51e0c401b48 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -787,10 +787,8 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, int err; urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); + if (!urb) return -ENOMEM; - } buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { @@ -1393,8 +1391,6 @@ static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - dev_warn(dev->udev->dev.parent, - "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -1670,7 +1666,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, urb = 
usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index bfb91d8fa460..c06382cdfdfe 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -399,7 +399,6 @@ static int peak_usb_start(struct peak_usb_device *dev) /* create a URB, and a buffer for it, to receive usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -454,7 +453,6 @@ static int peak_usb_start(struct peak_usb_device *dev) /* create a URB and a buffer for it, to transmit usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } @@ -651,10 +649,8 @@ static int peak_usb_restart(struct peak_usb_device *dev) /* first allocate a urb to handle the asynchronous steps */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(dev->netdev, "no memory left for urb\n"); + if (!urb) return -ENOMEM; - } /* also allocate enough space for the commands to send */ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC); diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index a731720f1d13..108a30e15097 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -623,10 +623,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); + if (!urb) goto nomem; - } buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC, &urb->transfer_dma); @@ -748,7 +746,6 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 8f4544394f44..de6d04429a70 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -16,6 +16,7 @@ config NET_DSA_BCM_SF2 select FIXED_PHY select BCM7XXX_PHY select MDIO_BCM_UNIMAC + select B53 ---help--- This enables support for the Broadcom Starfighter 2 Ethernet switch chips. 
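A note on the CAN USB hunks above: every removed line is an out-of-memory message printed after a failed allocation. The usual rationale for this tree-wide cleanup is that the memory allocator already emits a warning (with a backtrace) when a small GFP allocation fails, so a per-driver "No memory left for URBs" message adds no information. A minimal sketch of the pattern the drivers converge on, using a hypothetical example_start_xmit() rather than any of the functions above:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/usb.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct urb *urb;

	/* No netdev_err() on failure: the allocator already logs OOM. */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* ... fill and submit the URB as the real drivers do ... */

	usb_free_urb(urb);
	return NETDEV_TX_OK;
}

The same reasoning covers the GFP_KERNEL call sites in the probe/open paths above, where only the silent err = -ENOMEM assignment remains.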
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index bda37d336736..1299104a87d4 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -167,6 +167,65 @@ static const struct b53_mib_desc b53_mibs[] = { #define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) +static const struct b53_mib_desc b53_mibs_58xx[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x0c, "TxQPKTQ0" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPKts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredCollision" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x34, "TxFrameInDisc" }, + { 4, 0x38, "TxPausePkts" }, + { 4, 0x3c, "TxQPKTQ1" }, + { 4, 0x40, "TxQPKTQ2" }, + { 4, 0x44, "TxQPKTQ3" }, + { 4, 0x48, "TxQPKTQ4" }, + { 4, 0x4c, "TxQPKTQ5" }, + { 8, 0x50, "RxOctets" }, + { 4, 0x58, "RxUndersizePkts" }, + { 4, 0x5c, "RxPausePkts" }, + { 4, 0x60, "RxPkts64Octets" }, + { 4, 0x64, "RxPkts65to127Octets" }, + { 4, 0x68, "RxPkts128to255Octets" }, + { 4, 0x6c, "RxPkts256to511Octets" }, + { 4, 0x70, "RxPkts512to1023Octets" }, + { 4, 0x74, "RxPkts1024toMaxPktsOctets" }, + { 4, 0x78, "RxOversizePkts" }, + { 4, 0x7c, "RxJabbers" }, + { 4, 0x80, "RxAlignmentErrors" }, + { 4, 0x84, "RxFCSErrors" }, + { 8, 0x88, "RxGoodOctets" }, + { 4, 0x90, "RxDropPkts" }, + { 4, 0x94, "RxUnicastPkts" }, + { 4, 0x98, "RxMulticastPkts" }, + { 4, 0x9c, "RxBroadcastPkts" }, + { 4, 0xa0, "RxSAChanges" }, + { 4, 0xa4, "RxFragments" }, + { 4, 0xa8, "RxJumboPkt" }, + { 4, 0xac, "RxSymblErr" }, + { 4, 0xb0, "InRangeErrCount" }, + { 4, 0xb4, "OutRangeErrCount" }, + { 4, 0xb8, "EEELpiEvent" }, + { 4, 0xbc, "EEELpiDuration" }, + { 4, 0xc0, "RxDiscard" }, + { 4, 0xc8, "TxQPKTQ6" }, + { 4, 0xcc, "TxQPKTQ7" }, + { 4, 0xd0, "TxPkts64Octets" }, + { 4, 0xd4, "TxPkts65to127Octets" }, + { 4, 0xd8, "TxPkts128to255Octets" }, + { 4, 0xdc, "TxPkts256to511Ocets" }, + { 4, 0xe0, "TxPkts512to1023Ocets" }, + { 4, 0xe4, "TxPkts1024toMaxPktOcets" }, +}; + +#define B53_MIBS_58XX_SIZE ARRAY_SIZE(b53_mibs_58xx) + static int b53_do_vlan_op(struct b53_device *dev, u8 op) { unsigned int i; @@ -635,6 +694,8 @@ static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) return b53_mibs_65; else if (is63xx(dev)) return b53_mibs_63xx; + else if (is58xx(dev)) + return b53_mibs_58xx; else return b53_mibs; } @@ -645,6 +706,8 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_65_SIZE; else if (is63xx(dev)) return B53_MIBS_63XX_SIZE; + else if (is58xx(dev)) + return B53_MIBS_58XX_SIZE; else return B53_MIBS_SIZE; } @@ -1252,9 +1315,21 @@ static int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) { struct b53_device *dev = ds_to_priv(ds); + s8 cpu_port = ds->dst->cpu_port; u16 pvlan, reg; unsigned int i; + /* Make this port leave the all VLANs join since we will have proper + * VLAN entries from now on + */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); + reg &= ~BIT(port); + if ((reg & BIT(cpu_port)) == BIT(cpu_port)) + reg &= ~BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + } + dev->ports[port].bridge_dev = bridge; b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); @@ -1287,6 +1362,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port) struct b53_device *dev = ds_to_priv(ds); struct net_device *bridge =
dev->ports[port].bridge_dev; struct b53_vlan *vl = &dev->vlans[0]; + s8 cpu_port = ds->dst->cpu_port; unsigned int i; u16 pvlan, reg, pvid; @@ -1316,10 +1392,19 @@ static void b53_br_leave(struct dsa_switch *ds, int port) else pvid = 0; - b53_get_vlan_entry(dev, pvid, vl); - vl->members |= BIT(port) | BIT(dev->cpu_port); - vl->untag |= BIT(port) | BIT(dev->cpu_port); - b53_set_vlan_entry(dev, pvid, vl); + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg); + reg |= BIT(port); + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + } else { + b53_get_vlan_entry(dev, pvid, vl); + vl->members |= BIT(port) | BIT(dev->cpu_port); + vl->untag |= BIT(port) | BIT(dev->cpu_port); + b53_set_vlan_entry(dev, pvid, vl); + } } static void b53_br_set_stp_state(struct dsa_switch *ds, int port, @@ -1373,8 +1458,13 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); } -static struct dsa_switch_driver b53_switch_ops = { - .tag_protocol = DSA_TAG_PROTO_NONE, +static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds) +{ + return DSA_TAG_PROTO_NONE; +} + +static struct dsa_switch_ops b53_switch_ops = { + .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, .set_addr = b53_set_addr, .get_strings = b53_get_strings, @@ -1593,11 +1683,22 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, + { + .chip_id = BCM7445_DEVICE_ID, + .dev_name = "BCM7445", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, }; static int b53_switch_init(struct b53_device *dev) { - struct dsa_switch *ds = dev->ds; unsigned int i; int ret; @@ -1613,7 +1714,6 @@ static int b53_switch_init(struct b53_device *dev) dev->vta_regs[1] = chip->vta_regs[1]; dev->vta_regs[2] = chip->vta_regs[2]; dev->jumbo_pm_reg = chip->jumbo_pm_reg; - ds->drv = &b53_switch_ops; dev->cpu_port = chip->cpu_port; dev->num_vlans = chip->vlans; dev->num_arl_entries = chip->arl_entries; @@ -1681,7 +1781,8 @@ static int b53_switch_init(struct b53_device *dev) return 0; } -struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, +struct b53_device *b53_switch_alloc(struct device *base, + const struct b53_io_ops *ops, void *priv) { struct dsa_switch *ds; @@ -1700,6 +1801,7 @@ struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, dev->ds = ds; dev->priv = priv; dev->ops = ops; + ds->ops = &b53_switch_ops; mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index aa87c3fffdac..477a16b5660a 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -267,7 +267,7 @@ static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg, return mdiobus_write_nested(bus, addr, reg, value); } -static struct b53_io_ops b53_mdio_ops = { +static const struct b53_io_ops b53_mdio_ops = { .read8 = b53_mdio_read8, .read16 = b53_mdio_read16, .read32 = b53_mdio_read32, diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index 77ffc4312808..cc9e6bd83e0e 100644 --- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c @@ -208,7 +208,7 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg, return 0; } -static struct b53_io_ops b53_mmap_ops = { +static const struct b53_io_ops b53_mmap_ops = { .read8 = b53_mmap_read8, .read16 = b53_mmap_read16, .read32 = b53_mmap_read32, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 835a744f206e..76672dae412d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -60,6 +60,7 @@ enum { BCM53018_DEVICE_ID = 0x53018, BCM53019_DEVICE_ID = 0x53019, BCM58XX_DEVICE_ID = 0x5800, + BCM7445_DEVICE_ID = 0x7445, }; #define B53_N_PORTS 9 @@ -174,6 +175,12 @@ static inline int is5301x(struct b53_device *dev) dev->chip_id == BCM53019_DEVICE_ID; } +static inline int is58xx(struct b53_device *dev) +{ + return dev->chip_id == BCM58XX_DEVICE_ID || + dev->chip_id == BCM7445_DEVICE_ID; +} + #define B53_CPU_PORT_25 5 #define B53_CPU_PORT 8 @@ -182,7 +189,8 @@ static inline int is_cpu_port(struct b53_device *dev, int port) return dev->cpu_port; } -struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, +struct b53_device *b53_switch_alloc(struct device *base, + const struct b53_io_ops *ops, void *priv); int b53_switch_detect(struct b53_device *dev); diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index a0b453ea34c9..dac0af4e2cd0 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -309,6 +309,9 @@ /* Port VLAN mask (16 bit) IMP port is always 8, also on 5325 & co */ #define B53_PVLAN_PORT_MASK(i) ((i) * 2) +/* Join all VLANs register (16 bit) */ +#define B53_JOIN_ALL_VLAN_EN 0x50 + /************************************************************************* * 802.1Q Page Registers *************************************************************************/ diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index 2bda0b5f1578..f89f5308a99b 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -270,7 +270,7 @@ static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value) return spi_write(spi, txbuf, sizeof(txbuf)); } -static struct b53_io_ops b53_spi_ops = { +static const struct b53_io_ops b53_spi_ops = { .read8 = b53_spi_read8, .read16 = b53_spi_read16, .read32 = b53_spi_read32, @@ -317,8 +317,6 @@ static int b53_spi_remove(struct spi_device *spi) static struct spi_driver b53_spi_driver = { .driver = { .name = "b53-switch", - .bus = &spi_bus_type, - .owner = THIS_MODULE, }, .probe = b53_spi_probe, .remove = b53_spi_remove, diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 3e2d4a5fcd5a..8a62b6a69703 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -344,7 +344,7 @@ err: return ret; } -static struct b53_io_ops b53_srab_ops = { +static const struct b53_io_ops b53_srab_ops = { .read8 = b53_srab_read8, .read16 = b53_srab_read16, .read32 = b53_srab_read32, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index b2b838724a9b..51f1fc0dddc5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -29,130 +29,21 @@ #include <linux/brcmphy.h> #include <linux/etherdevice.h> #include <net/switchdev.h> +#include <linux/platform_data/b53.h> #include "bcm_sf2.h" #include "bcm_sf2_regs.h" +#include "b53/b53_priv.h" +#include "b53/b53_regs.h" -/* String, offset, and register size in bytes if different from 4 bytes */ -static const struct 
bcm_sf2_hw_stats bcm_sf2_mib[] = { - { "TxOctets", 0x000, 8 }, - { "TxDropPkts", 0x020 }, - { "TxQPKTQ0", 0x030 }, - { "TxBroadcastPkts", 0x040 }, - { "TxMulticastPkts", 0x050 }, - { "TxUnicastPKts", 0x060 }, - { "TxCollisions", 0x070 }, - { "TxSingleCollision", 0x080 }, - { "TxMultipleCollision", 0x090 }, - { "TxDeferredCollision", 0x0a0 }, - { "TxLateCollision", 0x0b0 }, - { "TxExcessiveCollision", 0x0c0 }, - { "TxFrameInDisc", 0x0d0 }, - { "TxPausePkts", 0x0e0 }, - { "TxQPKTQ1", 0x0f0 }, - { "TxQPKTQ2", 0x100 }, - { "TxQPKTQ3", 0x110 }, - { "TxQPKTQ4", 0x120 }, - { "TxQPKTQ5", 0x130 }, - { "RxOctets", 0x140, 8 }, - { "RxUndersizePkts", 0x160 }, - { "RxPausePkts", 0x170 }, - { "RxPkts64Octets", 0x180 }, - { "RxPkts65to127Octets", 0x190 }, - { "RxPkts128to255Octets", 0x1a0 }, - { "RxPkts256to511Octets", 0x1b0 }, - { "RxPkts512to1023Octets", 0x1c0 }, - { "RxPkts1024toMaxPktsOctets", 0x1d0 }, - { "RxOversizePkts", 0x1e0 }, - { "RxJabbers", 0x1f0 }, - { "RxAlignmentErrors", 0x200 }, - { "RxFCSErrors", 0x210 }, - { "RxGoodOctets", 0x220, 8 }, - { "RxDropPkts", 0x240 }, - { "RxUnicastPkts", 0x250 }, - { "RxMulticastPkts", 0x260 }, - { "RxBroadcastPkts", 0x270 }, - { "RxSAChanges", 0x280 }, - { "RxFragments", 0x290 }, - { "RxJumboPkt", 0x2a0 }, - { "RxSymblErr", 0x2b0 }, - { "InRangeErrCount", 0x2c0 }, - { "OutRangeErrCount", 0x2d0 }, - { "EEELpiEvent", 0x2e0 }, - { "EEELpiDuration", 0x2f0 }, - { "RxDiscard", 0x300, 8 }, - { "TxQPKTQ6", 0x320 }, - { "TxQPKTQ7", 0x330 }, - { "TxPkts64Octets", 0x340 }, - { "TxPkts65to127Octets", 0x350 }, - { "TxPkts128to255Octets", 0x360 }, - { "TxPkts256to511Ocets", 0x370 }, - { "TxPkts512to1023Ocets", 0x380 }, - { "TxPkts1024toMaxPktOcets", 0x390 }, -}; - -#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib) - -static void bcm_sf2_sw_get_strings(struct dsa_switch *ds, - int port, uint8_t *data) -{ - unsigned int i; - - for (i = 0; i < BCM_SF2_STATS_SIZE; i++) - memcpy(data + i * ETH_GSTRING_LEN, - bcm_sf2_mib[i].string, ETH_GSTRING_LEN); -} - -static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, - int port, uint64_t *data) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - const struct bcm_sf2_hw_stats *s; - unsigned int i; - u64 val = 0; - u32 offset; - - mutex_lock(&priv->stats_mutex); - - /* Now fetch the per-port counters */ - for (i = 0; i < BCM_SF2_STATS_SIZE; i++) { - s = &bcm_sf2_mib[i]; - - /* Do a latched 64-bit read if needed */ - offset = s->reg + CORE_P_MIB_OFFSET(port); - if (s->sizeof_stat == 8) - val = core_readq(priv, offset); - else - val = core_readl(priv, offset); - - data[i] = (u64)val; - } - - mutex_unlock(&priv->stats_mutex); -} - -static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) -{ - return BCM_SF2_STATS_SIZE; -} - -static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev, - struct device *host_dev, int sw_addr, - void **_priv) +static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv; - - priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return NULL; - *_priv = priv; - - return "Broadcom Starfighter 2"; + return DSA_TAG_PROTO_BRCM; } static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int i; u32 reg; @@ -172,7 +63,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct 
bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg, val; /* Enable the port memories */ @@ -237,7 +128,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg; reg = core_readl(priv, CORE_EEE_EN_CTRL); @@ -250,7 +141,7 @@ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable) static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg; reg = reg_readl(priv, REG_SPHY_CNTRL); @@ -324,7 +215,7 @@ static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv, static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; u32 reg; @@ -380,7 +271,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 off, reg; if (priv->wol_ports_mask & (1 << port)) @@ -412,7 +303,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; @@ -430,7 +321,7 @@ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; u32 reg; @@ -445,7 +336,7 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; p->eee_enabled = e->eee_enabled; @@ -461,469 +352,6 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 1000; - u32 reg; - - reg = core_readl(priv, CORE_FAST_AGE_CTRL); - reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; - core_writel(priv, reg, CORE_FAST_AGE_CTRL); - - do { - reg = core_readl(priv, CORE_FAST_AGE_CTRL); - if (!(reg & FAST_AGE_STR_DONE)) - break; - - cpu_relax(); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; - - core_writel(priv, 0, CORE_FAST_AGE_CTRL); - - return 0; -} - -/* Fast-ageing of ARL entries for a given port, equivalent to an ARL - * flush for that port. 
- */ -static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - - core_writel(priv, port, CORE_FAST_AGE_PORT); - - return bcm_sf2_fast_age_op(priv); -} - -static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid) -{ - core_writel(priv, vid, CORE_FAST_AGE_VID); - - return bcm_sf2_fast_age_op(priv); -} - -static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 10; - u32 reg; - - do { - reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); - if (!(reg & ARLA_VTBL_STDN)) - return 0; - - usleep_range(1000, 2000); - } while (timeout--); - - return -ETIMEDOUT; -} - -static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) -{ - core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); - - return bcm_sf2_vlan_op_wait(priv); -} - -static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, - struct bcm_sf2_vlan *vlan) -{ - int ret; - - core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); - core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members, - CORE_ARLA_VTBL_ENTRY); - - ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE); - if (ret) - pr_err("failed to write VLAN entry\n"); -} - -static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, - struct bcm_sf2_vlan *vlan) -{ - u32 entry; - int ret; - - core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); - - ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ); - if (ret) - return ret; - - entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY); - vlan->members = entry & FWD_MAP_MASK; - vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK; - - return 0; -} - -static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, - struct net_device *bridge) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - s8 cpu_port = ds->dst->cpu_port; - unsigned int i; - u32 reg, p_ctl; - - /* Make this port leave the all VLANs join since we will have proper - * VLAN entries from now on - */ - reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); - reg &= ~BIT(port); - if ((reg & BIT(cpu_port)) == BIT(cpu_port)) - reg &= ~BIT(cpu_port); - core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); - - priv->port_sts[port].bridge_dev = bridge; - p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); - - for (i = 0; i < priv->hw_params.num_ports; i++) { - if (priv->port_sts[i].bridge_dev != bridge) - continue; - - /* Add this local port to the remote port VLAN control - * membership and update the remote port bitmask - */ - reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); - reg |= 1 << port; - core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); - priv->port_sts[i].vlan_ctl_mask = reg; - - p_ctl |= 1 << i; - } - - /* Configure the local port VLAN control membership to include - * remote ports and update the local port bitmask - */ - core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); - priv->port_sts[port].vlan_ctl_mask = p_ctl; - - return 0; -} - -static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct net_device *bridge = priv->port_sts[port].bridge_dev; - s8 cpu_port = ds->dst->cpu_port; - unsigned int i; - u32 reg, p_ctl; - - p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); - - for (i = 0; i < priv->hw_params.num_ports; i++) { - /* Don't touch the remaining ports */ - if (priv->port_sts[i].bridge_dev != bridge) - continue; - - reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); - reg &= ~(1 << port); - core_writel(priv, reg, 
CORE_PORT_VLAN_CTL_PORT(i)); - priv->port_sts[port].vlan_ctl_mask = reg; - - /* Prevent self removal to preserve isolation */ - if (port != i) - p_ctl &= ~(1 << i); - } - - core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); - priv->port_sts[port].vlan_ctl_mask = p_ctl; - priv->port_sts[port].bridge_dev = NULL; - - /* Make this port join all VLANs without VLAN entries */ - reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); - reg |= BIT(port); - if (!(reg & BIT(cpu_port))) - reg |= BIT(cpu_port); - core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); -} - -static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, - u8 state) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - u8 hw_state, cur_hw_state; - u32 reg; - - reg = core_readl(priv, CORE_G_PCTL_PORT(port)); - cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); - - switch (state) { - case BR_STATE_DISABLED: - hw_state = G_MISTP_DIS_STATE; - break; - case BR_STATE_LISTENING: - hw_state = G_MISTP_LISTEN_STATE; - break; - case BR_STATE_LEARNING: - hw_state = G_MISTP_LEARN_STATE; - break; - case BR_STATE_FORWARDING: - hw_state = G_MISTP_FWD_STATE; - break; - case BR_STATE_BLOCKING: - hw_state = G_MISTP_BLOCK_STATE; - break; - default: - pr_err("%s: invalid STP state: %d\n", __func__, state); - return; - } - - /* Fast-age ARL entries if we are moving a port from Learning or - * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening - * state (hw_state) - */ - if (cur_hw_state != hw_state) { - if (cur_hw_state >= G_MISTP_LEARN_STATE && - hw_state <= G_MISTP_LISTEN_STATE) { - if (bcm_sf2_sw_fast_age_port(ds, port)) { - pr_err("%s: fast-ageing failed\n", __func__); - return; - } - } - } - - reg = core_readl(priv, CORE_G_PCTL_PORT(port)); - reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); - reg |= hw_state; - core_writel(priv, reg, CORE_G_PCTL_PORT(port)); -} - -/* Address Resolution Logic routines */ -static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv) -{ - unsigned int timeout = 10; - u32 reg; - - do { - reg = core_readl(priv, CORE_ARLA_RWCTL); - if (!(reg & ARL_STRTDN)) - return 0; - - usleep_range(1000, 2000); - } while (timeout--); - - return -ETIMEDOUT; -} - -static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op) -{ - u32 cmd; - - if (op > ARL_RW) - return -EINVAL; - - cmd = core_readl(priv, CORE_ARLA_RWCTL); - cmd &= ~IVL_SVL_SELECT; - cmd |= ARL_STRTDN; - if (op) - cmd |= ARL_RW; - else - cmd &= ~ARL_RW; - core_writel(priv, cmd, CORE_ARLA_RWCTL); - - return bcm_sf2_arl_op_wait(priv); -} - -static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac, - u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx, - bool is_valid) -{ - unsigned int i; - int ret; - - ret = bcm_sf2_arl_op_wait(priv); - if (ret) - return ret; - - /* Read the 4 bins */ - for (i = 0; i < 4; i++) { - u64 mac_vid; - u32 fwd_entry; - - mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i)); - fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i)); - bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry); - - if (ent->is_valid && is_valid) { - *idx = i; - return 0; - } - - /* This is the MAC we just deleted */ - if (!is_valid && (mac_vid & mac)) - return 0; - } - - return -ENOENT; -} - -static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port, - const unsigned char *addr, u16 vid, bool is_valid) -{ - struct bcm_sf2_arl_entry ent; - u32 fwd_entry; - u64 mac, mac_vid = 0; - u8 idx = 0; - int ret; - - /* Convert the array into a 64-bit MAC */ - mac = bcm_sf2_mac_to_u64(addr); - - /* Perform a read for the given 
MAC and VID */ - core_writeq(priv, mac, CORE_ARLA_MAC); - core_writel(priv, vid, CORE_ARLA_VID); - - /* Issue a read operation for this MAC */ - ret = bcm_sf2_arl_rw_op(priv, 1); - if (ret) - return ret; - - ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid); - /* If this is a read, just finish now */ - if (op) - return ret; - - /* We could not find a matching MAC, so reset to a new entry */ - if (ret) { - fwd_entry = 0; - idx = 0; - } - - memset(&ent, 0, sizeof(ent)); - ent.port = port; - ent.is_valid = is_valid; - ent.vid = vid; - ent.is_static = true; - memcpy(ent.mac, addr, ETH_ALEN); - bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent); - - core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx)); - core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx)); - - ret = bcm_sf2_arl_rw_op(priv, 0); - if (ret) - return ret; - - /* Re-read the entry to check */ - return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid); -} - -static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - /* We do not need to do anything specific here yet */ - return 0; -} - -static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - - if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) - pr_err("%s: failed to add MAC address\n", __func__); -} - -static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - - return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); -} - -static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv) -{ - unsigned timeout = 1000; - u32 reg; - - do { - reg = core_readl(priv, CORE_ARLA_SRCH_CTL); - if (!(reg & ARLA_SRCH_STDN)) - return 0; - - if (reg & ARLA_SRCH_VLID) - return 0; - - usleep_range(1000, 2000); - } while (timeout--); - - return -ETIMEDOUT; -} - -static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx, - struct bcm_sf2_arl_entry *ent) -{ - u64 mac_vid; - u32 fwd_entry; - - mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx)); - fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx)); - bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry); -} - -static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port, - const struct bcm_sf2_arl_entry *ent, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) -{ - if (!ent->is_valid) - return 0; - - if (port != ent->port) - return 0; - - ether_addr_copy(fdb->addr, ent->mac); - fdb->vid = ent->vid; - fdb->ndm_state = ent->is_static ? 
NUD_NOARP : NUD_REACHABLE; - - return cb(&fdb->obj); -} - -static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - int (*cb)(struct switchdev_obj *obj)) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct net_device *dev = ds->ports[port].netdev; - struct bcm_sf2_arl_entry results[2]; - unsigned int count = 0; - int ret; - - /* Start search operation */ - core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL); - - do { - ret = bcm_sf2_arl_search_wait(priv); - if (ret) - return ret; - - /* Read both entries, then return their values back */ - bcm_sf2_arl_search_rd(priv, 0, &results[0]); - ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb); - if (ret) - return ret; - - bcm_sf2_arl_search_rd(priv, 1, &results[1]); - ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb); - if (ret) - return ret; - - if (!results[0].is_valid && !results[1].is_valid) - break; - - } while (count++ < CORE_ARLA_NUM_ENTRIES); - - return 0; -} - static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr, int regnum, u16 val) { @@ -1036,12 +464,10 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv) { - intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_0_mask_set(priv, 0xffffffff); intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); - intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_1_mask_set(priv, 0xffffffff); intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); - intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, @@ -1082,7 +508,7 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, static int bcm_sf2_mdio_register(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct device_node *dn; static int index; int err; @@ -1146,14 +572,9 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) of_node_put(priv->master_mii_dn); } -static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) -{ - return 0; -} - static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); /* The BCM7xxx PHY driver expects to find the integrated PHY revision * in bits 15:8 and the patch level in bits 7:0 which is exactly what @@ -1166,7 +587,7 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 id_mode_dis = 0, port_mode; const char *str = NULL; u32 reg; @@ -1246,7 +667,7 @@ force_link: static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 duplex, pause; u32 reg; @@ -1298,7 +719,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, static int bcm_sf2_sw_suspend(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; bcm_sf2_intr_disable(priv); @@ -1318,7 +739,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) static int bcm_sf2_sw_resume(struct dsa_switch *ds) { - struct 
bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; int ret; @@ -1345,7 +766,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct net_device *p = ds->dst[ds->index].master_netdev; - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol; /* Get the parent device WoL settings */ @@ -1368,7 +789,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { struct net_device *p = ds->dst[ds->index].master_netdev; - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst[ds->index].cpu_port; struct ethtool_wolinfo pwol; @@ -1393,43 +814,32 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, return p->ethtool_ops->set_wol(p, wol); } -static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable) +static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) { - u32 mgmt, vc0, vc1, vc4, vc5; + unsigned int timeout = 10; + u32 reg; - mgmt = core_readl(priv, CORE_SWMODE); - vc0 = core_readl(priv, CORE_VLAN_CTRL0); - vc1 = core_readl(priv, CORE_VLAN_CTRL1); - vc4 = core_readl(priv, CORE_VLAN_CTRL4); - vc5 = core_readl(priv, CORE_VLAN_CTRL5); + do { + reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); + if (!(reg & ARLA_VTBL_STDN)) + return 0; - mgmt &= ~SW_FWDG_MODE; + usleep_range(1000, 2000); + } while (timeout--); - if (enable) { - vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL; - vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP; - vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); - vc4 |= INGR_VID_CHK_DROP; - vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD; - } else { - vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL); - vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP); - vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); - vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD); - vc4 |= INGR_VID_CHK_VID_VIOL_IMP; - } + return -ETIMEDOUT; +} + +static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) +{ + core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); - core_writel(priv, vc0, CORE_VLAN_CTRL0); - core_writel(priv, vc1, CORE_VLAN_CTRL1); - core_writel(priv, 0, CORE_VLAN_CTRL3); - core_writel(priv, vc4, CORE_VLAN_CTRL4); - core_writel(priv, vc5, CORE_VLAN_CTRL5); - core_writel(priv, mgmt, CORE_SWMODE); + return bcm_sf2_vlan_op_wait(priv); } static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); unsigned int port; /* Clear all VLANs */ @@ -1443,162 +853,199 @@ static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) } } -static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port, - bool vlan_filtering) +static int bcm_sf2_sw_setup(struct dsa_switch *ds) { + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int port; + + /* Enable all valid ports and disable those unused */ + for (port = 0; port < priv->hw_params.num_ports; port++) { + /* IMP port receives special treatment */ + if ((1 << port) & ds->enabled_port_mask) + bcm_sf2_port_setup(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port, NULL); + } + + bcm_sf2_sw_configure_vlan(ds); + return 0; } -static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +/* The SWITCH_CORE register space 
is managed by b53 but operates on a page + + * register basis so we need to translate that into an address that the + * bus-glue understands. + */ +#define SF2_PAGE_REG_MKADDR(page, reg) ((page) << 10 | (reg) << 2) + +static int bcm_sf2_core_read8(struct b53_device *dev, u8 page, u8 reg, + u8 *val) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_priv *priv = dev->priv; - bcm_sf2_enable_vlan(priv, true); + *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); return 0; } -static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) +static int bcm_sf2_core_read16(struct b53_device *dev, u8 page, u8 reg, + u16 *val) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; - s8 cpu_port = ds->dst->cpu_port; - struct bcm_sf2_vlan *vl; - u16 vid; + struct bcm_sf2_priv *priv = dev->priv; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - vl = &priv->vlans[vid]; + *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); - bcm_sf2_get_vlan_entry(priv, vid, vl); + return 0; +} - vl->members |= BIT(port) | BIT(cpu_port); - if (untagged) - vl->untag |= BIT(port) | BIT(cpu_port); - else - vl->untag &= ~(BIT(port) | BIT(cpu_port)); +static int bcm_sf2_core_read32(struct b53_device *dev, u8 page, u8 reg, + u32 *val) +{ + struct bcm_sf2_priv *priv = dev->priv; - bcm_sf2_set_vlan_entry(priv, vid, vl); - bcm_sf2_sw_fast_age_vlan(priv, vid); - } + *val = core_readl(priv, SF2_PAGE_REG_MKADDR(page, reg)); - if (pvid) { - core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port)); - core_writel(priv, vlan->vid_end, - CORE_DEFAULT_1Q_TAG_P(cpu_port)); - bcm_sf2_sw_fast_age_vlan(priv, vid); - } + return 0; } -static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int bcm_sf2_core_read64(struct b53_device *dev, u8 page, u8 reg, + u64 *val) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - s8 cpu_port = ds->dst->cpu_port; - struct bcm_sf2_vlan *vl; - u16 vid, pvid; - int ret; + struct bcm_sf2_priv *priv = dev->priv; - pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + *val = core_readq(priv, SF2_PAGE_REG_MKADDR(page, reg)); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { - vl = &priv->vlans[vid]; - - ret = bcm_sf2_get_vlan_entry(priv, vid, vl); - if (ret) - return ret; - - vl->members &= ~BIT(port); - if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) - vl->members = 0; - if (pvid == vid) - pvid = 0; - if (untagged) { - vl->untag &= ~BIT(port); - if ((vl->untag & BIT(port)) == BIT(cpu_port)) - vl->untag = 0; - } + return 0; +} - bcm_sf2_set_vlan_entry(priv, vid, vl); - bcm_sf2_sw_fast_age_vlan(priv, vid); - } +static int bcm_sf2_core_write8(struct b53_device *dev, u8 page, u8 reg, + u8 value) +{ + struct bcm_sf2_priv *priv = dev->priv; - core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port)); - core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port)); - bcm_sf2_sw_fast_age_vlan(priv, vid); + core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); return 0; } -static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - int (*cb)(struct switchdev_obj *obj)) +static int bcm_sf2_core_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct 
bcm_sf2_port_status *p = &priv->port_sts[port]; - struct bcm_sf2_vlan *vl; - u16 vid, pvid; - int err = 0; + struct bcm_sf2_priv *priv = dev->priv; - pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); - for (vid = 0; vid < VLAN_N_VID; vid++) { - vl = &priv->vlans[vid]; + return 0; +} - if (!(vl->members & BIT(port))) - continue; +static int bcm_sf2_core_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + struct bcm_sf2_priv *priv = dev->priv; - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; + core_writel(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); - if (vl->untag & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (p->pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; + return 0; +} - err = cb(&vlan->obj); - if (err) - break; - } +static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct bcm_sf2_priv *priv = dev->priv; - return err; + core_writeq(priv, value, SF2_PAGE_REG_MKADDR(page, reg)); + + return 0; } -static int bcm_sf2_sw_setup(struct dsa_switch *ds) +struct b53_io_ops bcm_sf2_io_ops = { + .read8 = bcm_sf2_core_read8, + .read16 = bcm_sf2_core_read16, + .read32 = bcm_sf2_core_read32, + .read48 = bcm_sf2_core_read64, + .read64 = bcm_sf2_core_read64, + .write8 = bcm_sf2_core_write8, + .write16 = bcm_sf2_core_write16, + .write32 = bcm_sf2_core_write32, + .write48 = bcm_sf2_core_write64, + .write64 = bcm_sf2_core_write64, +}; + +static int bcm_sf2_sw_probe(struct platform_device *pdev) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; - struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct device_node *dn; + struct device_node *dn = pdev->dev.of_node; + struct b53_platform_data *pdata; + struct bcm_sf2_priv *priv; + struct b53_device *dev; + struct dsa_switch *ds; void __iomem **base; - unsigned int port; + struct resource *r; unsigned int i; u32 reg, rev; int ret; + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv); + if (!dev) + return -ENOMEM; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + /* Auto-detection using standard registers will not work, so + * provide an indication of what kind of device we are for + * b53_common to work with + */ + pdata->chip_id = BCM7445_DEVICE_ID; + dev->pdata = pdata; + + priv->dev = dev; + ds = dev->ds; + + /* Override the parts that are non-standard wrt. 
normal b53 devices */ + ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol; + ds->ops->setup = bcm_sf2_sw_setup; + ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags; + ds->ops->adjust_link = bcm_sf2_sw_adjust_link; + ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update; + ds->ops->suspend = bcm_sf2_sw_suspend; + ds->ops->resume = bcm_sf2_sw_resume; + ds->ops->get_wol = bcm_sf2_sw_get_wol; + ds->ops->set_wol = bcm_sf2_sw_set_wol; + ds->ops->port_enable = bcm_sf2_port_setup; + ds->ops->port_disable = bcm_sf2_port_disable; + ds->ops->get_eee = bcm_sf2_sw_get_eee; + ds->ops->set_eee = bcm_sf2_sw_set_eee; + + /* Avoid having DSA free our slave MDIO bus (checking for + * ds->slave_mii_bus and ds->ops->phy_read being non-NULL) + */ + ds->ops->phy_read = NULL; + + dev_set_drvdata(&pdev->dev, priv); + spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); - /* All the interesting properties are at the parent device_node - * level - */ - dn = ds->cd->of_node->parent; - bcm_sf2_identify_ports(priv, ds->cd->of_node); + bcm_sf2_identify_ports(priv, dn->child); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - *base = of_iomap(dn, i); - if (*base == NULL) { + r = platform_get_resource(pdev, IORESOURCE_MEM, i); + *base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(*base)) { pr_err("unable to find register: %s\n", reg_names[i]); - ret = -ENOMEM; - goto out_unmap; + return PTR_ERR(*base); } base++; } @@ -1606,30 +1053,30 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); - goto out_unmap; + return ret; } ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); - goto out_unmap; + return ret; } /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); - ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); + ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_mdio; } - ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); + ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); - goto out_free_irq0; + goto out_mdio; } /* Reset the MIB counters */ @@ -1649,19 +1096,6 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) &priv->hw_params.num_gphy)) priv->hw_params.num_gphy = 1; - /* Enable all valid ports and disable those unused */ - for (port = 0; port < priv->hw_params.num_ports; port++) { - /* IMP port receives special treatment */ - if ((1 << port) & ds->enabled_port_mask) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - bcm_sf2_imp_setup(ds, port); - else - bcm_sf2_port_disable(ds, port, NULL); - } - - bcm_sf2_sw_configure_vlan(ds); - rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & SWITCH_TOP_REV_MASK; @@ -1670,6 +1104,10 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) rev = reg_readl(priv, REG_PHY_REVISION); priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + ret = b53_switch_register(dev); + if (ret) + goto out_mdio; + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, priv->hw_params.core_rev >> 8, 
priv->hw_params.core_rev & 0xff, @@ -1677,66 +1115,60 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) return 0; -out_free_irq0: - free_irq(priv->irq0, priv); out_mdio: bcm_sf2_mdio_unregister(priv); -out_unmap: - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - if (*base) - iounmap(*base); - base++; - } return ret; } -static struct dsa_switch_driver bcm_sf2_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_BRCM, - .probe = bcm_sf2_sw_drv_probe, - .setup = bcm_sf2_sw_setup, - .set_addr = bcm_sf2_sw_set_addr, - .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .get_strings = bcm_sf2_sw_get_strings, - .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, - .get_sset_count = bcm_sf2_sw_get_sset_count, - .adjust_link = bcm_sf2_sw_adjust_link, - .fixed_link_update = bcm_sf2_sw_fixed_link_update, - .suspend = bcm_sf2_sw_suspend, - .resume = bcm_sf2_sw_resume, - .get_wol = bcm_sf2_sw_get_wol, - .set_wol = bcm_sf2_sw_set_wol, - .port_enable = bcm_sf2_port_setup, - .port_disable = bcm_sf2_port_disable, - .get_eee = bcm_sf2_sw_get_eee, - .set_eee = bcm_sf2_sw_set_eee, - .port_bridge_join = bcm_sf2_sw_br_join, - .port_bridge_leave = bcm_sf2_sw_br_leave, - .port_stp_state_set = bcm_sf2_sw_br_set_stp_state, - .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, - .port_fdb_add = bcm_sf2_sw_fdb_add, - .port_fdb_del = bcm_sf2_sw_fdb_del, - .port_fdb_dump = bcm_sf2_sw_fdb_dump, - .port_vlan_filtering = bcm_sf2_sw_vlan_filtering, - .port_vlan_prepare = bcm_sf2_sw_vlan_prepare, - .port_vlan_add = bcm_sf2_sw_vlan_add, - .port_vlan_del = bcm_sf2_sw_vlan_del, - .port_vlan_dump = bcm_sf2_sw_vlan_dump, -}; - -static int __init bcm_sf2_init(void) +static int bcm_sf2_sw_remove(struct platform_device *pdev) { - register_switch_driver(&bcm_sf2_switch_driver); + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + /* Disable all ports and interrupts */ + priv->wol_ports_mask = 0; + bcm_sf2_sw_suspend(priv->dev->ds); + dsa_unregister_switch(priv->dev->ds); + bcm_sf2_mdio_unregister(priv); return 0; } -module_init(bcm_sf2_init); -static void __exit bcm_sf2_exit(void) +#ifdef CONFIG_PM_SLEEP +static int bcm_sf2_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + return dsa_switch_suspend(priv->dev->ds); +} + +static int bcm_sf2_resume(struct device *dev) { - unregister_switch_driver(&bcm_sf2_switch_driver); + struct platform_device *pdev = to_platform_device(dev); + struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); + + return dsa_switch_resume(priv->dev->ds); } -module_exit(bcm_sf2_exit); +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, + bcm_sf2_suspend, bcm_sf2_resume); + +static const struct of_device_id bcm_sf2_of_match[] = { + { .compatible = "brcm,bcm7445-switch-v4.0" }, + { /* sentinel */ }, +}; + +static struct platform_driver bcm_sf2_driver = { + .probe = bcm_sf2_sw_probe, + .remove = bcm_sf2_sw_remove, + .driver = { + .name = "brcm-sf2", + .of_match_table = bcm_sf2_of_match, + .pm = &bcm_sf2_pm_ops, + }, +}; +module_platform_driver(bcm_sf2_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip"); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index dd446e466699..afe56686b3d7 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -26,6 +26,7 @@ #include <net/dsa.h> #include "bcm_sf2_regs.h" +#include "b53/b53_priv.h" struct bcm_sf2_hw_params { u16 top_rev; @@ -50,71 
+51,9 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; - u32 vlan_ctl_mask; - u16 pvid; - - struct net_device *bridge_dev; -}; - -struct bcm_sf2_arl_entry { - u8 port; - u8 mac[ETH_ALEN]; - u16 vid; - u8 is_valid:1; - u8 is_age:1; - u8 is_static:1; + u16 vlan_ctl_mask; }; -struct bcm_sf2_vlan { - u16 members; - u16 untag; -}; - -static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst) -{ - unsigned int i; - - for (i = 0; i < ETH_ALEN; i++) - dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; -} - -static inline u64 bcm_sf2_mac_to_u64(const u8 *src) -{ - unsigned int i; - u64 dst = 0; - - for (i = 0; i < ETH_ALEN; i++) - dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); - - return dst; -} - -static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent, - u64 mac_vid, u32 fwd_entry) -{ - memset(ent, 0, sizeof(*ent)); - ent->port = fwd_entry & PORTID_MASK; - ent->is_valid = !!(fwd_entry & ARL_VALID); - ent->is_age = !!(fwd_entry & ARL_AGE); - ent->is_static = !!(fwd_entry & ARL_STATIC); - bcm_sf2_mac_from_u64(mac_vid, ent->mac); - ent->vid = mac_vid >> VID_SHIFT; -} - -static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, - const struct bcm_sf2_arl_entry *ent) -{ - *mac_vid = bcm_sf2_mac_to_u64(ent->mac); - *mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT; - *fwd_entry = ent->port & PORTID_MASK; - if (ent->is_valid) - *fwd_entry |= ARL_VALID; - if (ent->is_static) - *fwd_entry |= ARL_STATIC; - if (ent->is_age) - *fwd_entry |= ARL_AGE; -} - struct bcm_sf2_priv { /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ void __iomem *core; @@ -134,6 +73,9 @@ struct bcm_sf2_priv { u32 irq1_stat; u32 irq1_mask; + /* Backing b53_device */ + struct b53_device *dev; + /* Mutex protecting access to the MIB counters */ struct mutex stats_mutex; @@ -155,16 +97,14 @@ struct bcm_sf2_priv { struct device_node *master_mii_dn; struct mii_bus *slave_mii_bus; struct mii_bus *master_mii_bus; - - /* Cache of programmed VLANs */ - struct bcm_sf2_vlan vlans[VLAN_N_VID]; }; -struct bcm_sf2_hw_stats { - const char *string; - u16 reg; - u8 sizeof_stat; -}; +static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds) +{ + struct b53_device *dev = ds_to_priv(ds); + + return dev->priv; +} #define SF2_IO_MACRO(name) \ static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 9f2a9cb42074..838fe373cd6f 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -115,14 +115,6 @@ #define RX_BCST_EN (1 << 2) #define RX_MCST_EN (1 << 3) #define RX_UCST_EN (1 << 4) -#define G_MISTP_STATE_SHIFT 5 -#define G_MISTP_NO_STP (0 << G_MISTP_STATE_SHIFT) -#define G_MISTP_DIS_STATE (1 << G_MISTP_STATE_SHIFT) -#define G_MISTP_BLOCK_STATE (2 << G_MISTP_STATE_SHIFT) -#define G_MISTP_LISTEN_STATE (3 << G_MISTP_STATE_SHIFT) -#define G_MISTP_LEARN_STATE (4 << G_MISTP_STATE_SHIFT) -#define G_MISTP_FWD_STATE (5 << G_MISTP_STATE_SHIFT) -#define G_MISTP_STATE_MASK 0x7 #define CORE_SWMODE 0x0002c #define SW_FWDG_MODE (1 << 0) @@ -205,75 +197,11 @@ #define BRCM_HDR_EN_P5 (1 << 1) #define BRCM_HDR_EN_P7 (1 << 2) -#define CORE_BRCM_HDR_CTRL2 0x0828 - -#define CORE_HL_PRTC_CTRL 0x0940 -#define ARP_EN (1 << 0) -#define RARP_EN (1 << 1) -#define DHCP_EN (1 << 2) -#define ICMPV4_EN (1 << 3) -#define ICMPV6_EN (1 << 4) -#define ICMPV6_FWD_MODE (1 << 5) -#define IGMP_DIP_EN (1 << 8) -#define IGMP_RPTLVE_EN (1 << 9) -#define IGMP_RTPLVE_FWD_MODE (1 << 10) -#define IGMP_QRY_EN (1 << 11) 
-#define IGMP_QRY_FWD_MODE (1 << 12) -#define IGMP_UKN_EN (1 << 13) -#define IGMP_UKN_FWD_MODE (1 << 14) -#define MLD_RPTDONE_EN (1 << 15) -#define MLD_RPTDONE_FWD_MODE (1 << 16) -#define MLD_QRY_EN (1 << 17) -#define MLD_QRY_FWD_MODE (1 << 18) - #define CORE_RST_MIB_CNT_EN 0x0950 #define CORE_BRCM_HDR_RX_DIS 0x0980 #define CORE_BRCM_HDR_TX_DIS 0x0988 -#define CORE_ARLA_NUM_ENTRIES 1024 - -#define CORE_ARLA_RWCTL 0x1400 -#define ARL_RW (1 << 0) -#define IVL_SVL_SELECT (1 << 6) -#define ARL_STRTDN (1 << 7) - -#define CORE_ARLA_MAC 0x1408 -#define CORE_ARLA_VID 0x1420 -#define ARLA_VIDTAB_INDX_MASK 0x1fff - -#define CORE_ARLA_MACVID0 0x1440 -#define MAC_MASK 0xffffffffff -#define VID_SHIFT 48 -#define VID_MASK 0xfff - -#define CORE_ARLA_FWD_ENTRY0 0x1460 -#define PORTID_MASK 0x1ff -#define ARL_CON_SHIFT 9 -#define ARL_CON_MASK 0x3 -#define ARL_PRI_SHIFT 11 -#define ARL_PRI_MASK 0x7 -#define ARL_AGE (1 << 14) -#define ARL_STATIC (1 << 15) -#define ARL_VALID (1 << 16) - -#define CORE_ARLA_MACVID_ENTRY(x) (CORE_ARLA_MACVID0 + ((x) * 0x40)) -#define CORE_ARLA_FWD_ENTRY(x) (CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40)) - -#define CORE_ARLA_SRCH_CTL 0x1540 -#define ARLA_SRCH_VLID (1 << 0) -#define IVL_SVL_SELECT (1 << 6) -#define ARLA_SRCH_STDN (1 << 7) - -#define CORE_ARLA_SRCH_ADR 0x1544 -#define ARLA_SRCH_ADR_VALID (1 << 15) - -#define CORE_ARLA_SRCH_RSLT_0_MACVID 0x1580 -#define CORE_ARLA_SRCH_RSLT_0 0x15a0 - -#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40)) -#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40)) - #define CORE_ARLA_VTBL_RWCTRL 0x1600 #define ARLA_VTBL_CMD_WRITE 0 #define ARLA_VTBL_CMD_READ 1 @@ -297,59 +225,9 @@ #define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ ((x) * P_TXQ_PSM_VDD_SHIFT)) -#define CORE_P0_MIB_OFFSET 0x8000 -#define P_MIB_SIZE 0x400 -#define CORE_P_MIB_OFFSET(x) (CORE_P0_MIB_OFFSET + (x) * P_MIB_SIZE) - #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff -#define CORE_VLAN_CTRL0 0xd000 -#define CHANGE_1P_VID_INNER (1 << 0) -#define CHANGE_1P_VID_OUTER (1 << 1) -#define CHANGE_1Q_VID (1 << 3) -#define VLAN_LEARN_MODE_SVL (0 << 5) -#define VLAN_LEARN_MODE_IVL (3 << 5) -#define VLAN_EN (1 << 7) - -#define CORE_VLAN_CTRL1 0xd004 -#define EN_RSV_MCAST_FWDMAP (1 << 2) -#define EN_RSV_MCAST_UNTAG (1 << 3) -#define EN_IPMC_BYPASS_FWDMAP (1 << 5) -#define EN_IPMC_BYPASS_UNTAG (1 << 6) - -#define CORE_VLAN_CTRL2 0xd008 -#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2) -#define EN_GMRP_GVRP_V_FWDMAP (1 << 5) -#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6) - -#define CORE_VLAN_CTRL3 0xd00c -#define EN_DROP_NON1Q_MASK 0x1ff - -#define CORE_VLAN_CTRL4 0xd014 -#define RESV_MCAST_FLOOD (1 << 1) -#define EN_DOUBLE_TAG_MASK 0x3 -#define EN_DOUBLE_TAG_SHIFT 2 -#define EN_MGE_REV_GMRP (1 << 4) -#define EN_MGE_REV_GVRP (1 << 5) -#define INGR_VID_CHK_SHIFT 6 -#define INGR_VID_CHK_MASK 0x3 -#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT) -#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT) -#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT) -#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT) - -#define CORE_VLAN_CTRL5 0xd018 -#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0) -#define EN_VID_FFF_FWD (1 << 2) -#define DROP_VTABLE_MISS (1 << 3) -#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4) -#define PRESV_NON1Q (1 << 6) - -#define CORE_VLAN_CTRL6 0xd01c -#define STRICT_SFD_DETECT (1 << 0) -#define DIS_ARL_BUST_LMIT (1 << 4) - #define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8)) #define CFI_SHIFT 12 #define 
PRI_SHIFT 13 diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index e36b40886bd8..7ff9d373a9ee 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -69,6 +69,11 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr) return NULL; } +static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds) +{ + return DSA_TAG_PROTO_TRAILER; +} + static const char *mv88e6060_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **_priv) @@ -247,8 +252,8 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) return reg_write(ds, addr, regnum, val); } -static struct dsa_switch_driver mv88e6060_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_TRAILER, +static struct dsa_switch_ops mv88e6060_switch_ops = { + .get_tag_protocol = mv88e6060_get_tag_protocol, .probe = mv88e6060_drv_probe, .setup = mv88e6060_setup, .set_addr = mv88e6060_set_addr, @@ -258,14 +263,14 @@ static struct dsa_switch_driver mv88e6060_switch_driver = { static int __init mv88e6060_init(void) { - register_switch_driver(&mv88e6060_switch_driver); + register_switch_driver(&mv88e6060_switch_ops); return 0; } module_init(mv88e6060_init); static void __exit mv88e6060_cleanup(void) { - unregister_switch_driver(&mv88e6060_switch_driver); + unregister_switch_driver(&mv88e6060_switch_ops); } module_exit(mv88e6060_cleanup); diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 490bc06f993e..ac77737bbd87 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -2,6 +2,7 @@ config NET_DSA_MV88E6XXX tristate "Marvell 88E6xxx Ethernet switch fabric support" depends on NET_DSA select NET_DSA_TAG_EDSA + select NET_DSA_TAG_DSA help This driver adds support for most of the Marvell 88E6xxx models of Ethernet switch chips, except 88E6060. 
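The mv88e6060 conversion above shows the shape of the change made throughout this series: the fixed .tag_protocol field of the old struct dsa_switch_driver becomes a .get_tag_protocol callback on struct dsa_switch_ops, so a driver can choose the tagging protocol per chip at runtime. A minimal sketch of the new callback pattern, for a hypothetical "foo" switch (the names and the fixed return value are illustrative only, not part of this patch):

#include <net/dsa.h>

/* Return the tagging protocol this switch wants the DSA core to use.
 * A driver supporting several chips can inspect its private state here,
 * as mv88e6xxx does further below to pick DSA_TAG_PROTO_EDSA vs.
 * DSA_TAG_PROTO_DSA depending on chip capabilities.
 */
static enum dsa_tag_protocol foo_get_tag_protocol(struct dsa_switch *ds)
{
        return DSA_TAG_PROTO_TRAILER;   /* fixed protocol, as in mv88e6060 */
}

static struct dsa_switch_ops foo_switch_ops = {
        .get_tag_protocol = foo_get_tag_protocol,
        /* .probe, .setup, .set_addr, ... carried over unchanged */
};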
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 710679067594..4e697eea6e0f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -216,25 +216,130 @@ static int mv88e6xxx_write(struct mv88e6xxx_chip *chip, return 0; } -/* Indirect write to single pointer-data register with an Update bit */ -static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, - u16 update) +static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy, + int reg, u16 *val) { - u16 val; - int i, err; + int addr = phy; /* PHY device addresses start at 0x0 */ + + if (!chip->phy_ops) + return -EOPNOTSUPP; + + return chip->phy_ops->read(chip, addr, reg, val); +} + +static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, + int reg, u16 val) +{ + int addr = phy; /* PHY device addresses start at 0x0 */ + + if (!chip->phy_ops) + return -EOPNOTSUPP; + + return chip->phy_ops->write(chip, addr, reg, val); +} + +static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page) +{ + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_PHY_PAGE)) + return -EOPNOTSUPP; + + return mv88e6xxx_phy_write(chip, phy, PHY_PAGE, page); +} + +static void mv88e6xxx_phy_page_put(struct mv88e6xxx_chip *chip, int phy) +{ + int err; + + /* Restore PHY page Copper 0x0 for access via the registered MDIO bus */ + err = mv88e6xxx_phy_write(chip, phy, PHY_PAGE, PHY_PAGE_COPPER); + if (unlikely(err)) { + dev_err(chip->dev, "failed to restore PHY %d page Copper (%d)\n", + phy, err); + } +} + +static int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy, + u8 page, int reg, u16 *val) +{ + int err; + + /* There is no paging for register 22 */ + if (reg == PHY_PAGE) + return -EINVAL; + + err = mv88e6xxx_phy_page_get(chip, phy, page); + if (!err) { + err = mv88e6xxx_phy_read(chip, phy, reg, val); + mv88e6xxx_phy_page_put(chip, phy); + } + + return err; +} + +static int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, + u8 page, int reg, u16 val) +{ + int err; + + /* There is no paging for register 22 */ + if (reg == PHY_PAGE) + return -EINVAL; + + err = mv88e6xxx_phy_page_get(chip, phy, page); + if (!err) { + err = mv88e6xxx_phy_write(chip, phy, reg, val); + mv88e6xxx_phy_page_put(chip, phy); + } + + return err; +} + +static int mv88e6xxx_serdes_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) +{ + return mv88e6xxx_phy_page_read(chip, ADDR_SERDES, SERDES_PAGE_FIBER, + reg, val); +} + +static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val) +{ + return mv88e6xxx_phy_page_write(chip, ADDR_SERDES, SERDES_PAGE_FIBER, + reg, val); +} + +static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 mask) +{ + int i; + + for (i = 0; i < 16; i++) { + u16 val; + int err; - /* Wait until the previous operation is completed */ - for (i = 0; i < 16; ++i) { err = mv88e6xxx_read(chip, addr, reg, &val); if (err) return err; - if (!(val & BIT(15))) - break; + if (!(val & mask)) + return 0; + + usleep_range(1000, 2000); } - if (i == 16) - return -ETIMEDOUT; + dev_err(chip->dev, "Timeout while waiting for switch\n"); + return -ETIMEDOUT; +} + +/* Indirect write to single pointer-data register with an Update bit */ +static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 update) +{ + u16 val; + int err; + + /* Wait until the previous operation is completed */ + err = mv88e6xxx_wait(chip, addr, reg, BIT(15)); + if (err) + return err; /* Set the Update bit to
trigger a write operation */ val = BIT(15) | update; @@ -260,26 +365,10 @@ static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, return mv88e6xxx_write(chip, addr, reg, val); } -static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip, - int addr, int regnum) -{ - if (addr >= 0) - return _mv88e6xxx_reg_read(chip, addr, regnum); - return 0xffff; -} - -static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip, - int addr, int regnum, u16 val) -{ - if (addr >= 0) - return _mv88e6xxx_reg_write(chip, addr, regnum, val); - return 0; -} - static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) { int ret; - unsigned long timeout; + int i; ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) @@ -290,8 +379,7 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) if (ret) return ret; - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -307,8 +395,7 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) { - int ret, err; - unsigned long timeout; + int ret, err, i; ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) @@ -319,8 +406,7 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) if (err) return err; - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -400,34 +486,44 @@ static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip) chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; } -static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr, - int regnum) +static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip) { - int ret; + del_timer_sync(&chip->ppu_timer); +} - ret = mv88e6xxx_ppu_access_get(chip); - if (ret >= 0) { - ret = _mv88e6xxx_reg_read(chip, addr, regnum); +static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 *val) +{ + int err; + + err = mv88e6xxx_ppu_access_get(chip); + if (!err) { + err = mv88e6xxx_read(chip, addr, reg, val); mv88e6xxx_ppu_access_put(chip); } - return ret; + return err; } -static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr, - int regnum, u16 val) +static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) { - int ret; + int err; - ret = mv88e6xxx_ppu_access_get(chip); - if (ret >= 0) { - ret = _mv88e6xxx_reg_write(chip, addr, regnum, val); + err = mv88e6xxx_ppu_access_get(chip); + if (!err) { + err = mv88e6xxx_write(chip, addr, reg, val); mv88e6xxx_ppu_access_put(chip); } - return ret; + return err; } +static const struct mv88e6xxx_ops mv88e6xxx_phy_ppu_ops = { + .read = mv88e6xxx_phy_ppu_read, + .write = mv88e6xxx_phy_ppu_write, +}; + static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip) { return chip->info->family == MV88E6XXX_FAMILY_6065; @@ -819,130 +915,69 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset, - u16 mask) -{ - unsigned long timeout = jiffies + HZ / 10; - - while (time_before(jiffies, timeout)) { - int ret; - - ret = _mv88e6xxx_reg_read(chip, reg, offset); - if (ret < 0) - return ret; - if (!(ret & mask)) - return 0; - - usleep_range(1000, 2000); 
- } - return -ETIMEDOUT; -} - -static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip) -{ - return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, - GLOBAL2_SMI_OP_BUSY); -} - static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, - GLOBAL_ATU_OP_BUSY); -} - -static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip, - int addr, int regnum) -{ - int ret; - - ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, - GLOBAL2_SMI_OP_22_READ | (addr << 5) | - regnum); - if (ret < 0) - return ret; - - ret = mv88e6xxx_mdio_wait(chip); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA); - - return ret; -} - -static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip, - int addr, int regnum, u16 val) -{ - int ret; - - ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, - GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | - regnum); - - return mv88e6xxx_mdio_wait(chip); + return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, + GLOBAL_ATU_OP_BUSY); } static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int reg; + u16 reg; + int err; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - reg = mv88e6xxx_mdio_read_indirect(chip, port, 16); - if (reg < 0) + err = mv88e6xxx_phy_read(chip, port, 16, ®); + if (err) goto out; e->eee_enabled = !!(reg & 0x0200); e->tx_lpi_enabled = !!(reg & 0x0100); - reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); - if (reg < 0) + err = mv88e6xxx_read(chip, REG_PORT(port), PORT_STATUS, ®); + if (err) goto out; e->eee_active = !!(reg & PORT_STATUS_EEE); - reg = 0; - out: mutex_unlock(&chip->reg_lock); - return reg; + + return err; } static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int reg; - int ret; + u16 reg; + int err; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) return -EOPNOTSUPP; mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_mdio_read_indirect(chip, port, 16); - if (ret < 0) + err = mv88e6xxx_phy_read(chip, port, 16, ®); + if (err) goto out; - reg = ret & ~0x0300; + reg &= ~0x0300; if (e->eee_enabled) reg |= 0x0200; if (e->tx_lpi_enabled) reg |= 0x0100; - ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg); + err = mv88e6xxx_phy_write(chip, port, 16, reg); out: mutex_unlock(&chip->reg_lock); - return ret; + return err; } static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd) @@ -1227,8 +1262,8 @@ static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip, static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, - GLOBAL_VTU_OP_BUSY); + return mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, + GLOBAL_VTU_OP_BUSY); } static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op) @@ -2302,38 +2337,6 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) mutex_unlock(&chip->reg_lock); } -static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip, - int port, int page, int reg, int val) -{ - int ret; - - ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); - if (ret < 0) - goto restore_page_0; - - ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, 
val); -restore_page_0: - mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); - - return ret; -} - -static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip, - int port, int page, int reg) -{ - int ret; - - ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); - if (ret < 0) - goto restore_page_0; - - ret = mv88e6xxx_mdio_read_indirect(chip, port, reg); -restore_page_0: - mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); - - return ret; -} - static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) { bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE); @@ -2396,23 +2399,22 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) return ret; } -static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip) { - int ret; + u16 val; + int err; - ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES, - PAGE_FIBER_SERDES, MII_BMCR); - if (ret < 0) - return ret; + /* Clear Power Down bit */ + err = mv88e6xxx_serdes_read(chip, MII_BMCR, &val); + if (err) + return err; - if (ret & BMCR_PDOWN) { - ret &= ~BMCR_PDOWN; - ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES, - PAGE_FIBER_SERDES, MII_BMCR, - ret); + if (val & BMCR_PDOWN) { + val &= ~BMCR_PDOWN; + err = mv88e6xxx_serdes_write(chip, MII_BMCR, val); } - return ret; + return err; } static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, @@ -2486,28 +2488,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; if (dsa_is_cpu_port(ds, port)) { - if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) - reg |= PORT_CONTROL_DSA_TAG; - if (mv88e6xxx_6352_family(chip) || - mv88e6xxx_6351_family(chip) || - mv88e6xxx_6165_family(chip) || - mv88e6xxx_6097_family(chip) || - mv88e6xxx_6320_family(chip)) { + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA | - PORT_CONTROL_FORWARD_UNKNOWN | PORT_CONTROL_FORWARD_UNKNOWN_MC; - } - - if (mv88e6xxx_6352_family(chip) || - mv88e6xxx_6351_family(chip) || - mv88e6xxx_6165_family(chip) || - mv88e6xxx_6097_family(chip) || - mv88e6xxx_6095_family(chip) || - mv88e6xxx_6065_family(chip) || - mv88e6xxx_6185_family(chip) || - mv88e6xxx_6320_family(chip)) { - reg |= PORT_CONTROL_EGRESS_ADD_TAG; - } + else + reg |= PORT_CONTROL_DSA_TAG; + reg |= PORT_CONTROL_EGRESS_ADD_TAG | + PORT_CONTROL_FORWARD_UNKNOWN; } if (dsa_is_dsa_port(ds, port)) { if (mv88e6xxx_6095_family(chip) || @@ -2535,7 +2522,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) /* If this port is connected to a SerDes, make sure the SerDes is not * powered down. */ - if (mv88e6xxx_6352_family(chip)) { + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) { ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); if (ret < 0) return ret; @@ -2543,7 +2530,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) if ((ret == PORT_STATUS_CMODE_100BASE_X) || (ret == PORT_STATUS_CMODE_1000BASE_X) || (ret == PORT_STATUS_CMODE_SGMII)) { - ret = mv88e6xxx_power_on_serdes(chip); + ret = mv88e6xxx_serdes_power_on(chip); if (ret < 0) return ret; } @@ -2635,10 +2622,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) /* Port Ethertype: use the Ethertype DSA Ethertype * value. 
*/ - ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), - PORT_ETH_TYPE, ETH_P_EDSA); - if (ret) - return ret; + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_ETH_TYPE, ETH_P_EDSA); + if (ret) + return ret; + } + /* Tag Remap: use an identity 802.1p prio -> switch * prio mapping. */ @@ -2953,8 +2943,8 @@ static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip) break; /* Wait for the operation to complete */ - err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, - GLOBAL2_IRL_CMD_BUSY); + err = mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, + GLOBAL2_IRL_CMD_BUSY); if (err) break; } @@ -3008,9 +2998,9 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) { - return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, - GLOBAL2_EEPROM_CMD_BUSY | - GLOBAL2_EEPROM_CMD_RUNNING); + return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, + GLOBAL2_EEPROM_CMD_BUSY | + GLOBAL2_EEPROM_CMD_RUNNING); } static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) @@ -3058,6 +3048,62 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, return mv88e6xxx_g2_eeprom_cmd(chip, cmd); } +static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD, + GLOBAL2_SMI_PHY_CMD_BUSY); +} + +static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd) +{ + int err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_CMD, cmd); + if (err) + return err; + + return mv88e6xxx_g2_smi_phy_wait(chip); +} + +static int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 *val) +{ + u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg; + int err; + + err = mv88e6xxx_g2_smi_phy_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd); + if (err) + return err; + + return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val); +} + +static int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) +{ + u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg; + int err; + + err = mv88e6xxx_g2_smi_phy_wait(chip); + if (err) + return err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SMI_PHY_DATA, val); + if (err) + return err; + + return mv88e6xxx_g2_smi_phy_cmd(chip, cmd); +} + +static const struct mv88e6xxx_ops mv88e6xxx_g2_smi_phy_ops = { + .read = mv88e6xxx_g2_smi_phy_read, + .write = mv88e6xxx_g2_smi_phy_write, +}; + static int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) { u16 reg; @@ -3191,84 +3237,35 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) return err; } -#ifdef CONFIG_NET_DSA_HWMON -static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, - int reg) -{ - struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int ret; - - mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg); - mutex_unlock(&chip->reg_lock); - - return ret; -} - -static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, - int reg, int val) -{ - struct mv88e6xxx_chip *chip = ds_to_priv(ds); - int ret; - - mutex_lock(&chip->reg_lock); - ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val); - mutex_unlock(&chip->reg_lock); - - return ret; -} -#endif - -static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) -{ - if (port >= 0 && port < 
chip->info->num_ports) - return port; - return -EINVAL; -} - -static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum) +static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) { struct mv88e6xxx_chip *chip = bus->priv; - int addr = mv88e6xxx_port_to_mdio_addr(chip, port); - int ret; + u16 val; + int err; - if (addr < 0) + if (phy >= chip->info->num_ports) return 0xffff; mutex_lock(&chip->reg_lock); - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) - ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum); - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) - ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum); - else - ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum); - + err = mv88e6xxx_phy_read(chip, phy, reg, &val); mutex_unlock(&chip->reg_lock); - return ret; + + return err ? err : val; } -static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum, - u16 val) +static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mv88e6xxx_chip *chip = bus->priv; - int addr = mv88e6xxx_port_to_mdio_addr(chip, port); - int ret; + int err; - if (addr < 0) + if (phy >= chip->info->num_ports) return 0xffff; mutex_lock(&chip->reg_lock); - - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) - ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val); - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) - ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val); - else - ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val); - + err = mv88e6xxx_phy_write(chip, phy, reg, val); mutex_unlock(&chip->reg_lock); - return ret; + + return err; } static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, @@ -3278,9 +3275,6 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, struct mii_bus *bus; int err; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) - mv88e6xxx_ppu_state_init(chip); - if (np) chip->mdio_np = of_get_child_by_name(np, "mdio"); @@ -3336,44 +3330,42 @@ static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 val; int ret; - int val; *temp = 0; mutex_lock(&chip->reg_lock); - ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6); + ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6); if (ret < 0) goto error; /* Enable temperature sensor */ - ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); + ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); if (ret < 0) goto error; - ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5)); + ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5)); if (ret < 0) goto error; /* Wait for temperature to stabilize */ usleep_range(10000, 12000); - val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); - if (val < 0) { - ret = val; + ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val); + if (ret < 0) goto error; - } /* Disable temperature sensor */ - ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5)); + ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5)); if (ret < 0) goto error; *temp = ((val & 0x1f) - 5) * 5; error: - mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0); + mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0); mutex_unlock(&chip->reg_lock); return ret; } @@ -3382,15 +3374,18 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; + u16 val; int ret; *temp = 0; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27); + mutex_lock(&chip->reg_lock); + ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val); + mutex_unlock(&chip->reg_lock); if (ret < 0) return ret; - *temp = (ret & 0xff) - 25; + *temp = (val & 0xff) - 25; return 0; } @@ -3412,6 +3407,7 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + u16 val; int ret; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) @@ -3419,11 +3415,13 @@ static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) *temp = 0; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + mutex_lock(&chip->reg_lock); + ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); + mutex_unlock(&chip->reg_lock); if (ret < 0) return ret; - *temp = (((ret >> 8) & 0x1f) * 5) - 25; + *temp = (((val >> 8) & 0x1f) * 5) - 25; return 0; } @@ -3432,23 +3430,30 @@ static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; - int ret; + u16 val; + int err; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) return -EOPNOTSUPP; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); - if (ret < 0) - return ret; + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); + if (err) + goto unlock; temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - return mv88e6xxx_mdio_page_write(ds, phy, 6, 26, - (ret & 0xe0ff) | (temp << 8)); + err = mv88e6xxx_phy_page_write(chip, phy, 6, 26, + (val & 0xe0ff) | (temp << 8)); +unlock: + mutex_unlock(&chip->reg_lock); + + return err; } static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { struct mv88e6xxx_chip *chip = ds_to_priv(ds); int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; + u16 val; int ret; if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) @@ -3456,11 +3461,13 @@ static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) *alarm = false; - ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + mutex_lock(&chip->reg_lock); + ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val); + mutex_unlock(&chip->reg_lock); if (ret < 0) return ret; - *alarm = !!(ret & 0x40); + *alarm = !!(val & 0x40); return 0; } @@ -3877,6 +3884,30 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) return chip; } +static const struct mv88e6xxx_ops mv88e6xxx_phy_ops = { + .read = mv88e6xxx_read, + .write = mv88e6xxx_write, +}; + +static void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip) +{ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SMI_PHY)) { + chip->phy_ops = &mv88e6xxx_g2_smi_phy_ops; + } else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) { + chip->phy_ops = &mv88e6xxx_phy_ppu_ops; + mv88e6xxx_ppu_state_init(chip); + } else { + chip->phy_ops = &mv88e6xxx_phy_ops; + } +} + +static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip) +{ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) { + mv88e6xxx_ppu_state_destroy(chip); + } +} + static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int sw_addr) { @@ -3886,7 +3917,7 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP)) + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; else return -EINVAL; @@ -3897,6 +3928,16 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, return 0; } +static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) + return DSA_TAG_PROTO_EDSA; + + return DSA_TAG_PROTO_DSA; +} + static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv) @@ -3924,6 +3965,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, if (err) goto free; + mv88e6xxx_phy_init(chip); + err = mv88e6xxx_mdio_register(chip, NULL); if (err) goto free; @@ -3937,9 +3980,9 @@ free: return NULL; } -static struct dsa_switch_driver mv88e6xxx_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, +static struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, + .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, .set_addr = mv88e6xxx_set_addr, .adjust_link = mv88e6xxx_adjust_link, @@ -3986,7 +4029,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, ds->dev = dev; ds->priv = chip; - ds->drv = &mv88e6xxx_switch_driver; + ds->ops = &mv88e6xxx_switch_ops; dev_set_drvdata(dev, ds); @@ -4025,6 +4068,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) return err; + mv88e6xxx_phy_init(chip); + chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); if (IS_ERR(chip->reset)) return PTR_ERR(chip->reset); @@ -4051,6 +4096,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); struct mv88e6xxx_chip *chip = ds_to_priv(ds); + mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); mv88e6xxx_mdio_unregister(chip); } @@ -4076,7 +4122,7 @@ static struct mdio_driver mv88e6xxx_driver = { static int __init mv88e6xxx_init(void) { - 
register_switch_driver(&mv88e6xxx_switch_driver); + register_switch_driver(&mv88e6xxx_switch_ops); return mdio_driver_register(&mv88e6xxx_driver); } module_init(mv88e6xxx_init); @@ -4084,7 +4130,7 @@ module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { mdio_driver_unregister(&mv88e6xxx_driver); - unregister_switch_driver(&mv88e6xxx_switch_driver); + unregister_switch_driver(&mv88e6xxx_switch_ops); } module_exit(mv88e6xxx_cleanup); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h index 48d6ea77f9bd..e157d4f69864 100644 --- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -30,9 +30,12 @@ #define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) #define SMI_DATA 0x01 -/* Fiber/SERDES Registers are located at SMI address F, page 1 */ -#define REG_FIBER_SERDES 0x0f -#define PAGE_FIBER_SERDES 0x01 +/* PHY Registers */ +#define PHY_PAGE 0x16 +#define PHY_PAGE_COPPER 0x00 + +#define ADDR_SERDES 0x0f +#define SERDES_PAGE_FIBER 0x01 #define REG_PORT(p) (0x10 + (p)) #define PORT_STATUS 0x00 @@ -329,17 +332,16 @@ #define GLOBAL2_EEPROM_DATA 0x15 #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 -#define GLOBAL2_SMI_OP 0x18 -#define GLOBAL2_SMI_OP_BUSY BIT(15) -#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12) -#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \ - GLOBAL2_SMI_OP_CLAUSE_22) -#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \ - GLOBAL2_SMI_OP_CLAUSE_22) -#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY) -#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY) -#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY) -#define GLOBAL2_SMI_DATA 0x19 +#define GLOBAL2_SMI_PHY_CMD 0x18 +#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15) +#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12) +#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \ + GLOBAL2_SMI_PHY_CMD_MODE_22 | \ + GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA ((0x2 << 10) | \ + GLOBAL2_SMI_PHY_CMD_MODE_22 | \ + GLOBAL2_SMI_PHY_CMD_BUSY) +#define GLOBAL2_SMI_PHY_DATA 0x19 #define GLOBAL2_SCRATCH_MISC 0x1a #define GLOBAL2_SCRATCH_BUSY BIT(15) #define GLOBAL2_SCRATCH_REGISTER_SHIFT 8 @@ -384,10 +386,31 @@ enum mv88e6xxx_family { }; enum mv88e6xxx_cap { + /* Two different tag protocols can be used by the driver. All + * switches support DSA, but only later generations support + * EDSA. + */ + MV88E6XXX_CAP_EDSA, + /* Energy Efficient Ethernet. */ MV88E6XXX_CAP_EEE, + /* Multi-chip Addressing Mode. + * Some chips respond to only 2 registers of its own SMI device address + * when it is non-zero, and use indirect access to internal registers. + */ + MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ + MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ + + /* PHY Registers. + */ + MV88E6XXX_CAP_PHY_PAGE, /* (0x16) Page Register */ + + /* Fiber/SERDES Registers (SMI address F). + */ + MV88E6XXX_CAP_SERDES, + /* Switch Global 2 Registers. * The device contains a second set of global 16-bit registers. */ @@ -402,12 +425,8 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */ MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */ - - /* Multi-chip Addressing Mode. - * Some chips require an indirect SMI access when their SMI device - * address is not zero. See SMI_CMD and SMI_DATA. 
- */ - MV88E6XXX_CAP_MULTI_CHIP, + MV88E6XXX_CAP_G2_SMI_PHY_CMD, /* (0x18) SMI PHY Command */ + MV88E6XXX_CAP_G2_SMI_PHY_DATA, /* (0x19) SMI PHY Data */ /* PHY Polling Unit. * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. @@ -415,12 +434,6 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_PPU, MV88E6XXX_CAP_PPU_ACTIVE, - /* SMI PHY Command and Data registers. - * This requires an indirect access to PHY registers through - * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done. - */ - MV88E6XXX_CAP_SMI_PHY, - /* Per VLAN Spanning Tree Unit (STU). * The Port State database, if present, is accessed through VTU * operations and dedicated SID registers. See GLOBAL_VTU_SID. @@ -440,7 +453,16 @@ enum mv88e6xxx_cap { }; /* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_EDSA BIT(MV88E6XXX_CAP_EDSA) #define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) + +#define MV88E6XXX_FLAG_SMI_CMD BIT(MV88E6XXX_CAP_SMI_CMD) +#define MV88E6XXX_FLAG_SMI_DATA BIT(MV88E6XXX_CAP_SMI_DATA) + +#define MV88E6XXX_FLAG_PHY_PAGE BIT(MV88E6XXX_CAP_PHY_PAGE) + +#define MV88E6XXX_FLAG_SERDES BIT(MV88E6XXX_CAP_SERDES) + #define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2) #define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X) #define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X) @@ -452,10 +474,11 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT) #define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD) #define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA) -#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP) +#define MV88E6XXX_FLAG_G2_SMI_PHY_CMD BIT(MV88E6XXX_CAP_G2_SMI_PHY_CMD) +#define MV88E6XXX_FLAG_G2_SMI_PHY_DATA BIT(MV88E6XXX_CAP_G2_SMI_PHY_DATA) + #define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) #define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) -#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) #define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU) #define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) #define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) @@ -471,28 +494,43 @@ enum mv88e6xxx_cap { (MV88E6XXX_FLAG_G2_IRL_CMD | \ MV88E6XXX_FLAG_G2_IRL_DATA) +/* Multi-chip Addressing Mode */ +#define MV88E6XXX_FLAGS_MULTI_CHIP \ + (MV88E6XXX_FLAG_SMI_CMD | \ + MV88E6XXX_FLAG_SMI_DATA) + /* Cross-chip Port VLAN Table */ #define MV88E6XXX_FLAGS_PVT \ (MV88E6XXX_FLAG_G2_PVT_ADDR | \ MV88E6XXX_FLAG_G2_PVT_DATA) +/* Fiber/SERDES Registers at SMI address F, page 1 */ +#define MV88E6XXX_FLAGS_SERDES \ + (MV88E6XXX_FLAG_PHY_PAGE | \ + MV88E6XXX_FLAG_SERDES) + +/* Indirect PHY access via Global2 SMI PHY registers */ +#define MV88E6XXX_FLAGS_SMI_PHY \ + (MV88E6XXX_FLAG_G2_SMI_PHY_CMD |\ + MV88E6XXX_FLAG_G2_SMI_PHY_DATA) + #define MV88E6XXX_FLAGS_FAMILY_6095 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU | \ - MV88E6XXX_FLAG_VTU) + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) #define MV88E6XXX_FLAGS_FAMILY_6165 \ @@ -501,69 +539,73 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ 
MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAGS_PVT) #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAGS_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU | \ MV88E6XXX_FLAG_VTU) #define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEE | \ + (MV88E6XXX_FLAG_EDSA | \ + MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ - MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_EEPROM16 | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SMI_PHY) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ + (MV88E6XXX_FLAG_EDSA | \ + MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ - MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SMI_PHY) #define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEE | \ + (MV88E6XXX_FLAG_EDSA | \ + MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_SWITCH_MAC | \ MV88E6XXX_FLAG_G2_POT | \ - MV88E6XXX_FLAG_MULTI_CHIP | \ MV88E6XXX_FLAG_PPU_ACTIVE | \ - MV88E6XXX_FLAG_SMI_PHY | \ MV88E6XXX_FLAG_STU | \ MV88E6XXX_FLAG_TEMP | \ MV88E6XXX_FLAG_TEMP_LIMIT | \ MV88E6XXX_FLAG_VTU | \ MV88E6XXX_FLAGS_EEPROM16 | \ MV88E6XXX_FLAGS_IRL | \ - MV88E6XXX_FLAGS_PVT) + MV88E6XXX_FLAGS_MULTI_CHIP | \ + MV88E6XXX_FLAGS_PVT | \ + MV88E6XXX_FLAGS_SERDES | \ + MV88E6XXX_FLAGS_SMI_PHY) struct mv88e6xxx_info { enum mv88e6xxx_family family; @@ -623,6 +665,7 @@ struct mv88e6xxx_chip { /* Handles automatic disabling and re-enabling of the PHY * polling unit. 
*/ + const struct mv88e6xxx_ops *phy_ops; struct mutex ppu_mutex; int ppu_disabled; struct work_struct ppu_work; diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 2ffd63463299..8cc7467b6c1f 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -24,6 +24,7 @@ source "drivers/net/ethernet/agere/Kconfig" source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/altera/Kconfig" +source "drivers/net/ethernet/amazon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1d349e9aa9a6..a09423df83f2 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_ALTERA_TSE) += altera/ +obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 38eaea18da23..00f9ee3fc3e5 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -192,8 +192,8 @@ static int desc_list_init(struct net_device *dev) goto init_error; skb_reserve(new_skb, NET_IP_ALIGN); - /* Invidate the data cache of skb->data range when it is write back - * cache. It will prevent overwritting the new data from DMA + /* Invalidate the data cache of skb->data range when it is write back + * cache. It will prevent overwriting the new data from DMA */ blackfin_dcache_invalidate_range((unsigned long)new_skb->head, (unsigned long)new_skb->end); @@ -1205,7 +1205,7 @@ static void bfin_mac_rx(struct bfin_mac_local *lp) } /* reserve 2 bytes for RXDWA padding */ skb_reserve(new_skb, NET_IP_ALIGN); - /* Invidate the data cache of skb->data range when it is write back + /* Invalidate the data cache of skb->data range when it is write back * cache. It will prevent overwritting the new data from DMA */ blackfin_dcache_invalidate_range((unsigned long)new_skb->head, @@ -1599,7 +1599,7 @@ static int bfin_mac_probe(struct platform_device *pdev) *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); /* probe mac */ - /*todo: how to proble? which is revision_register */ + /*todo: how to probe? 
which is revision_register */ bfin_write_EMAC_ADDRLO(0x12345678); if (bfin_read_EMAC_ADDRLO() != 0x12345678) { dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n"); diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index bca07c5c94bd..f8df8248035e 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1105,27 +1105,6 @@ static void greth_set_msglevel(struct net_device *dev, u32 value) struct greth_private *greth = netdev_priv(dev); greth->msg_enable = value; } -static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct greth_private *greth = netdev_priv(dev); - struct phy_device *phy = greth->phy; - - if (!phy) - return -ENODEV; - - return phy_ethtool_gset(phy, cmd); -} - -static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct greth_private *greth = netdev_priv(dev); - struct phy_device *phy = greth->phy; - - if (!phy) - return -ENODEV; - - return phy_ethtool_sset(phy, cmd); -} static int greth_get_regs_len(struct net_device *dev) { @@ -1157,12 +1136,12 @@ static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, vo static const struct ethtool_ops greth_ethtool_ops = { .get_msglevel = greth_get_msglevel, .set_msglevel = greth_set_msglevel, - .get_settings = greth_get_settings, - .set_settings = greth_set_settings, .get_drvinfo = greth_get_drvinfo, .get_regs_len = greth_get_regs_len, .get_regs = greth_get_regs, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static struct net_device_ops greth_netdev_ops = { @@ -1224,7 +1203,7 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) static void greth_link_change(struct net_device *dev) { struct greth_private *greth = netdev_priv(dev); - struct phy_device *phydev = greth->phy; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; u32 ctrl; @@ -1307,7 +1286,6 @@ static int greth_mdio_probe(struct net_device *dev) greth->link = 0; greth->speed = 0; greth->duplex = -1; - greth->phy = phy; return 0; } @@ -1325,6 +1303,7 @@ static int greth_mdio_init(struct greth_private *greth) { int ret; unsigned long timeout; + struct net_device *ndev = greth->netdev; greth->mdio = mdiobus_alloc(); if (!greth->mdio) { @@ -1349,15 +1328,16 @@ static int greth_mdio_init(struct greth_private *greth) goto unreg_mdio; } - phy_start(greth->phy); + phy_start(ndev->phydev); /* If Ethernet debug link is used make autoneg happen right away */ if (greth->edcl && greth_edcl == 1) { - phy_start_aneg(greth->phy); + phy_start_aneg(ndev->phydev); timeout = jiffies + 6*HZ; - while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) { + while (!phy_aneg_done(ndev->phydev) && + time_before(jiffies, timeout)) { } - phy_read_status(greth->phy); + phy_read_status(ndev->phydev); greth_link_change(greth->netdev); } @@ -1569,8 +1549,8 @@ static int greth_of_remove(struct platform_device *of_dev) dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); - if (greth->phy) - phy_stop(greth->phy); + if (ndev->phydev) + phy_stop(ndev->phydev); mdiobus_unregister(greth->mdio); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 92dd918e4a83..9c07140a5d8d 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ 
b/drivers/net/ethernet/aeroflex/greth.h @@ -123,7 +123,6 @@ struct greth_private { struct napi_struct napi; spinlock_t devlock; - struct phy_device *phy; struct mii_bus *mdio; unsigned int link; unsigned int speed; diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig new file mode 100644 index 000000000000..99b30353541a --- /dev/null +++ b/drivers/net/ethernet/amazon/Kconfig @@ -0,0 +1,27 @@ +# +# Amazon network device configuration +# + +config NET_VENDOR_AMAZON + bool "Amazon Devices" + default y + ---help--- + If you have a network (Ethernet) device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Amazon devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_AMAZON + +config ENA_ETHERNET + tristate "Elastic Network Adapter (ENA) support" + depends on (PCI_MSI && X86) + ---help--- + This driver supports Elastic Network Adapter (ENA) + + To compile this driver as a module, choose M here. + The module will be called ena. + +endif #NET_VENDOR_AMAZON diff --git a/drivers/net/ethernet/amazon/Makefile b/drivers/net/ethernet/amazon/Makefile new file mode 100644 index 000000000000..8e0b73f60d51 --- /dev/null +++ b/drivers/net/ethernet/amazon/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Amazon network device drivers. +# + +obj-$(CONFIG_NET_VENDOR_AMAZON) += ena/ diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile new file mode 100644 index 000000000000..eaeeae06c5d9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Elastic Network Adapter (ENA) device drivers. +# + +obj-$(CONFIG_ENA_ETHERNET) += ena.o + +ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h new file mode 100644 index 000000000000..a46e749bf226 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -0,0 +1,973 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ +#ifndef _ENA_ADMIN_H_ +#define _ENA_ADMIN_H_ + +enum ena_admin_aq_opcode { + ENA_ADMIN_CREATE_SQ = 1, + + ENA_ADMIN_DESTROY_SQ = 2, + + ENA_ADMIN_CREATE_CQ = 3, + + ENA_ADMIN_DESTROY_CQ = 4, + + ENA_ADMIN_GET_FEATURE = 8, + + ENA_ADMIN_SET_FEATURE = 9, + + ENA_ADMIN_GET_STATS = 11, +}; + +enum ena_admin_aq_completion_status { + ENA_ADMIN_SUCCESS = 0, + + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + + ENA_ADMIN_BAD_OPCODE = 2, + + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + + ENA_ADMIN_MALFORMED_REQUEST = 4, + + /* Additional status is provided in ACQ entry extended_status */ + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + + ENA_ADMIN_UNKNOWN_ERROR = 6, +}; + +enum ena_admin_aq_feature_id { + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + + ENA_ADMIN_MAX_QUEUES_NUM = 2, + + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + + ENA_ADMIN_MTU = 14, + + ENA_ADMIN_RSS_HASH_INPUT = 18, + + ENA_ADMIN_INTERRUPT_MODERATION = 20, + + ENA_ADMIN_AENQ_CONFIG = 26, + + ENA_ADMIN_LINK_CONFIG = 27, + + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, +}; + +enum ena_admin_placement_policy_type { + /* descriptors and headers are in host memory */ + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, + + /* descriptors and headers are in device memory (a.k.a Low Latency + * Queue) + */ + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, +}; + +enum ena_admin_link_types { + ENA_ADMIN_LINK_SPEED_1G = 0x1, + + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + + ENA_ADMIN_LINK_SPEED_5G = 0x4, + + ENA_ADMIN_LINK_SPEED_10G = 0x8, + + ENA_ADMIN_LINK_SPEED_25G = 0x10, + + ENA_ADMIN_LINK_SPEED_40G = 0x20, + + ENA_ADMIN_LINK_SPEED_50G = 0x40, + + ENA_ADMIN_LINK_SPEED_100G = 0x80, + + ENA_ADMIN_LINK_SPEED_200G = 0x100, + + ENA_ADMIN_LINK_SPEED_400G = 0x200, +}; + +enum ena_admin_completion_policy_type { + /* completion queue entry for each sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, + + /* completion queue entry upon request in sq descriptor */ + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, + + /* current queue head pointer is updated in OS memory upon sq + * descriptor request + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, + + /* current queue head pointer is updated in OS memory for each sq + * descriptor + */ + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, +}; + +/* basic stats return ena_admin_basic_stats while extended stats return a + * buffer (string format) with additional statistics per queue and per + * device id + */ +enum ena_admin_get_stats_type { + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, +}; + +enum ena_admin_get_stats_scope { + ENA_ADMIN_SPECIFIC_QUEUE = 0, + + ENA_ADMIN_ETH_TRAFFIC = 1, +}; + +struct ena_admin_aq_common_desc { + /* 11:0 : command_id + * 15:12 : reserved12 + */ + u16 command_id; + + /* as appears in ena_admin_aq_opcode */ + u8 opcode; + + /* 0 : phase + * 1 : ctrl_data - control buffer address valid + * 2 : ctrl_data_indirect - control buffer address + * points to list of pages with addresses of control + * buffers + * 7:3 : reserved3 + */ + u8 flags; +}; + +/* used in ena_admin_aq_entry. Can point directly to control data, or to a + * page list chunk. Used also at the end of indirect mode page list chunks, + * for chaining.
+ */ +struct ena_admin_ctrl_buff_info { + u32 length; + + struct ena_common_mem_addr address; +}; + +struct ena_admin_sq { + u16 sq_idx; + + /* 4:0 : reserved + * 7:5 : sq_direction - 0x1 - Tx; 0x2 - Rx + */ + u8 sq_identity; + + u8 reserved1; +}; + +struct ena_admin_aq_entry { + struct ena_admin_aq_common_desc aq_common_descriptor; + + union { + u32 inline_data_w1[3]; + + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + u32 inline_data_w4[12]; +}; + +struct ena_admin_acq_common_desc { + /* command identifier to associate it with the aq descriptor + * 11:0 : command_id + * 15:12 : reserved12 + */ + u16 command; + + u8 status; + + /* 0 : phase + * 7:1 : reserved1 + */ + u8 flags; + + u16 extended_status; + + /* serves as a hint what AQ entries can be revoked */ + u16 sq_head_indx; +}; + +struct ena_admin_acq_entry { + struct ena_admin_acq_common_desc acq_common_descriptor; + + u32 response_specific_data[14]; +}; + +struct ena_admin_aq_create_sq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* 4:0 : reserved0_w1 + * 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx + */ + u8 sq_identity; + + u8 reserved8_w1; + + /* 3:0 : placement_policy - Describing where the SQ + * descriptor ring and the SQ packet headers reside: + * 0x1 - descriptors and headers are in OS memory, + * 0x3 - descriptors and headers in device memory + * (a.k.a Low Latency Queue) + * 6:4 : completion_policy - Describing what policy + * to use for generation completion entry (cqe) in + * the CQ associated with this SQ: 0x0 - cqe for each + * sq descriptor, 0x1 - cqe upon request in sq + * descriptor, 0x2 - current queue head pointer is + * updated in OS memory upon sq descriptor request + * 0x3 - current queue head pointer is updated in OS + * memory for each sq descriptor + * 7 : reserved15_w1 + */ + u8 sq_caps_2; + + /* 0 : is_physically_contiguous - Described if the + * queue ring memory is allocated in physical + * contiguous pages or split. + * 7:1 : reserved17_w1 + */ + u8 sq_caps_3; + + /* associated completion queue id. This CQ must be created prior to + * SQ creation + */ + u16 cq_idx; + + /* submission queue depth in entries */ + u16 sq_depth; + + /* SQ physical base address in OS memory. This field should not be + * used for Low Latency queues. Has to be page aligned. + */ + struct ena_common_mem_addr sq_ba; + + /* specifies queue head writeback location in OS memory. Valid if + * completion_policy is set to completion_policy_head_on_demand or + * completion_policy_head. 
Has to be cache aligned + */ + struct ena_common_mem_addr sq_head_writeback; + + u32 reserved0_w7; + + u32 reserved0_w8; +}; + +enum ena_admin_sq_direction { + ENA_ADMIN_SQ_DIRECTION_TX = 1, + + ENA_ADMIN_SQ_DIRECTION_RX = 2, +}; + +struct ena_admin_acq_create_sq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; + + u16 sq_idx; + + u16 reserved; + + /* queue doorbell address as an offset to PCIe MMIO REG BAR */ + u32 sq_doorbell_offset; + + /* low latency queue ring base address as an offset to PCIe MMIO + * LLQ_MEM BAR + */ + u32 llq_descriptors_offset; + + /* low latency queue headers' memory as an offset to PCIe MMIO + * LLQ_MEM BAR + */ + u32 llq_headers_offset; +}; + +struct ena_admin_aq_destroy_sq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_sq sq; +}; + +struct ena_admin_acq_destroy_sq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; +}; + +struct ena_admin_aq_create_cq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + /* 4:0 : reserved5 + * 5 : interrupt_mode_enabled - if set, cq operates + * in interrupt mode, otherwise - polling + * 7:6 : reserved6 + */ + u8 cq_caps_1; + + /* 4:0 : cq_entry_size_words - size of CQ entry in + * 32-bit words, valid values: 4, 8. + * 7:5 : reserved7 + */ + u8 cq_caps_2; + + /* completion queue depth in # of entries. must be power of 2 */ + u16 cq_depth; + + /* msix vector assigned to this cq */ + u32 msix_vector; + + /* cq physical base address in OS memory. CQ must be physically + * contiguous + */ + struct ena_common_mem_addr cq_ba; +}; + +struct ena_admin_acq_create_cq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; + + u16 cq_idx; + + /* actual cq depth in number of entries */ + u16 cq_actual_depth; + + u32 numa_node_register_offset; + + u32 cq_head_db_register_offset; + + u32 cq_interrupt_unmask_register_offset; +}; + +struct ena_admin_aq_destroy_cq_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + u16 cq_idx; + + u16 reserved1; +}; + +struct ena_admin_acq_destroy_cq_resp_desc { + struct ena_admin_acq_common_desc acq_common_desc; +}; + +/* ENA AQ Get Statistics command. Extended statistics are placed in control + * buffer pointed by AQ entry + */ +struct ena_admin_aq_get_stats_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + union { + /* command specific inline data */ + u32 inline_data_w1[3]; + + struct ena_admin_ctrl_buff_info control_buffer; + } u; + + /* stats type as defined in enum ena_admin_get_stats_type */ + u8 type; + + /* stats scope defined in enum ena_admin_get_stats_scope */ + u8 scope; + + u16 reserved3; + + /* queue id. used when scope is specific_queue */ + u16 queue_idx; + + /* device id, value 0xFFFF means mine. only privileged device can get + * stats of other device + */ + u16 device_id; +}; + +/* Basic Statistics Command. 
+struct ena_admin_basic_stats {
+ u32 tx_bytes_low;
+
+ u32 tx_bytes_high;
+
+ u32 tx_pkts_low;
+
+ u32 tx_pkts_high;
+
+ u32 rx_bytes_low;
+
+ u32 rx_bytes_high;
+
+ u32 rx_pkts_low;
+
+ u32 rx_pkts_high;
+
+ u32 rx_drops_low;
+
+ u32 rx_drops_high;
+};
+
+struct ena_admin_acq_get_stats_resp {
+ struct ena_admin_acq_common_desc acq_common_desc;
+
+ struct ena_admin_basic_stats basic_stats;
+};
+
+struct ena_admin_get_set_feature_common_desc {
+ /* 1:0 : select - 0x1 - current value; 0x3 - default
+ * value
+ * 7:3 : reserved3
+ */
+ u8 flags;
+
+ /* as appears in ena_admin_aq_feature_id */
+ u8 feature_id;
+
+ u16 reserved16;
+};
+
+struct ena_admin_device_attr_feature_desc {
+ u32 impl_id;
+
+ u32 device_version;
+
+ /* bitmap of ena_admin_aq_feature_id */
+ u32 supported_features;
+
+ u32 reserved3;
+
+ /* Indicates how many bits are used for physical address access. */
+ u32 phys_addr_width;
+
+ /* Indicates how many bits are used for virtual address access. */
+ u32 virt_addr_width;
+
+ /* unicast MAC address (in Network byte order) */
+ u8 mac_addr[6];
+
+ u8 reserved7[2];
+
+ u32 max_mtu;
+};
+
+struct ena_admin_queue_feature_desc {
+ /* including LLQs */
+ u32 max_sq_num;
+
+ u32 max_sq_depth;
+
+ u32 max_cq_num;
+
+ u32 max_cq_depth;
+
+ u32 max_llq_num;
+
+ u32 max_llq_depth;
+
+ u32 max_header_size;
+
+ /* Maximum Descriptors number, including meta descriptor, allowed for
+ * a single Tx packet
+ */
+ u16 max_packet_tx_descs;
+
+ /* Maximum Descriptors number allowed for a single Rx packet */
+ u16 max_packet_rx_descs;
+};
+
+struct ena_admin_set_feature_mtu_desc {
+ /* exclude L2 */
+ u32 mtu;
+};
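+/* For example, a standard 1500-byte IP MTU is requested with mtu = 1500;
+ * the L2 (Ethernet) header is excluded and accounted for separately.
+ */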
+
+struct ena_admin_set_feature_host_attr_desc {
+ /* host OS info base address in OS memory. host info is 4KB of
+ * physically contiguous memory
+ */
+ struct ena_common_mem_addr os_info_ba;
+
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
+ */
+ struct ena_common_mem_addr debug_ba;
+
+ /* debug area size */
+ u32 debug_area_size;
+};
+
+struct ena_admin_feature_intr_moder_desc {
+ /* interrupt delay granularity in usec */
+ u16 intr_delay_resolution;
+
+ u16 reserved;
+};
+
+struct ena_admin_get_feature_link_desc {
+ /* Link speed in Mb */
+ u32 speed;
+
+ /* bit field of enum ena_admin_link types */
+ u32 supported;
+
+ /* 0 : autoneg
+ * 1 : duplex - Full Duplex
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
+struct ena_admin_feature_aenq_desc {
+ /* bitmask for AENQ groups the device can report */
+ u32 supported_groups;
+
+ /* bitmask for AENQ groups to report */
+ u32 enabled_groups;
+};
+
+struct ena_admin_feature_offload_desc {
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
+ */
+ u32 tx;
+
+ /* Receive side supported stateless offload
+ * 0 : RX_L3_csum_ipv4 - IPv4 checksum
+ * 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
+ * 2 : RX_L4_ipv6_csum - TCP/UDP/IPv6 checksum
+ * 3 : RX_hash - Hash calculation
+ */
+ u32 rx_supported;
+
+ u32 rx_enabled;
+};
+
+enum ena_admin_hash_functions {
+ ENA_ADMIN_TOEPLITZ = 1,
+
+ ENA_ADMIN_CRC32 = 2,
+};
+
+struct ena_admin_feature_rss_flow_hash_control {
+ u32 keys_num;
+
+ u32 reserved;
+
+ u32 key[10];
+};
+
+struct ena_admin_feature_rss_flow_hash_function {
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
+ u32 supported_func;
+
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
+ */
+ u32 selected_func;
+
+ /* initial value */
+ u32 init_val;
+};
+
+/* RSS flow hash protocols */
+enum ena_admin_flow_hash_proto {
+ ENA_ADMIN_RSS_TCP4 = 0,
+
+ ENA_ADMIN_RSS_UDP4 = 1,
+
+ ENA_ADMIN_RSS_TCP6 = 2,
+
+ ENA_ADMIN_RSS_UDP6 = 3,
+
+ ENA_ADMIN_RSS_IP4 = 4,
+
+ ENA_ADMIN_RSS_IP6 = 5,
+
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
+
+ ENA_ADMIN_RSS_NOT_IP = 7,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
+};
+
+/* RSS flow hash fields */
+enum ena_admin_flow_hash_fields {
+ /* Ethernet Dest Addr */
+ ENA_ADMIN_RSS_L2_DA = 0,
+
+ /* Ethernet Src Addr */
+ ENA_ADMIN_RSS_L2_SA = 1,
+
+ /* ipv4/6 Dest Addr */
+ ENA_ADMIN_RSS_L3_DA = 2,
+
+ /* ipv4/6 Src Addr */
+ ENA_ADMIN_RSS_L3_SA = 5,
+
+ /* tcp/udp Dest Port */
+ ENA_ADMIN_RSS_L4_DP = 6,
+
+ /* tcp/udp Src Port */
+ ENA_ADMIN_RSS_L4_SP = 7,
+};
+
+struct ena_admin_proto_input {
+ /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
+ u16 fields;
+
+ u16 reserved2;
+};
+
+struct ena_admin_feature_rss_hash_control {
+ struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
+
+ struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
+};
+
+struct ena_admin_feature_rss_flow_hash_input {
+ /* supported hash input sorting
+ * 1 : L3_sort - support swapping L3 addresses if DA
+ * is smaller than SA
+ * 2 : L4_sort - support swapping L4 ports if DP is
+ * smaller than SP
+ */
+ u16 supported_input_sort;
+
+ /* enabled hash input sorting
+ * 1 : enable_L3_sort - enable swapping L3 addresses
+ * if DA is smaller than SA
+ * 2 : enable_L4_sort - enable swapping L4 ports if
+ * DP is smaller than SP
+ */
+ u16 enabled_input_sort;
+};
+
+enum ena_admin_os_type {
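+ /* reported to the device in ena_admin_host_info.os_type; a Linux
+ * driver, for example, would set ENA_ADMIN_OS_LINUX here
+ */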
ENA_ADMIN_OS_LINUX = 1, + + ENA_ADMIN_OS_WIN = 2, + + ENA_ADMIN_OS_DPDK = 3, + + ENA_ADMIN_OS_FREEBSD = 4, + + ENA_ADMIN_OS_IPXE = 5, +}; + +struct ena_admin_host_info { + /* defined in enum ena_admin_os_type */ + u32 os_type; + + /* os distribution string format */ + u8 os_dist_str[128]; + + /* OS distribution numeric format */ + u32 os_dist; + + /* kernel version string format */ + u8 kernel_ver_str[32]; + + /* Kernel version numeric format */ + u32 kernel_ver; + + /* 7:0 : major + * 15:8 : minor + * 23:16 : sub_minor + */ + u32 driver_version; + + /* features bitmap */ + u32 supported_network_features[4]; +}; + +struct ena_admin_rss_ind_table_entry { + u16 cq_idx; + + u16 reserved; +}; + +struct ena_admin_feature_rss_ind_table { + /* min supported table size (2^min_size) */ + u16 min_size; + + /* max supported table size (2^max_size) */ + u16 max_size; + + /* table size (2^size) */ + u16 size; + + u16 reserved; + + /* index of the inline entry. 0xFFFFFFFF means invalid */ + u32 inline_index; + + /* used for updating single entry, ignored when setting the entire + * table through the control buffer. + */ + struct ena_admin_rss_ind_table_entry inline_entry; +}; + +struct ena_admin_get_feat_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_ctrl_buff_info control_buffer; + + struct ena_admin_get_set_feature_common_desc feat_common; + + u32 raw[11]; +}; + +struct ena_admin_get_feat_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + union { + u32 raw[14]; + + struct ena_admin_device_attr_feature_desc dev_attr; + + struct ena_admin_queue_feature_desc max_queue; + + struct ena_admin_feature_aenq_desc aenq; + + struct ena_admin_get_feature_link_desc link; + + struct ena_admin_feature_offload_desc offload; + + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + struct ena_admin_feature_rss_ind_table ind_table; + + struct ena_admin_feature_intr_moder_desc intr_moderation; + } u; +}; + +struct ena_admin_set_feat_cmd { + struct ena_admin_aq_common_desc aq_common_descriptor; + + struct ena_admin_ctrl_buff_info control_buffer; + + struct ena_admin_get_set_feature_common_desc feat_common; + + union { + u32 raw[11]; + + /* mtu size */ + struct ena_admin_set_feature_mtu_desc mtu; + + /* host attributes */ + struct ena_admin_set_feature_host_attr_desc host_attr; + + /* AENQ configuration */ + struct ena_admin_feature_aenq_desc aenq; + + /* rss flow hash function */ + struct ena_admin_feature_rss_flow_hash_function flow_hash_func; + + /* rss flow hash input */ + struct ena_admin_feature_rss_flow_hash_input flow_hash_input; + + /* rss indirection table */ + struct ena_admin_feature_rss_ind_table ind_table; + } u; +}; + +struct ena_admin_set_feat_resp { + struct ena_admin_acq_common_desc acq_common_desc; + + union { + u32 raw[14]; + } u; +}; + +struct ena_admin_aenq_common_desc { + u16 group; + + u16 syndrom; + + /* 0 : phase */ + u8 flags; + + u8 reserved1[3]; + + u32 timestamp_low; + + u32 timestamp_high; +}; + +/* asynchronous event notification groups */ +enum ena_admin_aenq_group { + ENA_ADMIN_LINK_CHANGE = 0, + + ENA_ADMIN_FATAL_ERROR = 1, + + ENA_ADMIN_WARNING = 2, + + ENA_ADMIN_NOTIFICATION = 3, + + ENA_ADMIN_KEEP_ALIVE = 4, + + ENA_ADMIN_AENQ_GROUPS_NUM = 5, +}; + +enum ena_admin_aenq_notification_syndrom { + ENA_ADMIN_SUSPEND = 0, + + ENA_ADMIN_RESUME = 1, +}; + +struct ena_admin_aenq_entry { + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* command specific 
inline data */ + u32 inline_data_w4[12]; +}; + +struct ena_admin_aenq_link_change_desc { + struct ena_admin_aenq_common_desc aenq_common_desc; + + /* 0 : link_status */ + u32 flags; +}; + +struct ena_admin_ena_mmio_req_read_less_resp { + u16 req_id; + + u16 reg_off; + + /* value is valid when poll is cleared */ + u32 reg_val; +}; + +/* aq_common_desc */ +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) + +/* sq */ +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) + +/* acq_common_desc */ +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aq_create_sq_cmd */ +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) + +/* aq_create_cq_cmd */ +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5) +#define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) + +/* get_set_feature_common_desc */ +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) + +/* get_feature_link_desc */ +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) + +/* feature_offload_desc */ +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) + +/* feature_rss_flow_hash_function */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) +#define 
ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0) + +/* feature_rss_flow_hash_input */ +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) + +/* host_info */ +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) + +/* aenq_common_desc */ +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) + +/* aenq_link_change_desc */ +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) + +#endif /*_ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c new file mode 100644 index 000000000000..3066d9c99984 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -0,0 +1,2666 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include "ena_com.h"
+
+/*****************************************************************************/
+/*****************************************************************************/
+
+/* Timeout in micro-sec */
+#define ADMIN_CMD_TIMEOUT_US (1000000)
+
+#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ADMIN_QUEUE_DEPTH 32
+
+#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
+ | (ENA_COMMON_SPEC_VERSION_MINOR))
+
+#define ENA_CTRL_MAJOR 0
+#define ENA_CTRL_MINOR 0
+#define ENA_CTRL_SUB_MINOR 1
+
+#define MIN_ENA_CTRL_VER \
+ (((ENA_CTRL_MAJOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
+ ((ENA_CTRL_MINOR) << \
+ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
+ (ENA_CTRL_SUB_MINOR))
+
+#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
+#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
+
+#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
+
+/*****************************************************************************/
+/*****************************************************************************/
+/*****************************************************************************/
+
+enum ena_cmd_status {
+ ENA_CMD_SUBMITTED,
+ ENA_CMD_COMPLETED,
+ /* Abort - canceled by the driver */
+ ENA_CMD_ABORTED,
+};
+
+struct ena_comp_ctx {
+ struct completion wait_event;
+ struct ena_admin_acq_entry *user_cqe;
+ u32 comp_size;
+ enum ena_cmd_status status;
+ /* status from the device */
+ u8 comp_status;
+ u8 cmd_opcode;
+ bool occupied;
+};
+
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
+static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+ struct ena_common_mem_addr *ena_addr,
+ dma_addr_t addr)
+{
+ if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
+ pr_err("dma address has more bits than the device supports\n");
+ return -EINVAL;
+ }
+
+ ena_addr->mem_addr_low = (u32)addr;
+ ena_addr->mem_addr_high = (u64)addr >> 32;
+
+ return 0;
+}
+
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+
+ sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
+ GFP_KERNEL);
+
+ if (!sq->entries) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
+
+ sq->db_addr = NULL;
+
+ return 0;
+}
+
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+{
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
+ GFP_KERNEL);
+
+ if (!cq->entries) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ cq->head = 0;
+ cq->phase = 1;
+
+ return 0;
+}
+
+static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+ struct ena_aenq_handlers *aenq_handlers)
+{
+ struct ena_com_aenq *aenq = &dev->aenq;
+ u32 addr_low, addr_high, aenq_caps;
+ u16 size;
+
+ dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
+ GFP_KERNEL);
+
+ if (!aenq->entries) {
+ pr_err("memory allocation failed");
+ return -ENOMEM;
+ }
+
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
+
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
+
+ writel(addr_low,
dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); + writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); + + aenq_caps = 0; + aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; + aenq_caps |= (sizeof(struct ena_admin_aenq_entry) + << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; + writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); + + if (unlikely(!aenq_handlers)) { + pr_err("aenq handlers pointer is NULL\n"); + return -EINVAL; + } + + aenq->aenq_handlers = aenq_handlers; + + return 0; +} + +static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) +{ + comp_ctx->occupied = false; + atomic_dec(&queue->outstanding_cmds); +} + +static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, + u16 command_id, bool capture) +{ + if (unlikely(command_id >= queue->q_depth)) { + pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, queue->q_depth); + return NULL; + } + + if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { + pr_err("Completion context is occupied\n"); + return NULL; + } + + if (capture) { + atomic_inc(&queue->outstanding_cmds); + queue->comp_ctx[command_id].occupied = true; + } + + return &queue->comp_ctx[command_id]; +} + +static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + struct ena_comp_ctx *comp_ctx; + u16 tail_masked, cmd_id; + u16 queue_size_mask; + u16 cnt; + + queue_size_mask = admin_queue->q_depth - 1; + + tail_masked = admin_queue->sq.tail & queue_size_mask; + + /* In case of queue FULL */ + cnt = admin_queue->sq.tail - admin_queue->sq.head; + if (cnt >= admin_queue->q_depth) { + pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", + admin_queue->sq.tail, admin_queue->sq.head, + admin_queue->q_depth); + admin_queue->stats.out_of_space++; + return ERR_PTR(-ENOSPC); + } + + cmd_id = admin_queue->curr_cmd_id; + + cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & + ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; + + cmd->aq_common_descriptor.command_id |= cmd_id & + ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); + if (unlikely(!comp_ctx)) + return ERR_PTR(-EINVAL); + + comp_ctx->status = ENA_CMD_SUBMITTED; + comp_ctx->comp_size = (u32)comp_size_in_bytes; + comp_ctx->user_cqe = comp; + comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; + + reinit_completion(&comp_ctx->wait_event); + + memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); + + admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & + queue_size_mask; + + admin_queue->sq.tail++; + admin_queue->stats.submitted_cmd++; + + if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) + admin_queue->sq.phase = !admin_queue->sq.phase; + + writel(admin_queue->sq.tail, admin_queue->sq.db_addr); + + return comp_ctx; +} + +static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) +{ + size_t size = queue->q_depth * sizeof(struct ena_comp_ctx); + struct ena_comp_ctx *comp_ctx; + u16 i; + + queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL); + if (unlikely(!queue->comp_ctx)) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + for (i = 0; i < queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(queue, i, false); + if (comp_ctx) + 
init_completion(&comp_ctx->wait_event); + } + + return 0; +} + +static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size_in_bytes, + struct ena_admin_acq_entry *comp, + size_t comp_size_in_bytes) +{ + unsigned long flags; + struct ena_comp_ctx *comp_ctx; + + spin_lock_irqsave(&admin_queue->q_lock, flags); + if (unlikely(!admin_queue->running_state)) { + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + return ERR_PTR(-ENODEV); + } + comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, + cmd_size_in_bytes, + comp, + comp_size_in_bytes); + if (unlikely(IS_ERR(comp_ctx))) + admin_queue->running_state = false; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + return comp_ctx; +} + +static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, + struct ena_com_io_sq *io_sq) +{ + size_t size; + int dev_node = 0; + + memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + + io_sq->desc_entry_size = + (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? + sizeof(struct ena_eth_io_tx_desc) : + sizeof(struct ena_eth_io_rx_desc); + + size = io_sq->desc_entry_size * io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + dev_node = dev_to_node(ena_dev->dmadev); + set_dev_node(ena_dev->dmadev, ctx->numa_node); + io_sq->desc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_sq->desc_addr.phys_addr, + GFP_KERNEL); + set_dev_node(ena_dev->dmadev, dev_node); + if (!io_sq->desc_addr.virt_addr) { + io_sq->desc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_sq->desc_addr.phys_addr, + GFP_KERNEL); + } + } else { + dev_node = dev_to_node(ena_dev->dmadev); + set_dev_node(ena_dev->dmadev, ctx->numa_node); + io_sq->desc_addr.virt_addr = + devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + set_dev_node(ena_dev->dmadev, dev_node); + if (!io_sq->desc_addr.virt_addr) { + io_sq->desc_addr.virt_addr = + devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + } + } + + if (!io_sq->desc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + io_sq->tail = 0; + io_sq->next_to_comp = 0; + io_sq->phase = 1; + + return 0; +} + +static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, + struct ena_com_io_cq *io_cq) +{ + size_t size; + int prev_node = 0; + + memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); + + /* Use the basic completion descriptor for Rx */ + io_cq->cdesc_entry_size_in_bytes = + (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
+ sizeof(struct ena_eth_io_tx_cdesc) : + sizeof(struct ena_eth_io_rx_cdesc_base); + + size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; + + prev_node = dev_to_node(ena_dev->dmadev); + set_dev_node(ena_dev->dmadev, ctx->numa_node); + io_cq->cdesc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); + set_dev_node(ena_dev->dmadev, prev_node); + if (!io_cq->cdesc_addr.virt_addr) { + io_cq->cdesc_addr.virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, size, + &io_cq->cdesc_addr.phys_addr, + GFP_KERNEL); + } + + if (!io_cq->cdesc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + + io_cq->phase = 1; + io_cq->head = 0; + + return 0; +} + +static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, + struct ena_admin_acq_entry *cqe) +{ + struct ena_comp_ctx *comp_ctx; + u16 cmd_id; + + cmd_id = cqe->acq_common_descriptor.command & + ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; + + comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); + if (unlikely(!comp_ctx)) { + pr_err("comp_ctx is NULL. Changing the admin queue running state\n"); + admin_queue->running_state = false; + return; + } + + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; + + if (comp_ctx->user_cqe) + memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); + + if (!admin_queue->polling) + complete(&comp_ctx->wait_event); +} + +static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) +{ + struct ena_admin_acq_entry *cqe = NULL; + u16 comp_num = 0; + u16 head_masked; + u8 phase; + + head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); + phase = admin_queue->cq.phase; + + cqe = &admin_queue->cq.entries[head_masked]; + + /* Go over all the completions */ + while ((cqe->acq_common_descriptor.flags & + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + /* Do not read the rest of the completion entry before the + * phase bit was validated + */ + rmb(); + ena_com_handle_single_admin_completion(admin_queue, cqe); + + head_masked++; + comp_num++; + if (unlikely(head_masked == admin_queue->q_depth)) { + head_masked = 0; + phase = !phase; + } + + cqe = &admin_queue->cq.entries[head_masked]; + } + + admin_queue->cq.head += comp_num; + admin_queue->cq.phase = phase; + admin_queue->sq.head += comp_num; + admin_queue->stats.completed_cmd += comp_num; +} + +static int ena_com_comp_status_to_errno(u8 comp_status) +{ + if (unlikely(comp_status != 0)) + pr_err("admin command failed[%u]\n", comp_status); + + if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) + return -EINVAL; + + switch (comp_status) { + case ENA_ADMIN_SUCCESS: + return 0; + case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: + return -ENOMEM; + case ENA_ADMIN_UNSUPPORTED_OPCODE: + return -EPERM; + case ENA_ADMIN_BAD_OPCODE: + case ENA_ADMIN_MALFORMED_REQUEST: + case ENA_ADMIN_ILLEGAL_PARAMETER: + case ENA_ADMIN_UNKNOWN_ERROR: + return -EINVAL; + } + + return 0; +} + +static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, + struct ena_com_admin_queue *admin_queue) +{ + unsigned long flags; + u32 start_time; + int ret; + + start_time = ((u32)jiffies_to_usecs(jiffies)); + + while (comp_ctx->status == ENA_CMD_SUBMITTED) { + if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > + ADMIN_CMD_TIMEOUT_US) { + pr_err("Wait for completion (polling) timeout\n"); + /* ENA didn't have any completion */ + spin_lock_irqsave(&admin_queue->q_lock, flags); + 
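+ /* stats and running_state are shared with the
+ * completion path, so both updates below are done
+ * under q_lock
+ */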
+ admin_queue->stats.no_completion++;
+ admin_queue->running_state = false;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ ret = -ETIME;
+ goto err;
+ }
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ msleep(100);
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+ pr_err("Command was aborted\n");
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+ comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ unsigned long flags;
+ int ret;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+ usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
+
+ /* In case the command wasn't completed, find out the root cause.
+ * There might be 2 kinds of errors
+ * 1) No completion (timeout reached)
+ * 2) There is a completion but the driver didn't receive any MSI-X
+ * interrupt.
+ */
+ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ admin_queue->stats.no_completion++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+ if (comp_ctx->status == ENA_CMD_COMPLETED)
+ pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
+ comp_ctx->cmd_opcode);
+ else
+ pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
+
+ admin_queue->running_state = false;
+ ret = -ETIME;
+ goto err;
+ }
+
+ ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+err:
+ comp_ctxt_release(admin_queue, comp_ctx);
+ return ret;
+}
+
+/* This method reads the hardware device register by posting writes
+ * and waiting for a response.
+ * On timeout the function returns ENA_MMIO_READ_TIMEOUT
+ */
+static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+{
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, ret;
+ unsigned long flags;
+ int i;
+
+ might_sleep();
+
+ /* If readless is disabled, perform regular read */
+ if (!mmio_read->readless_supported)
+ return readl(ena_dev->reg_bar + offset);
+
+ spin_lock_irqsave(&mmio_read->lock, flags);
+ mmio_read->seq_num++;
+
+ read_resp->req_id = mmio_read->seq_num + 0xDEAD;
+ mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
+ ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
+ mmio_read_reg |= mmio_read->seq_num &
+ ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
+
+ /* make sure read_resp->req_id gets updated before the hw can write
+ * to it
+ */
+ wmb();
+
+ writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+
+ for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ if (read_resp->req_id == mmio_read->seq_num)
+ break;
+
+ udelay(1);
+ }
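+ /* the device echoes the sequence number into read_resp->req_id once
+ * read_resp->reg_val is valid; leaving the loop without a match
+ * means the posted read timed out
+ */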
+
+ if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+ pr_err("Read failure: wrong offset provided");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+ }
+err:
+ spin_unlock_irqrestore(&mmio_read->lock, flags);
+
+ return ret;
+}
+
+/* There are two ways to wait for completion.
+ * Polling mode - wait until the completion is available.
+ * Async mode - wait on wait queue until the completion is ready
+ * (or the timeout expired).
+ * It is expected that the IRQ handler has called
+ * ena_com_handle_admin_completion to mark the completions.
+ */
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (admin_queue->polling)
+ return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
+ admin_queue);
+
+ return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
+ admin_queue);
+}
+
+static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+{
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
+ struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
+ u8 direction;
+ int ret;
+
+ memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+
+ if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ direction = ENA_ADMIN_SQ_DIRECTION_TX;
+ else
+ direction = ENA_ADMIN_SQ_DIRECTION_RX;
+
+ destroy_cmd.sq.sq_identity |= (direction <<
+ ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
+ ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+
+ destroy_cmd.sq.sq_idx = io_sq->idx;
+ destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+ pr_err("failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq,
+ struct ena_com_io_cq *io_cq)
+{
+ size_t size;
+
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+ }
+
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ dma_free_coherent(ena_dev->dmadev, size,
+ io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
+ else
+ devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+ }
+}
+
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
+{
+ u32 val, i;
+
+ for (i = 0; i < timeout; i++) {
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+ pr_err("Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
+ exp_state)
+ return 0;
+
+ /* The resolution of the timeout is 100ms */
+ msleep(100);
+ }
+
+ return -ETIME;
+}
+
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
+{
+ u32 feature_mask = 1 << feature_id;
+
+ /* Device
attributes is always supported */ + if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && + !(ena_dev->supported_features & feature_mask)) + return false; + + return true; +} + +static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id, + dma_addr_t control_buf_dma_addr, + u32 control_buff_size) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_get_feat_cmd get_cmd; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { + pr_info("Feature %d isn't supported\n", feature_id); + return -EPERM; + } + + memset(&get_cmd, 0x0, sizeof(get_cmd)); + admin_queue = &ena_dev->admin_queue; + + get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; + + if (control_buff_size) + get_cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + else + get_cmd.aq_common_descriptor.flags = 0; + + ret = ena_com_mem_addr_set(ena_dev, + &get_cmd.control_buffer.address, + control_buf_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + get_cmd.control_buffer.length = control_buff_size; + + get_cmd.feat_common.feature_id = feature_id; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *) + &get_cmd, + sizeof(get_cmd), + (struct ena_admin_acq_entry *) + get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + pr_err("Failed to submit get_feature command %d error: %d\n", + feature_id, ret); + + return ret; +} + +static int ena_com_get_feature(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *get_resp, + enum ena_admin_aq_feature_id feature_id) +{ + return ena_com_get_feature_ex(ena_dev, + get_resp, + feature_id, + 0, + 0); +} + +static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + rss->hash_key = + dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), + &rss->hash_key_dma_addr, GFP_KERNEL); + + if (unlikely(!rss->hash_key)) + return -ENOMEM; + + return 0; +} + +static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_key) + dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), + rss->hash_key, rss->hash_key_dma_addr); + rss->hash_key = NULL; +} + +static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + rss->hash_ctrl = + dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), + &rss->hash_ctrl_dma_addr, GFP_KERNEL); + + if (unlikely(!rss->hash_ctrl)) + return -ENOMEM; + + return 0; +} + +static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (rss->hash_ctrl) + dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), + rss->hash_ctrl, rss->hash_ctrl_dma_addr); + rss->hash_ctrl = NULL; +} + +static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, + u16 log_size) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + size_t tbl_size; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + if (unlikely(ret)) + return ret; + + if ((get_resp.u.ind_table.min_size > log_size) || + (get_resp.u.ind_table.max_size < log_size)) { + pr_err("indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", + 1 << log_size, 1 << get_resp.u.ind_table.min_size, + 1 << get_resp.u.ind_table.max_size); + return -EINVAL; + } + + tbl_size = (1ULL << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + rss->rss_ind_tbl = + dma_zalloc_coherent(ena_dev->dmadev, tbl_size, + &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); + if (unlikely(!rss->rss_ind_tbl)) + goto mem_err1; + + tbl_size = (1ULL << log_size) * sizeof(u16); + rss->host_rss_ind_tbl = + devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); + if (unlikely(!rss->host_rss_ind_tbl)) + goto mem_err2; + + rss->tbl_log_size = log_size; + + return 0; + +mem_err2: + tbl_size = (1ULL << log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr); + rss->rss_ind_tbl = NULL; +mem_err1: + rss->tbl_log_size = 0; + return -ENOMEM; +} + +static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + size_t tbl_size = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + if (rss->rss_ind_tbl) + dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, + rss->rss_ind_tbl_dma_addr); + rss->rss_ind_tbl = NULL; + + if (rss->host_rss_ind_tbl) + devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); + rss->host_rss_ind_tbl = NULL; +} + +static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_io_sq *io_sq, u16 cq_idx) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_sq_cmd create_cmd; + struct ena_admin_acq_create_sq_resp_desc cmd_completion; + u8 direction; + int ret; + + memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; + + if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + direction = ENA_ADMIN_SQ_DIRECTION_TX; + else + direction = ENA_ADMIN_SQ_DIRECTION_RX; + + create_cmd.sq_identity |= (direction << + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; + + create_cmd.sq_caps_2 |= io_sq->mem_queue_type & + ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; + + create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & + ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; + + create_cmd.sq_caps_3 |= + ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; + + create_cmd.cq_idx = cq_idx; + create_cmd.sq_depth = io_sq->q_depth; + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.sq_ba, + io_sq->desc_addr.phys_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + } + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + pr_err("Failed to create IO SQ. 
error: %d\n", ret); + return ret; + } + + io_sq->idx = cmd_completion.sq_idx; + + io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + (uintptr_t)cmd_completion.sq_doorbell_offset); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_headers_offset); + + io_sq->desc_addr.pbuf_dev_addr = + (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + + cmd_completion.llq_descriptors_offset); + } + + pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); + + return ret; +} + +static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_com_io_sq *io_sq; + u16 qid; + int i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + qid = rss->host_rss_ind_tbl[i]; + if (qid >= ENA_TOTAL_NUM_QUEUES) + return -EINVAL; + + io_sq = &ena_dev->io_sq_queues[qid]; + + if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) + return -EINVAL; + + rss->rss_ind_tbl[i].cq_idx = io_sq->idx; + } + + return 0; +} + +static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) +{ + u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; + struct ena_rss *rss = &ena_dev->rss; + u8 idx; + u16 i; + + for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) + dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; + + for (i = 0; i < 1 << rss->tbl_log_size; i++) { + if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) + return -EINVAL; + idx = (u8)rss->rss_ind_tbl[i].cq_idx; + + if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) + return -EINVAL; + + rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx]; + } + + return 0; +} + +static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) +{ + size_t size; + + size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS; + + ena_dev->intr_moder_tbl = + devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + if (!ena_dev->intr_moder_tbl) + return -ENOMEM; + + ena_com_config_default_interrupt_moderation_table(ena_dev); + + return 0; +} + +static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, + u16 intr_delay_resolution) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + unsigned int i; + + if (!intr_delay_resolution) { + pr_err("Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); + intr_delay_resolution = 1; + } + ena_dev->intr_delay_resolution = intr_delay_resolution; + + /* update Rx */ + for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++) + intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution; + + /* update Tx */ + ena_dev->intr_moder_tx_interval /= intr_delay_resolution; +} + +/*****************************************************************************/ +/******************************* API ******************************/ +/*****************************************************************************/ + +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *comp, + size_t comp_size) +{ + struct ena_comp_ctx *comp_ctx; + int ret; + + comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, + comp, comp_size); + if (unlikely(IS_ERR(comp_ctx))) { + pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx)); + return PTR_ERR(comp_ctx); + } + + ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); + if (unlikely(ret)) { + if (admin_queue->running_state) + pr_err("Failed to process command. ret = %d\n", ret); + else + pr_debug("Failed to process command. ret = %d\n", ret); + } + return ret; +} + +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_create_cq_cmd create_cmd; + struct ena_admin_acq_create_cq_resp_desc cmd_completion; + int ret; + + memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd)); + + create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; + + create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & + ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; + create_cmd.cq_caps_1 |= + ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; + + create_cmd.msix_vector = io_cq->msix_vector; + create_cmd.cq_depth = io_cq->q_depth; + + ret = ena_com_mem_addr_set(ena_dev, + &create_cmd.cq_ba, + io_cq->cdesc_addr.phys_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&create_cmd, + sizeof(create_cmd), + (struct ena_admin_acq_entry *)&cmd_completion, + sizeof(cmd_completion)); + if (unlikely(ret)) { + pr_err("Failed to create IO CQ. 
error: %d\n", ret); + return ret; + } + + io_cq->idx = cmd_completion.cq_idx; + + io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_interrupt_unmask_register_offset); + + if (cmd_completion.cq_head_db_register_offset) + io_cq->cq_head_db_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.cq_head_db_register_offset); + + if (cmd_completion.numa_node_register_offset) + io_cq->numa_node_cfg_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.numa_node_register_offset); + + pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); + + return ret; +} + +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq) +{ + if (qid >= ENA_TOTAL_NUM_QUEUES) { + pr_err("Invalid queue number %d but the max is %d\n", qid, + ENA_TOTAL_NUM_QUEUES); + return -EINVAL; + } + + *io_sq = &ena_dev->io_sq_queues[qid]; + *io_cq = &ena_dev->io_cq_queues[qid]; + + return 0; +} + +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_comp_ctx *comp_ctx; + u16 i; + + if (!admin_queue->comp_ctx) + return; + + for (i = 0; i < admin_queue->q_depth; i++) { + comp_ctx = get_comp_ctxt(admin_queue, i, false); + if (unlikely(!comp_ctx)) + break; + + comp_ctx->status = ENA_CMD_ABORTED; + + complete(&comp_ctx->wait_event); + } +} + +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags; + + spin_lock_irqsave(&admin_queue->q_lock, flags); + while (atomic_read(&admin_queue->outstanding_cmds) != 0) { + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + msleep(20); + spin_lock_irqsave(&admin_queue->q_lock, flags); + } + spin_unlock_irqrestore(&admin_queue->q_lock, flags); +} + +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_admin_aq_destroy_cq_cmd destroy_cmd; + struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; + int ret; + + memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); + + destroy_cmd.cq_idx = io_cq->idx; + destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&destroy_cmd, + sizeof(destroy_cmd), + (struct ena_admin_acq_entry *)&destroy_resp, + sizeof(destroy_resp)); + + if (unlikely(ret && (ret != -ENODEV))) + pr_err("Failed to destroy IO CQ. 
error: %d\n", ret); + + return ret; +} + +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) +{ + return ena_dev->admin_queue.running_state; +} + +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + unsigned long flags; + + spin_lock_irqsave(&admin_queue->q_lock, flags); + ena_dev->admin_queue.running_state = state; + spin_unlock_irqrestore(&admin_queue->q_lock, flags); +} + +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) +{ + u16 depth = ena_dev->aenq.q_depth; + + WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); + + /* Init head_db to mark that all entries in the queue + * are initially available + */ + writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG); + if (ret) { + pr_info("Can't get aenq configuration\n"); + return ret; + } + + if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { + pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", + get_resp.u.aenq.supported_groups, groups_flag); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; + cmd.u.aenq.enabled_groups = groups_flag; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to config AENQ ret: %d\n", ret); + + return ret; +} + +int ena_com_get_dma_width(struct ena_com_dev *ena_dev) +{ + u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + int width; + + if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> + ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; + + pr_debug("ENA dma width: %d\n", width); + + if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { + pr_err("DMA width illegal value: %d\n", width); + return -EINVAL; + } + + ena_dev->dma_addr_bits = width; + + return width; +} + +int ena_com_validate_version(struct ena_com_dev *ena_dev) +{ + u32 ver; + u32 ctrl_ver; + u32 ctrl_ver_masked; + + /* Make sure the ENA version and the controller version are at least + * as the driver expects + */ + ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); + ctrl_ver = ena_com_reg_bar_read32(ena_dev, + ENA_REGS_CONTROLLER_VERSION_OFF); + + if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || + (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + pr_info("ena device version: %d.%d\n", + (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); + + if (ver < MIN_ENA_VER) { + pr_err("ENA version is lower than the minimal version the driver supports\n"); + return -1; + } + + pr_info("ena controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> + 
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> + ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + + ctrl_ver_masked = + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); + + /* Validate the ctrl version without the implementation ID */ + if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { + pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); + return -1; + } + + return 0; +} + +void ena_com_admin_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_com_admin_cq *cq = &admin_queue->cq; + struct ena_com_admin_sq *sq = &admin_queue->sq; + struct ena_com_aenq *aenq = &ena_dev->aenq; + u16 size; + + if (admin_queue->comp_ctx) + devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); + admin_queue->comp_ctx = NULL; + size = ADMIN_SQ_SIZE(admin_queue->q_depth); + if (sq->entries) + dma_free_coherent(ena_dev->dmadev, size, sq->entries, + sq->dma_addr); + sq->entries = NULL; + + size = ADMIN_CQ_SIZE(admin_queue->q_depth); + if (cq->entries) + dma_free_coherent(ena_dev->dmadev, size, cq->entries, + cq->dma_addr); + cq->entries = NULL; + + size = ADMIN_AENQ_SIZE(aenq->q_depth); + if (ena_dev->aenq.entries) + dma_free_coherent(ena_dev->dmadev, size, aenq->entries, + aenq->dma_addr); + aenq->entries = NULL; +} + +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) +{ + ena_dev->admin_queue.polling = polling; +} + +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + spin_lock_init(&mmio_read->lock); + mmio_read->read_resp = + dma_zalloc_coherent(ena_dev->dmadev, + sizeof(*mmio_read->read_resp), + &mmio_read->read_resp_dma_addr, GFP_KERNEL); + if (unlikely(!mmio_read->read_resp)) + return -ENOMEM; + + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + mmio_read->read_resp->req_id = 0x0; + mmio_read->seq_num = 0x0; + mmio_read->readless_supported = true; + + return 0; +} + +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + mmio_read->readless_supported = readless_supported; +} + +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + + writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); + + dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), + mmio_read->read_resp, mmio_read->read_resp_dma_addr); + + mmio_read->read_resp = NULL; +} + +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) +{ + struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; + u32 addr_low, addr_high; + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); + + writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); + writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); +} + +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers 
*aenq_handlers, + bool init_spinlock) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; + int ret; + + dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + + if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { + pr_err("Reg read timeout occurred\n"); + return -ETIME; + } + + if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { + pr_err("Device isn't ready, abort com init\n"); + return -ENODEV; + } + + admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; + + admin_queue->q_dmadev = ena_dev->dmadev; + admin_queue->polling = false; + admin_queue->curr_cmd_id = 0; + + atomic_set(&admin_queue->outstanding_cmds, 0); + + if (init_spinlock) + spin_lock_init(&admin_queue->q_lock); + + ret = ena_com_init_comp_ctxt(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_sq(admin_queue); + if (ret) + goto error; + + ret = ena_com_admin_init_cq(admin_queue); + if (ret) + goto error; + + admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + ENA_REGS_AQ_DB_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); + + writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); + writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); + + addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); + addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); + + writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); + writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); + + aq_caps = 0; + aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; + aq_caps |= (sizeof(struct ena_admin_aq_entry) << + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; + + acq_caps = 0; + acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; + acq_caps |= (sizeof(struct ena_admin_acq_entry) << + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; + + writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); + writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); + ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); + if (ret) + goto error; + + admin_queue->running_state = true; + + return 0; +error: + ena_com_admin_destroy(ena_dev); + + return ret; +} + +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + int ret; + + if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { + pr_err("Qid (%d) is bigger than max num of queues (%d)\n", + ctx->qid, ENA_TOTAL_NUM_QUEUES); + return -EINVAL; + } + + io_sq = &ena_dev->io_sq_queues[ctx->qid]; + io_cq = &ena_dev->io_cq_queues[ctx->qid]; + + memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); + memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); + + /* Init CQ */ + io_cq->q_depth = ctx->queue_size; + io_cq->direction = ctx->direction; + io_cq->qid = ctx->qid; + + io_cq->msix_vector = ctx->msix_vector; + + io_sq->q_depth = ctx->queue_size; + io_sq->direction = ctx->direction; + io_sq->qid = ctx->qid; + + io_sq->mem_queue_type = ctx->mem_queue_type; + + if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + /* header length is limited to 8 bits */ + io_sq->tx_max_header_size = + min_t(u32, ena_dev->tx_max_header_size, SZ_256); + + ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); + if (ret) + goto error; + ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); + if (ret) + goto error; + + ret = 
ena_com_create_io_cq(ena_dev, io_cq); + if (ret) + goto error; + + ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); + if (ret) + goto destroy_io_cq; + + return 0; + +destroy_io_cq: + ena_com_destroy_io_cq(ena_dev, io_cq); +error: + ena_com_io_queue_free(ena_dev, io_sq, io_cq); + return ret; +} + +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) +{ + struct ena_com_io_sq *io_sq; + struct ena_com_io_cq *io_cq; + + if (qid >= ENA_TOTAL_NUM_QUEUES) { + pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, + ENA_TOTAL_NUM_QUEUES); + return; + } + + io_sq = &ena_dev->io_sq_queues[qid]; + io_cq = &ena_dev->io_cq_queues[qid]; + + ena_com_destroy_io_sq(ena_dev, io_sq); + ena_com_destroy_io_cq(ena_dev, io_cq); + + ena_com_io_queue_free(ena_dev, io_sq, io_cq); +} + +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp) +{ + return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG); +} + +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_DEVICE_ATTRIBUTES); + if (rc) + return rc; + + memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, + sizeof(get_resp.u.dev_attr)); + ena_dev->supported_features = get_resp.u.dev_attr.supported_features; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_MAX_QUEUES_NUM); + if (rc) + return rc; + + memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, + sizeof(get_resp.u.max_queue)); + ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_AENQ_CONFIG); + if (rc) + return rc; + + memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, + sizeof(get_resp.u.aenq)); + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); + if (rc) + return rc; + + memcpy(&get_feat_ctx->offload, &get_resp.u.offload, + sizeof(get_resp.u.offload)); + + return 0; +} + +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) +{ + ena_com_handle_admin_completion(&ena_dev->admin_queue); +} + +/* ena_com_get_specific_aenq_cb: + * return the handler that is relevant to the specific event group + */ +static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, + u16 group) +{ + struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; + + if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) + return aenq_handlers->handlers[group]; + + return aenq_handlers->unimplemented_handler; +} + +/* ena_com_aenq_intr_handler: + * handles the incoming AENQ events. + * Pop events from the queue and apply the specific handler. + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) +{ + struct ena_admin_aenq_entry *aenq_e; + struct ena_admin_aenq_common_desc *aenq_common; + struct ena_com_aenq *aenq = &dev->aenq; + ena_aenq_handler handler_cb; + u16 masked_head, processed = 0; + u8 phase; + + masked_head = aenq->head & (aenq->q_depth - 1); + phase = aenq->phase; + aenq_e = &aenq->entries[masked_head]; /* Get first entry */ + aenq_common = &aenq_e->aenq_common_desc; + + /* Go over all the events */ + while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == + phase) { + pr_debug("AENQ!
Group[%x] Syndrom[%x] timestamp: [%llus]\n", + aenq_common->group, aenq_common->syndrom, + (u64)aenq_common->timestamp_low + + ((u64)aenq_common->timestamp_high << 32)); + + /* Handle specific event*/ + handler_cb = ena_com_get_specific_aenq_cb(dev, + aenq_common->group); + handler_cb(data, aenq_e); /* call the actual event handler*/ + + /* Get next event entry */ + masked_head++; + processed++; + + if (unlikely(masked_head == aenq->q_depth)) { + masked_head = 0; + phase = !phase; + } + aenq_e = &aenq->entries[masked_head]; + aenq_common = &aenq_e->aenq_common_desc; + } + + aenq->head += processed; + aenq->phase = phase; + + /* Don't update aenq doorbell if there weren't any processed events */ + if (!processed) + return; + + /* write the aenq doorbell after all AENQ descriptors were read */ + mb(); + writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); +} + +int ena_com_dev_reset(struct ena_com_dev *ena_dev) +{ + u32 stat, timeout, cap, reset_val; + int rc; + + stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); + cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); + + if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || + (cap == ENA_MMIO_READ_TIMEOUT))) { + pr_err("Reg read32 timeout occurred\n"); + return -ETIME; + } + + if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { + pr_err("Device isn't ready, can't reset device\n"); + return -EINVAL; + } + + timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> + ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; + if (timeout == 0) { + pr_err("Invalid timeout value\n"); + return -EINVAL; + } + + /* start reset */ + reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; + writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); + + /* Write again the MMIO read request address */ + ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); + + rc = wait_for_reset_state(ena_dev, timeout, + ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); + if (rc != 0) { + pr_err("Reset indication didn't turn on\n"); + return rc; + } + + /* reset done */ + writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); + rc = wait_for_reset_state(ena_dev, timeout, 0); + if (rc != 0) { + pr_err("Reset indication didn't turn off\n"); + return rc; + } + + return 0; +} + +static int ena_get_dev_stats(struct ena_com_dev *ena_dev, + struct ena_com_stats_ctx *ctx, + enum ena_admin_get_stats_type type) +{ + struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; + struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; + struct ena_com_admin_queue *admin_queue; + int ret; + + admin_queue = &ena_dev->admin_queue; + + get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; + get_cmd->aq_common_descriptor.flags = 0; + get_cmd->type = type; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)get_cmd, + sizeof(*get_cmd), + (struct ena_admin_acq_entry *)get_resp, + sizeof(*get_resp)); + + if (unlikely(ret)) + pr_err("Failed to get stats. 
error: %d\n", ret); + + return ret; +} + +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats) +{ + struct ena_com_stats_ctx ctx; + int ret; + + memset(&ctx, 0x0, sizeof(ctx)); + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); + if (likely(ret == 0)) + memcpy(stats, &ctx.get_resp.basic_stats, + sizeof(ctx.get_resp.basic_stats)); + + return ret; +} + +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { + pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = 0; + cmd.feat_common.feature_id = ENA_ADMIN_MTU; + cmd.u.mtu.mtu = mtu; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set mtu %d. error: %d\n", mtu, ret); + + return ret; +} + +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload) +{ + int ret; + struct ena_admin_get_feat_resp resp; + + ret = ena_com_get_feature(ena_dev, &resp, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); + if (unlikely(ret)) { + pr_err("Failed to get offload capabilities %d\n", ret); + return ret; + } + + memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); + + return 0; +} + +int ena_com_set_hash_function(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_admin_get_feat_resp get_resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_FUNCTION)) { + pr_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); + return -EPERM; + } + + /* Validate hash function is supported */ + ret = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION); + if (unlikely(ret)) + return ret; + + if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) { + pr_err("Func hash %d isn't supported by device, abort\n", + rss->hash_func); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; + cmd.u.flow_hash_func.init_val = rss->hash_init_val; + cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->hash_key_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = sizeof(*rss->hash_key); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) { + pr_err("Failed to set hash function %d.
error: %d\n", + rss->hash_func, ret); + return -EINVAL; + } + + return 0; +} + +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + /* Make sure size is a mult of DWs */ + if (unlikely(key_len & 0x3)) + return -EINVAL; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key)); + if (unlikely(rc)) + return rc; + + if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { + pr_err("Flow hash function %d isn't supported\n", func); + return -EPERM; + } + + switch (func) { + case ENA_ADMIN_TOEPLITZ: + if (key_len > sizeof(hash_key->key)) { + pr_err("key len (%hu) is bigger than the max supported (%zu)\n", + key_len, sizeof(hash_key->key)); + return -EINVAL; + } + + memcpy(hash_key->key, key, key_len); + rss->hash_init_val = init_val; + hash_key->keys_num = key_len >> 2; + break; + case ENA_ADMIN_CRC32: + rss->hash_init_val = init_val; + break; + default: + pr_err("Invalid hash function (%d)\n", func); + return -EINVAL; + } + + rc = ena_com_set_hash_function(ena_dev); + + /* Restore the old function */ + if (unlikely(rc)) + ena_com_get_hash_function(ena_dev, NULL, NULL); + + return rc; +} + +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + struct ena_admin_feature_rss_flow_hash_control *hash_key = + rss->hash_key; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_FUNCTION, + rss->hash_key_dma_addr, + sizeof(*rss->hash_key)); + if (unlikely(rc)) + return rc; + + rss->hash_func = get_resp.u.flow_hash_func.selected_func; + if (func) + *func = rss->hash_func; + + if (key) + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); + + return 0; +} + +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + int rc; + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_HASH_INPUT, + rss->hash_ctrl_dma_addr, + sizeof(*rss->hash_ctrl)); + if (unlikely(rc)) + return rc; + + if (fields) + *fields = rss->hash_ctrl->selected_fields[proto].fields; + + return 0; +} + +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_INPUT)) { + pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; + cmd.u.flow_hash_input.enabled_input_sort = + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + 
rss->hash_ctrl_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + cmd.control_buffer.length = sizeof(*hash_ctrl); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + if (unlikely(ret)) + pr_err("Failed to set hash input. error: %d\n", ret); + + return ret; +} + +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = + rss->hash_ctrl; + u16 available_fields = 0; + int rc, i; + + /* Get the supported hash input */ + rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); + if (unlikely(rc)) + return rc; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | + ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = + ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; + + hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = + ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; + + for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { + available_fields = hash_ctrl->selected_fields[i].fields & + hash_ctrl->supported_fields[i].fields; + if (available_fields != hash_ctrl->selected_fields[i].fields) { + pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n", + i, hash_ctrl->supported_fields[i].fields, + hash_ctrl->selected_fields[i].fields); + return -EPERM; + } + } + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, 0, NULL); + + return rc; +} + +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; + u16 supported_fields; + int rc; + + if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { + pr_err("Invalid proto num (%u)\n", proto); + return -EINVAL; + } + + /* Get the ctrl table */ + rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); + if (unlikely(rc)) + return rc; + + /* Make sure all the fields are supported */ + supported_fields = hash_ctrl->supported_fields[proto].fields; + if ((hash_fields & supported_fields) != hash_fields) { + pr_err("proto %d doesn't support the required fields %x.
supports only: %x\n", + proto, hash_fields, supported_fields); + return -EPERM; + } + + hash_ctrl->selected_fields[proto].fields = hash_fields; + + rc = ena_com_set_hash_ctrl(ena_dev); + + /* In case of failure, restore the old hash ctrl */ + if (unlikely(rc)) + ena_com_get_hash_ctrl(ena_dev, 0, NULL); + + return rc; +} + +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value) +{ + struct ena_rss *rss = &ena_dev->rss; + + if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) + return -EINVAL; + + if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) + return -EINVAL; + + rss->host_rss_ind_tbl[entry_idx] = entry_value; + + return 0; +} + +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + int ret; + + if (!ena_com_check_supported_feature_id( + ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { + pr_info("Feature %d isn't supported\n", + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); + return -EPERM; + } + + ret = ena_com_ind_tbl_convert_to_device(ena_dev); + if (ret) { + pr_err("Failed to convert host indirection table to device table\n"); + return ret; + } + + memset(&cmd, 0x0, sizeof(cmd)); + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.aq_common_descriptor.flags = + ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; + cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; + cmd.u.ind_table.size = rss->tbl_log_size; + cmd.u.ind_table.inline_index = 0xFFFFFFFF; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.control_buffer.address, + rss->rss_ind_tbl_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set indirect table.
error: %d\n", ret); + + return ret; +} + +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) +{ + struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_get_feat_resp get_resp; + u32 tbl_size; + int i, rc; + + tbl_size = (1ULL << rss->tbl_log_size) * + sizeof(struct ena_admin_rss_ind_table_entry); + + rc = ena_com_get_feature_ex(ena_dev, &get_resp, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, + rss->rss_ind_tbl_dma_addr, + tbl_size); + if (unlikely(rc)) + return rc; + + if (!ind_tbl) + return 0; + + rc = ena_com_ind_tbl_convert_from_device(ena_dev); + if (unlikely(rc)) + return rc; + + for (i = 0; i < (1 << rss->tbl_log_size); i++) + ind_tbl[i] = rss->host_rss_ind_tbl[i]; + + return 0; +} + +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) +{ + int rc; + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); + + rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); + if (unlikely(rc)) + goto err_indr_tbl; + + rc = ena_com_hash_key_allocate(ena_dev); + if (unlikely(rc)) + goto err_hash_key; + + rc = ena_com_hash_ctrl_init(ena_dev); + if (unlikely(rc)) + goto err_hash_ctrl; + + return 0; + +err_hash_ctrl: + ena_com_hash_key_destroy(ena_dev); +err_hash_key: + ena_com_indirect_table_destroy(ena_dev); +err_indr_tbl: + + return rc; +} + +void ena_com_rss_destroy(struct ena_com_dev *ena_dev) +{ + ena_com_indirect_table_destroy(ena_dev); + ena_com_hash_key_destroy(ena_dev); + ena_com_hash_ctrl_destroy(ena_dev); + + memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); +} + +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + host_attr->host_info = + dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, + &host_attr->host_info_dma_addr, GFP_KERNEL); + if (unlikely(!host_attr->host_info)) + return -ENOMEM; + + return 0; +} + +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + host_attr->debug_area_virt_addr = + dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, + &host_attr->debug_area_dma_addr, GFP_KERNEL); + if (unlikely(!host_attr->debug_area_virt_addr)) { + host_attr->debug_area_size = 0; + return -ENOMEM; + } + + host_attr->debug_area_size = debug_area_size; + + return 0; +} + +void ena_com_delete_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->host_info) { + dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, + host_attr->host_info_dma_addr); + host_attr->host_info = NULL; + } +} + +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + + if (host_attr->debug_area_virt_addr) { + dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, + host_attr->debug_area_virt_addr, + host_attr->debug_area_dma_addr); + host_attr->debug_area_virt_addr = NULL; + } +} + +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + + int ret; + + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_HOST_ATTR_CONFIG)) { + pr_warn("Set host attribute isn't supported\n"); + return -EPERM; + } + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + 
cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.debug_ba, + host_attr->debug_area_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + ret = ena_com_mem_addr_set(ena_dev, + &cmd.u.host_attr.os_info_ba, + host_attr->host_info_dma_addr); + if (unlikely(ret)) { + pr_err("memory address set failed\n"); + return ret; + } + + cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set host attributes: %d\n", ret); + + return ret; +} + +/* Interrupt moderation */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) +{ + return ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_INTERRUPT_MODERATION); +} + +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs) +{ + if (!ena_dev->intr_delay_resolution) { + pr_err("Illegal interrupt delay granularity value\n"); + return -EFAULT; + } + + ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / + ena_dev->intr_delay_resolution; + + return 0; +} + +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs) +{ + if (!ena_dev->intr_delay_resolution) { + pr_err("Illegal interrupt delay granularity value\n"); + return -EFAULT; + } + + /* We use LOWEST entry of moderation table for storing + * nonadaptive interrupt coalescing values + */ + ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = + rx_coalesce_usecs / ena_dev->intr_delay_resolution; + + return 0; +} + +void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + if (ena_dev->intr_moder_tbl) + devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl); + ena_dev->intr_moder_tbl = NULL; +} + +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) +{ + struct ena_admin_get_feat_resp get_resp; + u16 delay_resolution; + int rc; + + rc = ena_com_get_feature(ena_dev, &get_resp, + ENA_ADMIN_INTERRUPT_MODERATION); + + if (rc) { + if (rc == -EPERM) { + pr_info("Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); + rc = 0; + } else { + pr_err("Failed to get interrupt moderation admin cmd. 
rc: %d\n", + rc); + } + + /* no moderation supported, disable adaptive support */ + ena_com_disable_adaptive_moderation(ena_dev); + return rc; + } + + rc = ena_com_init_interrupt_moderation_table(ena_dev); + if (rc) + goto err; + + /* if moderation is supported by device we set adaptive moderation */ + delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; + ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); + ena_com_enable_adaptive_moderation(ena_dev); + + return 0; +err: + ena_com_destroy_interrupt_moderation(ena_dev); + return rc; +} + +void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (!intr_moder_tbl) + return; + + intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = + ENA_INTR_LOWEST_USECS; + intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = + ENA_INTR_LOWEST_PKTS; + intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = + ENA_INTR_LOWEST_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = + ENA_INTR_LOW_USECS; + intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = + ENA_INTR_LOW_PKTS; + intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = + ENA_INTR_LOW_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = + ENA_INTR_MID_USECS; + intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = + ENA_INTR_MID_PKTS; + intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = + ENA_INTR_MID_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = + ENA_INTR_HIGH_USECS; + intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = + ENA_INTR_HIGH_PKTS; + intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = + ENA_INTR_HIGH_BYTES; + + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = + ENA_INTR_HIGHEST_USECS; + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = + ENA_INTR_HIGHEST_PKTS; + intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = + ENA_INTR_HIGHEST_BYTES; +} + +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) +{ + return ena_dev->intr_moder_tx_interval; +} + +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (intr_moder_tbl) + return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; + + return 0; +} + +void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) + return; + + intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; + if (ena_dev->intr_delay_resolution) + intr_moder_tbl[level].intr_moder_interval /= + ena_dev->intr_delay_resolution; + intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; + + /* use hardcoded value until ethtool supports bytecount parameter */ + if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) + intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; +} + +void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry) +{ + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + + if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) + return; + + entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; + 
if (ena_dev->intr_delay_resolution) + entry->intr_moder_interval *= ena_dev->intr_delay_resolution; + entry->pkts_per_interval = + intr_moder_tbl[level].pkts_per_interval; + entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h new file mode 100644 index 000000000000..509d7b8e15ab --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -0,0 +1,1038 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_COM +#define ENA_COM + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/gfp.h> +#include <linux/sched.h> +#include <linux/sizes.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include "ena_common_defs.h" +#include "ena_admin_defs.h" +#include "ena_eth_io_defs.h" +#include "ena_regs_defs.h" + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define ENA_MAX_NUM_IO_QUEUES 128U +/* We need two queues for each IO (one for Tx and one for Rx) */ +#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) + +#define ENA_MAX_HANDLERS 256 + +#define ENA_MAX_PHYS_ADDR_SIZE_BITS 48 + +/* Unit in usec */ +#define ENA_REG_READ_TIMEOUT 200000 + +#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aq_entry)) +#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry)) +#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry)) + +/*****************************************************************************/ +/*****************************************************************************/ +/* ENA adaptive interrupt moderation settings */ + +#define ENA_INTR_LOWEST_USECS (0) +#define ENA_INTR_LOWEST_PKTS (3) +#define ENA_INTR_LOWEST_BYTES (2 * 1524) + +#define ENA_INTR_LOW_USECS (32) +#define ENA_INTR_LOW_PKTS (12) +#define ENA_INTR_LOW_BYTES (16 * 1024) + +#define ENA_INTR_MID_USECS (80) +#define ENA_INTR_MID_PKTS (48) +#define ENA_INTR_MID_BYTES (64 * 1024) + +#define ENA_INTR_HIGH_USECS (128) +#define ENA_INTR_HIGH_PKTS (96) +#define ENA_INTR_HIGH_BYTES (128 * 1024) + +#define ENA_INTR_HIGHEST_USECS (192) +#define ENA_INTR_HIGHEST_PKTS (128) +#define ENA_INTR_HIGHEST_BYTES (192 * 1024) + +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196 +#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4 +#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6 +#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4 +#define ENA_INTR_MODER_LEVEL_STRIDE 2 +#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF + +enum ena_intr_moder_level { + ENA_INTR_MODER_LOWEST = 0, + ENA_INTR_MODER_LOW, + ENA_INTR_MODER_MID, + ENA_INTR_MODER_HIGH, + ENA_INTR_MODER_HIGHEST, + ENA_INTR_MAX_NUM_OF_LEVELS, +}; + +struct ena_intr_moder_entry { + unsigned int intr_moder_interval; + unsigned int pkts_per_interval; + unsigned int bytes_per_interval; +}; + +enum queue_direction { + ENA_COM_IO_QUEUE_DIRECTION_TX, + ENA_COM_IO_QUEUE_DIRECTION_RX +}; + +struct ena_com_buf { + dma_addr_t paddr; /**< Buffer physical address */ + u16 len; /**< Buffer length in bytes */ +}; + +struct ena_com_rx_buf_info { + u16 len; + u16 req_id; +}; + +struct ena_com_io_desc_addr { + u8 __iomem *pbuf_dev_addr; /* LLQ address */ + u8 *virt_addr; + dma_addr_t phys_addr; +}; + +struct ena_com_tx_meta { + u16 mss; + u16 l3_hdr_len; + u16 l3_hdr_offset; + u16 l4_hdr_len; /* In words */ +}; + +struct ena_com_io_cq { + struct ena_com_io_desc_addr cdesc_addr; + + /* Interrupt unmask register */ + u32 __iomem *unmask_reg; + + /* The completion queue head doorbell register */ + u32 __iomem *cq_head_db_reg; + + /* numa configuration register (for TPH) */ + u32 __iomem *numa_node_cfg_reg; + + /* The value to write to the above register to unmask + * the interrupt of this queue + */ + u32 msix_vector; + + enum queue_direction direction; + + /* holds the number of cdesc of the current packet */ + u16 cur_rx_pkt_cdesc_count; + /* save the first cdesc idx of the current packet */ + u16 cur_rx_pkt_cdesc_start_idx; + + u16 q_depth; + /* Caller qid */ + u16 qid; + + /* Device queue
index */ + u16 idx; + u16 head; + u16 last_head_update; + u8 phase; + u8 cdesc_entry_size_in_bytes; + +} ____cacheline_aligned; + +struct ena_com_io_sq { + struct ena_com_io_desc_addr desc_addr; + + u32 __iomem *db_addr; + u8 __iomem *header_addr; + + enum queue_direction direction; + enum ena_admin_placement_policy_type mem_queue_type; + + u32 msix_vector; + struct ena_com_tx_meta cached_tx_meta; + + u16 q_depth; + u16 qid; + + u16 idx; + u16 tail; + u16 next_to_comp; + u32 tx_max_header_size; + u8 phase; + u8 desc_entry_size; + u8 dma_addr_bits; +} ____cacheline_aligned; + +struct ena_com_admin_cq { + struct ena_admin_acq_entry *entries; + dma_addr_t dma_addr; + + u16 head; + u8 phase; +}; + +struct ena_com_admin_sq { + struct ena_admin_aq_entry *entries; + dma_addr_t dma_addr; + + u32 __iomem *db_addr; + + u16 head; + u16 tail; + u8 phase; + +}; + +struct ena_com_stats_admin { + u32 aborted_cmd; + u32 submitted_cmd; + u32 completed_cmd; + u32 out_of_space; + u32 no_completion; +}; + +struct ena_com_admin_queue { + void *q_dmadev; + spinlock_t q_lock; /* spinlock for the admin queue */ + struct ena_comp_ctx *comp_ctx; + u16 q_depth; + struct ena_com_admin_cq cq; + struct ena_com_admin_sq sq; + + /* Indicate if the admin queue should poll for completion */ + bool polling; + + u16 curr_cmd_id; + + /* Indicate that the ena was initialized and can + * process new admin commands + */ + bool running_state; + + /* Count the number of outstanding admin commands */ + atomic_t outstanding_cmds; + + struct ena_com_stats_admin stats; +}; + +struct ena_aenq_handlers; + +struct ena_com_aenq { + u16 head; + u8 phase; + struct ena_admin_aenq_entry *entries; + dma_addr_t dma_addr; + u16 q_depth; + struct ena_aenq_handlers *aenq_handlers; +}; + +struct ena_com_mmio_read { + struct ena_admin_ena_mmio_req_read_less_resp *read_resp; + dma_addr_t read_resp_dma_addr; + u16 seq_num; + bool readless_supported; + /* spin lock to ensure a single outstanding read */ + spinlock_t lock; +}; + +struct ena_rss { + /* Indirect table */ + u16 *host_rss_ind_tbl; + struct ena_admin_rss_ind_table_entry *rss_ind_tbl; + dma_addr_t rss_ind_tbl_dma_addr; + u16 tbl_log_size; + + /* Hash key */ + enum ena_admin_hash_functions hash_func; + struct ena_admin_feature_rss_flow_hash_control *hash_key; + dma_addr_t hash_key_dma_addr; + u32 hash_init_val; + + /* Flow Control */ + struct ena_admin_feature_rss_hash_control *hash_ctrl; + dma_addr_t hash_ctrl_dma_addr; + +}; + +struct ena_host_attribute { + /* Debug area */ + u8 *debug_area_virt_addr; + dma_addr_t debug_area_dma_addr; + u32 debug_area_size; + + /* Host information */ + struct ena_admin_host_info *host_info; + dma_addr_t host_info_dma_addr; +}; + +/* Each ena_dev is a PCI function. 
*/ +struct ena_com_dev { + struct ena_com_admin_queue admin_queue; + struct ena_com_aenq aenq; + struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES]; + struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES]; + u8 __iomem *reg_bar; + void __iomem *mem_bar; + void *dmadev; + + enum ena_admin_placement_policy_type tx_mem_queue_type; + u32 tx_max_header_size; + u16 stats_func; /* Selected function for extended statistic dump */ + u16 stats_queue; /* Selected queue for extended statistic dump */ + + struct ena_com_mmio_read mmio_read; + + struct ena_rss rss; + u32 supported_features; + u32 dma_addr_bits; + + struct ena_host_attribute host_attr; + bool adaptive_coalescing; + u16 intr_delay_resolution; + u32 intr_moder_tx_interval; + struct ena_intr_moder_entry *intr_moder_tbl; +}; + +struct ena_com_dev_get_features_ctx { + struct ena_admin_queue_feature_desc max_queues; + struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_aenq_desc aenq; + struct ena_admin_feature_offload_desc offload; +}; + +struct ena_com_create_io_ctx { + enum ena_admin_placement_policy_type mem_queue_type; + enum queue_direction direction; + int numa_node; + u32 msix_vector; + u16 queue_size; + u16 qid; +}; + +typedef void (*ena_aenq_handler)(void *data, + struct ena_admin_aenq_entry *aenq_e); + +/* Holds aenq handlers. Indexed by AENQ event group */ +struct ena_aenq_handlers { + ena_aenq_handler handlers[ENA_MAX_HANDLERS]; + ena_aenq_handler unimplemented_handler; +}; + +/*****************************************************************************/ +/*****************************************************************************/ + +/* ena_com_mmio_reg_read_request_init - Init the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * + * Initialize the register read mechanism. + * + * @note: This method must be the first stage in the initialization sequence. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); + +/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + * @readless_supported: readless mode (enable/disable) + */ +void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, + bool readless_supported); + +/* ena_com_mmio_reg_read_request_write_dev_addr - Write the mmio reg read return + * value physical address. + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev); + +/* ena_com_mmio_reg_read_request_destroy - Destroy the mmio reg read mechanism + * @ena_dev: ENA communication layer struct + */ +void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_admin_init - Init the admin and the async queues + * @ena_dev: ENA communication layer struct + * @aenq_handlers: The handlers to be called upon AENQ events. + * @init_spinlock: Indicate if this method should init the admin spinlock or + * whether the spinlock was initialized before (for example, in case of FLR). + * + * Initialize the admin submission and completion queues. + * Initialize the asynchronous events notification queues. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_admin_init(struct ena_com_dev *ena_dev, + struct ena_aenq_handlers *aenq_handlers, + bool init_spinlock);
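For orientation only, not part of this patch: the kernel-doc above pins down a bring-up order (the readless MMIO read mechanism must come first, then the admin queue). A minimal sketch of that sequence, using only the API declared above; the caller name is hypothetical:

static int example_ena_bringup(struct ena_com_dev *ena_dev,
			       struct ena_aenq_handlers *handlers)
{
	int rc;

	/* Must be the first init stage: allocates the DMA-coherent
	 * buffer the device writes register read responses into.
	 */
	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc)
		return rc;

	/* First-time init, so let ena_com_admin_init() initialize the
	 * admin queue spinlock (init_spinlock = true).
	 */
	rc = ena_com_admin_init(ena_dev, handlers, true);
	if (rc)
		ena_com_mmio_reg_read_request_destroy(ena_dev);

	return rc;
}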
/* ena_com_admin_destroy - Destroy the admin and the async events queues. + * @ena_dev: ENA communication layer struct + * + * @note: Before calling this method, the caller must validate that the device + * won't send any additional admin completions/aenq. + * To achieve that, a FLR is recommended. + */ +void ena_com_admin_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_dev_reset - Perform an FLR on the device. + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_dev_reset(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_queue - Create io queue. + * @ena_dev: ENA communication layer struct + * @ctx - create context structure + * + * Create the submission and the completion queues. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx); + +/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid. + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + */ +void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid); + +/* ena_com_get_io_handlers - Return the io queue handlers + * @ena_dev: ENA communication layer struct + * @qid - the caller virtual queue id. + * @io_sq - IO submission queue handler + * @io_cq - IO completion queue handler. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, + struct ena_com_io_sq **io_sq, + struct ena_com_io_cq **io_cq); + +/* ena_com_admin_aenq_enable - Enable asynchronous event notifications + * @ena_dev: ENA communication layer struct + * + * After this method, aenq events can be received via AENQ. + */ +void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_running_state - Set the state of the admin queue + * @ena_dev: ENA communication layer struct + * + * Change the state of the admin queue (enable/disable) + */ +void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state); + +/* ena_com_get_admin_running_state - Get the admin queue state + * @ena_dev: ENA communication layer struct + * + * Retrieve the state of the admin queue (enable/disable) + * + * @return - current running state (enable/disable) + */ +bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); + +/* ena_com_set_admin_polling_mode - Set the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * @polling: Enable/Disable polling mode + * + * Set the admin completion mode. + */ +void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); + +/* ena_com_get_ena_admin_polling_mode - Get the admin completion queue polling mode + * @ena_dev: ENA communication layer struct + * + * Get the admin completion mode. + * If polling mode is on, ena_com_execute_admin_command will perform a + * polling on the admin completion queue for the commands completion, + * otherwise it will wait on wait event. + * + * @return state + */ +bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev); + +/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method goes over the admin completion queue and wakes up all the pending + * threads that wait on the commands wait event. + * + * @note: Should be called after MSI-X interrupt.
+ */ +void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); + +/* ena_com_aenq_intr_handler - AENQ interrupt handler + * @ena_dev: ENA communication layer struct + * + * This method goes over the async event notification queue and calls the proper + * aenq handler. + */ +void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); + +/* ena_com_abort_admin_commands - Abort all the outstanding admin commands. + * @ena_dev: ENA communication layer struct + * + * This method aborts all the outstanding admin commands. + * The caller should then call ena_com_wait_for_abort_completion to make sure + * all the commands were completed. + */ +void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); + +/* ena_com_wait_for_abort_completion - Wait for admin commands abort. + * @ena_dev: ENA communication layer struct + * + * This method waits until all the outstanding admin commands are completed. + */ +void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); + +/* ena_com_validate_version - Validate the device parameters + * @ena_dev: ENA communication layer struct + * + * This method validates that the device parameters are the same as the saved + * parameters in ena_dev. + * This method is useful after device reset, to validate that the device mac address + * and the device offloads are the same as before the reset. + * + * @return - 0 on success, negative value otherwise. + */ +int ena_com_validate_version(struct ena_com_dev *ena_dev); + +/* ena_com_get_link_params - Retrieve physical link parameters. + * @ena_dev: ENA communication layer struct + * @resp: Link parameters + * + * Retrieve the physical link parameters, + * like speed, auto-negotiation and full duplex support. + * + * @return - 0 on Success, negative value otherwise. + */ +int ena_com_get_link_params(struct ena_com_dev *ena_dev, + struct ena_admin_get_feat_resp *resp); + +/* ena_com_get_dma_width - Retrieve the physical DMA address width the device + * supports. + * @ena_dev: ENA communication layer struct + * + * Retrieve the maximum physical address bits the device can handle. + * + * @return: > 0 on Success and negative value otherwise. + */ +int ena_com_get_dma_width(struct ena_com_dev *ena_dev); + +/* ena_com_set_aenq_config - Set aenq groups configurations + * @ena_dev: ENA communication layer struct + * @groups_flag: bit field of enum ena_admin_aenq_group flags. + * + * Configure which aenq event groups the driver would like to receive. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag); + +/* ena_com_get_dev_attr_feat - Get device features + * @ena_dev: ENA communication layer struct + * @get_feat_ctx: returned context that contains the get features. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx); + +/* ena_com_get_dev_basic_stats - Get device basic statistics + * @ena_dev: ENA communication layer struct + * @stats: stats return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, + struct ena_admin_basic_stats *stats); + +/* ena_com_set_dev_mtu - Configure the device mtu. + * @ena_dev: ENA communication layer struct + * @mtu: mtu value + * + * @return: 0 on Success and negative value otherwise.
+ */ +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); + +/* ena_com_get_offload_settings - Retrieve the device offloads capabilities + * @ena_dev: ENA communication layer struct + * @offload: offload return value + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, + struct ena_admin_feature_offload_desc *offload); + +/* ena_com_rss_init - Init RSS + * @ena_dev: ENA communication layer struct + * @log_size: indirection log size + * + * Allocate RSS/RFS resources. + * The caller then can configure rss using ena_com_set_hash_function, + * ena_com_set_hash_ctrl and ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); + +/* ena_com_rss_destroy - Destroy rss + * @ena_dev: ENA communication layer struct + * + * Free all the RSS/RFS resources. + */ +void ena_com_rss_destroy(struct ena_com_dev *ena_dev); + +/* ena_com_fill_hash_function - Fill RSS hash function + * @ena_dev: ENA communication layer struct + * @func: The hash function (Toeplitz or crc) + * @key: Hash key (for toeplitz hash) + * @key_len: key length (max length 10 DW) + * @init_val: initial value for the hash function + * + * Fill the ena_dev resources with the desired hash function, hash key, key_len + * and key initial value (if needed by the hash function). + * To flush the key into the device the caller should call + * ena_com_set_hash_function. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions func, + const u8 *key, u16 key_len, u32 init_val); + +/* ena_com_set_hash_function - Flush the hash function and its dependencies to + * the device. + * @ena_dev: ENA communication layer struct + * + * Flush the hash function and its dependencies (key, key length and + * initial value) if needed. + * + * @note: Prior to this method the caller should call ena_com_fill_hash_function + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_function(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_function - Retrieve the hash function and the hash key + * from the device. + * @ena_dev: ENA communication layer struct + * @func: hash function + * @key: hash key + * + * Retrieve the hash function and the hash key from the device. + * + * @note: If the caller called ena_com_fill_hash_function but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_function(struct ena_com_dev *ena_dev, + enum ena_admin_hash_functions *func, + u8 *key); + +/* ena_com_fill_hash_ctrl - Fill RSS hash control + * @ena_dev: ENA communication layer struct. + * @proto: The protocol to configure. + * @hash_fields: bit mask of ena_admin_flow_hash_fields + * + * Fill the ena_dev resources with the desired hash control (the ethernet + * fields that take part of the hash) for a specific protocol. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 hash_fields);
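For illustration only, not part of this patch: filling a Toeplitz key through the API above. The key bytes are made-up example values, and the function name is hypothetical; per the kernel-doc, key_len must be a multiple of 4 bytes, at most 10 DWORDs:

static int example_set_toeplitz_key(struct ena_com_dev *ena_dev)
{
	/* 40 bytes = 10 DWORDs, the documented maximum; example values,
	 * remaining bytes are zero-initialized.
	 */
	static const u8 key[40] = { 0x6d, 0x5a, 0x56, 0xda };
	int rc;

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
					key, sizeof(key), 0);
	if (rc)
		return rc;

	/* As implemented in ena_com.c in this patch,
	 * ena_com_fill_hash_function() already flushes the key to the
	 * device via ena_com_set_hash_function().
	 */
	return 0;
}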
/* ena_com_set_hash_ctrl - Flush the hash control resources to the device. + * @ena_dev: ENA communication layer struct + * + * Flush the hash control (the ethernet fields that take part of the hash) + * + * @note: Prior to this method the caller should call ena_com_fill_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_get_hash_ctrl - Retrieve the hash control from the device. + * @ena_dev: ENA communication layer struct + * @proto: The protocol to retrieve. + * @fields: bit mask of ena_admin_flow_hash_fields. + * + * Retrieve the hash control from the device. + * + * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, + enum ena_admin_flow_hash_proto proto, + u16 *fields); + +/* ena_com_set_default_hash_ctrl - Set the hash control to a default + * configuration. + * @ena_dev: ENA communication layer struct + * + * Fill the ena_dev resources with the default hash control configuration. + * To flush the hash control to the device, the caller should call + * ena_com_set_hash_ctrl. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_fill_entry - Fill a single entry in the RSS + * indirection table + * @ena_dev: ENA communication layer struct. + * @entry_idx - indirection table entry. + * @entry_value - redirection value + * + * Fill a single entry of the RSS indirection table in the ena_dev resources. + * To flush the indirection table to the device, the caller should call + * ena_com_indirect_table_set. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, + u16 entry_idx, u16 entry_value); + +/* ena_com_indirect_table_set - Flush the indirection table to the device. + * @ena_dev: ENA communication layer struct + * + * Flush the indirection hash control to the device. + * Prior to this method the caller should call ena_com_indirect_table_fill_entry + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); + +/* ena_com_indirect_table_get - Retrieve the indirection table from the device. + * @ena_dev: ENA communication layer struct + * @ind_tbl: indirection table + * + * Retrieve the RSS indirection table from the device. + * + * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl); + +/* ena_com_allocate_host_info - Allocate host info resources. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_allocate_debug_area - Allocate debug area. + * @ena_dev: ENA communication layer struct + * @debug_area_size - debug area size. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size); + +/* ena_com_delete_debug_area - Free the debug area resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated debug area.
+ */ +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); + +/* ena_com_delete_host_info - Free the host info resources. + * @ena_dev: ENA communication layer struct + * + * Free the allocated host info. + */ +void ena_com_delete_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_set_host_attributes - Update the device with the host + * attributes (debug area and host info) base address. + * @ena_dev: ENA communication layer struct + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_set_host_attributes(struct ena_com_dev *ena_dev); + +/* ena_com_create_io_cq - Create io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + * + * Create IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_create_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_destroy_io_cq - Destroy io completion queue. + * @ena_dev: ENA communication layer struct + * @io_cq - io completion queue handler + * + * Destroy IO completion queue. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_io_cq *io_cq); + +/* ena_com_execute_admin_command - Execute admin command + * @admin_queue: admin queue. + * @cmd: the admin command to execute. + * @cmd_size: the command size. + * @cmd_completion: command completion return value. + * @cmd_comp_size: command completion size. + * + * Submit an admin command and then wait until the device returns a + * completion. + * The completion will be copied into cmd_comp. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, + struct ena_admin_aq_entry *cmd, + size_t cmd_size, + struct ena_admin_acq_entry *cmd_comp, + size_t cmd_comp_size); + +/* ena_com_init_interrupt_moderation - Init interrupt moderation + * @ena_dev: ENA communication layer struct + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources + * @ena_dev: ENA communication layer struct + */ +void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev); + +/* ena_com_interrupt_moderation_supported - Return if interrupt moderation + * capability is supported by the device. + * + * @return - supported or not. + */ +bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev); + +/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt + * moderation table back to the default parameters. + * @ena_dev: ENA communication layer struct + */ +void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev); + +/* ena_com_update_nonadaptive_moderation_interval_tx - Update the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * @tx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure. + */ +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, + u32 tx_coalesce_usecs); + +/* ena_com_update_nonadaptive_moderation_interval_rx - Update the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * @rx_coalesce_usecs: Interval in usec. + * + * @return - 0 on success, negative value on failure.
+ */ +int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, + u32 rx_coalesce_usecs); + +/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the + * non-adaptive interval in Tx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev); + +/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the + * non-adaptive interval in Rx direction. + * @ena_dev: ENA communication layer struct + * + * @return - interval in usec + */ +unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev); + +/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt + * moderation table. + * @ena_dev: ENA communication layer struct + * @level: Interrupt moderation table level + * @entry: Entry value + * + * Update a single entry in the interrupt moderation table. + */ +void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry); + +/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry. + * @ena_dev: ENA communication layer struct + * @level: Interrupt moderation table level + * @entry: Entry to fill. + * + * Initialize the entry according to the adaptive interrupt moderation table. + */ +void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, + enum ena_intr_moder_level level, + struct ena_intr_moder_entry *entry); + +static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) +{ + return ena_dev->adaptive_coalescing; +} + +static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = true; +} + +static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev) +{ + ena_dev->adaptive_coalescing = false; +} + +/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay + * @ena_dev: ENA communication layer struct + * @pkts: Number of packets since the last update + * @bytes: Number of bytes received since the last update. + * @smoothed_interval: Returned interval + * @moder_tbl_idx: Current table level as input; updated to the new level on + * return. + */ +static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, + unsigned int pkts, + unsigned int bytes, + unsigned int *smoothed_interval, + unsigned int *moder_tbl_idx) +{ + enum ena_intr_moder_level curr_moder_idx, new_moder_idx; + struct ena_intr_moder_entry *curr_moder_entry; + struct ena_intr_moder_entry *pred_moder_entry; + struct ena_intr_moder_entry *new_moder_entry; + struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; + unsigned int interval; + + /* We apply adaptive moderation on Rx path only. + * Tx uses static interrupt moderation. + */ + if (!pkts || !bytes) + /* Tx interrupt, or spurious interrupt, + * in both cases we just use same delay values + */ + return; + + curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx); + if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) { + pr_err("Wrong moderation index %u\n", curr_moder_idx); + return; + } + + curr_moder_entry = &intr_moder_tbl[curr_moder_idx]; + new_moder_idx = curr_moder_idx; + + if (curr_moder_idx == ENA_INTR_MODER_LOWEST) { + if ((pkts > curr_moder_entry->pkts_per_interval) || + (bytes > curr_moder_entry->bytes_per_interval)) + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); + } else { + pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE]; + + if ((pkts <= pred_moder_entry->pkts_per_interval) || + (bytes <= pred_moder_entry->bytes_per_interval)) + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE); + else if ((pkts > curr_moder_entry->pkts_per_interval) || + (bytes > curr_moder_entry->bytes_per_interval)) { + if (curr_moder_idx != ENA_INTR_MODER_HIGHEST) + new_moder_idx = + (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE); + } + } + new_moder_entry = &intr_moder_tbl[new_moder_idx]; + + interval = new_moder_entry->intr_moder_interval; + *smoothed_interval = ( + (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT + + ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) / + 10; + + *moder_tbl_idx = new_moder_idx;
+}
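As a reading aid only, not part of this patch: a sketch of how an Rx completion path might feed the adaptive moderation helper defined just above, assuming the caller keeps per-queue packet/byte counters and per-queue smoothed-interval and table-index state; the function name is hypothetical:

static void example_rx_adaptive_moder(struct ena_com_dev *ena_dev,
				      unsigned int rx_pkts,
				      unsigned int rx_bytes,
				      unsigned int *smoothed_interval,
				      unsigned int *moder_tbl_idx)
{
	if (!ena_com_get_adaptive_moderation_enabled(ena_dev))
		return;

	/* Smooths the new interval against the old one (4:6 weighting per
	 * ENA_INTR_DELAY_NEW/OLD_VALUE_WEIGHT) and may move the table index
	 * up or down by ENA_INTR_MODER_LEVEL_STRIDE.
	 */
	ena_com_calculate_interrupt_delay(ena_dev, rx_pkts, rx_bytes,
					  smoothed_interval, moder_tbl_idx);
}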
+	 */
+	if (!pkts || !bytes)
+		/* Tx interrupt, or spurious interrupt,
+		 * in both cases we just use the same delay values
+		 */
+		return;
+
+	curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
+	if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
+		pr_err("Wrong moderation index %u\n", curr_moder_idx);
+		return;
+	}
+
+	curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
+	new_moder_idx = curr_moder_idx;
+
+	if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
+		if ((pkts > curr_moder_entry->pkts_per_interval) ||
+		    (bytes > curr_moder_entry->bytes_per_interval))
+			new_moder_idx =
+				(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+	} else {
+		pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
+
+		if ((pkts <= pred_moder_entry->pkts_per_interval) ||
+		    (bytes <= pred_moder_entry->bytes_per_interval))
+			new_moder_idx =
+				(enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
+		else if ((pkts > curr_moder_entry->pkts_per_interval) ||
+			 (bytes > curr_moder_entry->bytes_per_interval)) {
+			if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
+				new_moder_idx =
+					(enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
+		}
+	}
+	new_moder_entry = &intr_moder_tbl[new_moder_idx];
+
+	interval = new_moder_entry->intr_moder_interval;
+	*smoothed_interval = (
+		(interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
+		ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
+		10;
+
+	*moder_tbl_idx = new_moder_idx;
+}
+
+/* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+ * @tx_delay_interval: Tx interval in usecs
+ * @unmask: unmask enable/disable
+ *
+ * Prepare interrupt update register with the supplied parameters.
+ */
+static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
+					   u32 rx_delay_interval,
+					   u32 tx_delay_interval,
+					   bool unmask)
+{
+	intr_reg->intr_control = 0;
+	intr_reg->intr_control |= rx_delay_interval &
+		ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+
+	intr_reg->intr_control |=
+		(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
+		& ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+
+	if (unmask)
+		intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+}
+
+#endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
new file mode 100644
index 000000000000..bb8d73676eab
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _ENA_COMMON_H_ +#define _ENA_COMMON_H_ + +#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ +#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ + +/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ +struct ena_common_mem_addr { + u32 mem_addr_low; + + u16 mem_addr_high; + + /* MBZ */ + u16 reserved16; +}; + +#endif /*_ENA_COMMON_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c new file mode 100644 index 000000000000..539c536464a5 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -0,0 +1,501 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "ena_eth_com.h" + +static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( + struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 expected_phase, head_masked; + u16 desc_phase; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + + (head_masked * io_cq->cdesc_entry_size_in_bytes)); + + desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; + + if (desc_phase != expected_phase) + return NULL; + + return cdesc; +} + +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) +{ + io_cq->head++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) + io_cq->phase ^= 1; +} + +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked; + u32 offset; + + tail_masked = io_sq->tail & (io_sq->q_depth - 1); + + offset = tail_masked * io_sq->desc_entry_size; + + return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); +} + +static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) +{ + u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); + u32 offset = tail_masked * io_sq->desc_entry_size; + + /* In case this queue isn't a LLQ */ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return; + + memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, + io_sq->desc_addr.virt_addr + offset, + io_sq->desc_entry_size); +} + +static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; +} + +static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, + u8 *head_src, u16 header_len) +{ + u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); + u8 __iomem *dev_head_addr = + io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + if (unlikely(!io_sq->header_addr)) { + pr_err("Push buffer header ptr is NULL\n"); + return -EINVAL; + } + + memcpy_toio(dev_head_addr, head_src, header_len); + + return 0; +} + +static inline struct ena_eth_io_rx_cdesc_base * + ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx) +{ + idx &= (io_cq->q_depth - 1); + return (struct ena_eth_io_rx_cdesc_base *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + idx * io_cq->cdesc_entry_size_in_bytes); +} + +static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, + u16 *first_cdesc_idx) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + u16 count = 0, head_masked; + u32 last = 0; + + do { + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (!cdesc) + break; + + ena_com_cq_inc_head(io_cq); + count++; + last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + } while (!last); + + if (last) { + *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; + count += io_cq->cur_rx_pkt_cdesc_count; + + head_masked = io_cq->head & (io_cq->q_depth - 1); + + io_cq->cur_rx_pkt_cdesc_count = 0; + io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; + + pr_debug("ena q_id: %d packets were completed. 
first desc idx %u descs# %d\n", + io_cq->qid, *first_cdesc_idx, count); + } else { + io_cq->cur_rx_pkt_cdesc_count += count; + count = 0; + } + + return count; +} + +static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + int rc; + + if (ena_tx_ctx->meta_valid) { + rc = memcmp(&io_sq->cached_tx_meta, + &ena_tx_ctx->ena_meta, + sizeof(struct ena_com_tx_meta)); + + if (unlikely(rc != 0)) + return true; + } + + return false; +} + +static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) +{ + struct ena_eth_io_tx_meta_desc *meta_desc = NULL; + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + meta_desc = get_sq_desc(io_sq); + memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; + + /* bits 0-9 of the mss */ + meta_desc->word2 |= (ena_meta->mss << + ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; + /* bits 10-13 of the mss */ + meta_desc->len_ctrl |= ((ena_meta->mss >> 10) << + ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & + ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; + + /* Extended meta desc */ + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + meta_desc->len_ctrl |= (io_sq->phase << + ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & + ENA_ETH_IO_TX_META_DESC_PHASE_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK; + meta_desc->word2 |= ena_meta->l3_hdr_len & + ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; + meta_desc->word2 |= (ena_meta->l3_hdr_offset << + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; + + meta_desc->word2 |= (ena_meta->l4_hdr_len << + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & + ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; + + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + + /* Cached the meta desc */ + memcpy(&io_sq->cached_tx_meta, ena_meta, + sizeof(struct ena_com_tx_meta)); + + ena_com_copy_curr_sq_desc_to_dev(io_sq); + ena_com_sq_update_tail(io_sq); +} + +static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, + struct ena_eth_io_rx_cdesc_base *cdesc) +{ + ena_rx_ctx->l3_proto = cdesc->status & + ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; + ena_rx_ctx->l4_proto = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; + ena_rx_ctx->l3_csum_err = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; + ena_rx_ctx->l4_csum_err = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; + ena_rx_ctx->hash = cdesc->hash; + ena_rx_ctx->frag = + (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; + + pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n", + ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, + ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, + ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); +} + +/*****************************************************************************/ +/***************************** API **********************************/ 
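+/* Usage sketch of the Tx flow below (added illustration only; queue setup,
+ * locking and DMA mapping are the caller's responsibility, and dma_addr,
+ * skb and next_to_use are placeholder names):
+ *
+ *	struct ena_com_buf buf = { .paddr = dma_addr, .len = skb->len };
+ *	struct ena_com_tx_ctx ctx = {
+ *		.ena_bufs = &buf,
+ *		.num_bufs = 1,
+ *		.req_id = next_to_use,
+ *	};
+ *	int nb_hw_desc;
+ *
+ *	rc = ena_com_prepare_tx(io_sq, &ctx, &nb_hw_desc);
+ *	if (!rc)
+ *		ena_com_write_sq_doorbell(io_sq);
+ *
+ * On completion, ena_com_tx_comp_req_id_get() hands back the req_id that
+ * identifies the finished packet.
+ */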
+/*****************************************************************************/
+
+int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+		       struct ena_com_tx_ctx *ena_tx_ctx,
+		       int *nb_hw_desc)
+{
+	struct ena_eth_io_tx_desc *desc = NULL;
+	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
+	void *push_header = ena_tx_ctx->push_header;
+	u16 header_len = ena_tx_ctx->header_len;
+	u16 num_bufs = ena_tx_ctx->num_bufs;
+	int total_desc, i, rc;
+	bool have_meta;
+	u64 addr_hi;
+
+	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
+
+	/* num_bufs + 1 for a potential meta descriptor */
+	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+		pr_err("Not enough space in the tx queue\n");
+		return -ENOMEM;
+	}
+
+	if (unlikely(header_len > io_sq->tx_max_header_size)) {
+		pr_err("header size is too large %d max header: %d\n",
+		       header_len, io_sq->tx_max_header_size);
+		return -EINVAL;
+	}
+
+	/* start with pushing the header (if needed) */
+	rc = ena_com_write_header(io_sq, push_header, header_len);
+	if (unlikely(rc))
+		return rc;
+
+	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
+			ena_tx_ctx);
+	if (have_meta)
+		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+
+	/* If the caller doesn't want to send packets */
+	if (unlikely(!num_bufs && !header_len)) {
+		*nb_hw_desc = have_meta ? 0 : 1;
+		return 0;
+	}
+
+	desc = get_sq_desc(io_sq);
+	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+	/* Set first desc when we don't have meta descriptor */
+	if (!have_meta)
+		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
+
+	desc->buff_addr_hi_hdr_sz |= (header_len <<
+		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
+		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+		ENA_ETH_IO_TX_DESC_PHASE_MASK;
+
+	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+
+	/* Bits 0-9 */
+	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
+		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+
+	desc->meta_ctrl |= (ena_tx_ctx->df <<
+		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
+		ENA_ETH_IO_TX_DESC_DF_MASK;
+
+	/* Bits 10-15 */
+	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
+		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
+		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+
+	if (ena_tx_ctx->meta_valid) {
+		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
+			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
+			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
+			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
+		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
+			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
+			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
+			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
+			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
+			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
+			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
+			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
+			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+	}
+
+	for (i = 0; i < num_bufs; i++) {
+		/* The first desc shares the same desc as the header */
+		if (likely(i != 0)) {
+			ena_com_copy_curr_sq_desc_to_dev(io_sq);
+			ena_com_sq_update_tail(io_sq);
+
+			desc = get_sq_desc(io_sq);
+			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
+
+			desc->len_ctrl |= (io_sq->phase <<
+				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+				ENA_ETH_IO_TX_DESC_PHASE_MASK;
+		}
+
+		desc->len_ctrl |= ena_bufs->len &
+			ENA_ETH_IO_TX_DESC_LENGTH_MASK;
+
+		addr_hi = ((ena_bufs->paddr &
+		   GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+		desc->buff_addr_lo = (u32)ena_bufs->paddr;
+		desc->buff_addr_hi_hdr_sz |= addr_hi &
+			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
+		ena_bufs++;
+	}
+
+	/* set the last desc indicator */
+	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
+
+	ena_com_copy_curr_sq_desc_to_dev(io_sq);
+
+	ena_com_sq_update_tail(io_sq);
+
+	total_desc = max_t(u16, num_bufs, 1);
+	total_desc += have_meta ? 1 : 0;
+
+	*nb_hw_desc = total_desc;
+	return 0;
+}
+
+int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+		   struct ena_com_io_sq *io_sq,
+		   struct ena_com_rx_ctx *ena_rx_ctx)
+{
+	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
+	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+	u16 cdesc_idx = 0;
+	u16 nb_hw_desc;
+	u16 i;
+
+	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+
+	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+	if (nb_hw_desc == 0) {
+		ena_rx_ctx->descs = nb_hw_desc;
+		return 0;
+	}
+
+	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+		 nb_hw_desc);
+
+	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+		       ena_rx_ctx->max_bufs);
+		return -ENOSPC;
+	}
+
+	for (i = 0; i < nb_hw_desc; i++) {
+		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+
+		ena_buf->len = cdesc->length;
+		ena_buf->req_id = cdesc->req_id;
+		ena_buf++;
+	}
+
+	/* Update SQ head ptr */
+	io_sq->next_to_comp += nb_hw_desc;
+
+	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+		 io_sq->next_to_comp);
+
+	/* Get rx flags from the last pkt */
+	ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+
+	ena_rx_ctx->descs = nb_hw_desc;
+	return 0;
+}
+
+int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+			       struct ena_com_buf *ena_buf,
+			       u16 req_id)
+{
+	struct ena_eth_io_rx_desc *desc;
+
+	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
+
+	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+		return -ENOSPC;
+
+	desc = get_sq_desc(io_sq);
+	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
+
+	desc->length = ena_buf->len;
+
+	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
+	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
+	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+
+	desc->req_id = req_id;
+
+	desc->buff_addr_lo = (u32)ena_buf->paddr;
+	desc->buff_addr_hi =
+		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+
+	ena_com_sq_update_tail(io_sq);
+
+	return 0;
+}
+
+int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
+{
+	u8 expected_phase, cdesc_phase;
+	struct ena_eth_io_tx_cdesc *cdesc;
+	u16 masked_head;
+
+	masked_head = io_cq->head & (io_cq->q_depth - 1);
+	expected_phase = io_cq->phase;
+
+	cdesc = (struct ena_eth_io_tx_cdesc *)
+		((uintptr_t)io_cq->cdesc_addr.virt_addr +
+		(masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+	/* When the current completion descriptor phase isn't the same as
+	 * expected, it means that the device has not yet updated
+	 * this completion.
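+	 *
+	 * Descriptive sketch (added commentary): the phase bit is a
+	 * lock-free ownership handshake. The expected phase flips on every
+	 * wrap of the ring (see ena_com_cq_inc_head()), and the device
+	 * stamps each completion it writes with the current phase. A slot
+	 * the device has not rewritten since the previous pass still carries
+	 * the old phase value, fails the comparison below, and is therefore
+	 * reported as not ready (-EAGAIN) instead of being consumed as
+	 * stale data.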
+ */ + cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return -EAGAIN; + + ena_com_cq_inc_head(io_cq); + + *req_id = cdesc->req_id; + + return 0; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h new file mode 100644 index 000000000000..bb53c3a4f8e9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -0,0 +1,160 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_ETH_COM_H_ +#define ENA_ETH_COM_H_ + +#include "ena_com.h" + +/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */ +#define ENA_COMP_HEAD_THRESH 4 + +struct ena_com_tx_ctx { + struct ena_com_tx_meta ena_meta; + struct ena_com_buf *ena_bufs; + /* For LLQ, header buffer - pushed to the device mem space */ + void *push_header; + + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + u16 num_bufs; + u16 req_id; + /* For regular queue, indicate the size of the header + * For LLQ, indicate the size of the pushed buffer + */ + u16 header_len; + + u8 meta_valid; + u8 tso_enable; + u8 l3_csum_enable; + u8 l4_csum_enable; + u8 l4_csum_partial; + u8 df; /* Don't fragment */ +}; + +struct ena_com_rx_ctx { + struct ena_com_rx_buf_info *ena_bufs; + enum ena_eth_io_l3_proto_index l3_proto; + enum ena_eth_io_l4_proto_index l4_proto; + bool l3_csum_err; + bool l4_csum_err; + /* fragmented packet */ + bool frag; + u32 hash; + u16 descs; + int max_bufs; +}; + +int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + int *nb_hw_desc); + +int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, + struct ena_com_io_sq *io_sq, + struct ena_com_rx_ctx *ena_rx_ctx); + +int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, + struct ena_com_buf *ena_buf, + u16 req_id); + +int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); + +static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, + struct ena_eth_io_intr_reg *intr_reg) +{ + writel(intr_reg->intr_control, io_cq->unmask_reg); +} + +static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) +{ + u16 tail, next_to_comp, cnt; + + next_to_comp = io_sq->next_to_comp; + tail = io_sq->tail; + cnt = tail - next_to_comp; + + return io_sq->q_depth - 1 - cnt; +} + +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +{ + u16 tail; + + tail = io_sq->tail; + + pr_debug("write submission queue doorbell for queue: %d tail: %d\n", + io_sq->qid, tail); + + writel(tail, io_sq->db_addr); + + return 0; +} + +static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) +{ + u16 unreported_comp, head; + bool need_update; + + head = io_cq->head; + unreported_comp = head - io_cq->last_head_update; + need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); + + if (io_cq->cq_head_db_reg && need_update) { + pr_debug("Write completion queue doorbell for queue %d: head: %d\n", + io_cq->qid, head); + writel(head, io_cq->cq_head_db_reg); + io_cq->last_head_update = head; + } + + return 0; +} + +static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq, + u8 numa_node) +{ + struct ena_eth_io_numa_node_cfg_reg numa_cfg; + + if (!io_cq->numa_node_cfg_reg) + return; + + numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK) + | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; + + writel(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg); +} + +static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) +{ + io_sq->next_to_comp += elem; +} + +#endif /* ENA_ETH_COM_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h new file mode 100644 index 000000000000..f320c58793a5 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -0,0 +1,416 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. 
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _ENA_ETH_IO_H_
+#define _ENA_ETH_IO_H_
+
+enum ena_eth_io_l3_proto_index {
+	ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+
+	ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+
+	ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+
+	ENA_ETH_IO_L3_PROTO_FCOE = 21,
+
+	ENA_ETH_IO_L3_PROTO_ROCE = 22,
+};
+
+enum ena_eth_io_l4_proto_index {
+	ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+
+	ENA_ETH_IO_L4_PROTO_TCP = 12,
+
+	ENA_ETH_IO_L4_PROTO_UDP = 13,
+
+	ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+};
+
+struct ena_eth_io_tx_desc {
+	/* 15:0 : length - Buffer length in bytes, must
+	 *    include any packet trailers that the ENA is supposed
+	 *    to update like End-to-End CRC, Authentication GMAC
+	 *    etc. This length must not include the
+	 *    'Push_Buffer' length. This length must not include
+	 *    the 4 bytes added at the end for the 802.3 Ethernet FCS
+	 * 21:16 : req_id_hi - Request ID[15:10]
+	 * 22 : reserved22 - MBZ
+	 * 23 : meta_desc - MBZ
+	 * 24 : phase
+	 * 25 : reserved1 - MBZ
+	 * 26 : first - Indicates first descriptor in
+	 *    transaction
+	 * 27 : last - Indicates last descriptor in
+	 *    transaction
+	 * 28 : comp_req - Indicates whether completion
+	 *    should be posted, after packet is transmitted.
+	 *    Valid only for first descriptor
+	 * 30:29 : reserved29 - MBZ
+	 * 31 : reserved31 - MBZ
+	 */
+	u32 len_ctrl;
+
+	/* 3:0 : l3_proto_idx - L3 protocol. This field is
+	 *    required when l3_csum_en, l3_csum or tso_en are set.
+	 * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and
+	 *    the DF flag of the IPv4 header is 0. Otherwise must
+	 *    be set to 1
+	 * 6:5 : reserved5
+	 * 7 : tso_en - Enable TSO, for TCP only.
+	 * 12:8 : l4_proto_idx - L4 protocol. This field needs
+	 *    to be set when l4_csum_en or tso_en is set.
+	 * 13 : l3_csum_en - enable IPv4 header checksum.
+	 * 14 : l4_csum_en - enable TCP/UDP checksum.
+	 * 15 : ethernet_fcs_dis - when set, the controller
+	 *    will not append the 802.3 Ethernet Frame Check
+	 *    Sequence to the packet
+	 * 16 : reserved16
+	 * 17 : l4_csum_partial - L4 partial checksum. when
+	 *    set to 0, the ENA calculates the L4 checksum,
+	 *    where the Destination Address required for the
+	 *    TCP/UDP pseudo-header is taken from the actual
+	 *    packet L3 header.
when set to 1, the ENA doesn't + * calculate the sum of the pseudo-header, instead, + * the checksum field of the L4 is used instead. When + * TSO enabled, the checksum of the pseudo-header + * must not include the tcp length field. L4 partial + * checksum should be used for IPv6 packet that + * contains Routing Headers. + * 20:18 : reserved18 - MBZ + * 21 : reserved21 - MBZ + * 31:22 : req_id_lo - Request ID[9:0] + */ + u32 meta_ctrl; + + u32 buff_addr_lo; + + /* address high and header size + * 15:0 : addr_hi - Buffer Pointer[47:32] + * 23:16 : reserved16_w2 + * 31:24 : header_length - Header length. For Low + * Latency Queues, this fields indicates the number + * of bytes written to the headers' memory. For + * normal queues, if packet is TCP or UDP, and longer + * than max_header_size, then this field should be + * set to the sum of L4 header offset and L4 header + * size(without options), otherwise, this field + * should be set to 0. For both modes, this field + * must not exceed the max_header_size. + * max_header_size value is reported by the Max + * Queues Feature descriptor + */ + u32 buff_addr_hi_hdr_sz; +}; + +struct ena_eth_io_tx_meta_desc { + /* 9:0 : req_id_lo - Request ID[9:0] + * 11:10 : reserved10 - MBZ + * 12 : reserved12 - MBZ + * 13 : reserved13 - MBZ + * 14 : ext_valid - if set, offset fields in Word2 + * are valid Also MSS High in Word 0 and bits [31:24] + * in Word 3 + * 15 : reserved15 + * 19:16 : mss_hi + * 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1: + * Extended Metadata Descriptor + * 21 : meta_store - Store extended metadata in queue + * cache + * 22 : reserved22 - MBZ + * 23 : meta_desc - MBO + * 24 : phase + * 25 : reserved25 - MBZ + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 28 : comp_req - Indicates whether completion + * should be posted, after packet is transmitted. + * Valid only for first descriptor + * 30:29 : reserved29 - MBZ + * 31 : reserved31 - MBZ + */ + u32 len_ctrl; + + /* 5:0 : req_id_hi + * 31:6 : reserved6 - MBZ + */ + u32 word1; + + /* 7:0 : l3_hdr_len + * 15:8 : l3_hdr_off + * 21:16 : l4_hdr_len_in_words - counts the L4 header + * length in words. there is an explicit assumption + * that L4 header appears right after L3 header and + * L4 offset is based on l3_hdr_off+l3_hdr_len + * 31:22 : mss_lo + */ + u32 word2; + + u32 reserved; +}; + +struct ena_eth_io_tx_cdesc { + /* Request ID[15:0] */ + u16 req_id; + + u8 status; + + /* flags + * 0 : phase + * 7:1 : reserved1 + */ + u8 flags; + + u16 sub_qid; + + u16 sq_head_idx; +}; + +struct ena_eth_io_rx_desc { + /* In bytes. 0 means 64KB */ + u16 length; + + /* MBZ */ + u8 reserved2; + + /* 0 : phase + * 1 : reserved1 - MBZ + * 2 : first - Indicates first descriptor in + * transaction + * 3 : last - Indicates last descriptor in transaction + * 4 : comp_req + * 5 : reserved5 - MBO + * 7:6 : reserved6 - MBZ + */ + u8 ctrl; + + u16 req_id; + + /* MBZ */ + u16 reserved6; + + u32 buff_addr_lo; + + u16 buff_addr_hi; + + /* MBZ */ + u16 reserved16_w3; +}; + +/* 4-word format Note: all ethernet parsing information are valid only when + * last=1 + */ +struct ena_eth_io_rx_cdesc_base { + /* 4:0 : l3_proto_idx + * 6:5 : src_vlan_cnt + * 7 : reserved7 - MBZ + * 12:8 : l4_proto_idx + * 13 : l3_csum_err - when set, either the L3 + * checksum error detected, or, the controller didn't + * validate the checksum. 
This bit is valid only when + * l3_proto_idx indicates IPv4 packet + * 14 : l4_csum_err - when set, either the L4 + * checksum error detected, or, the controller didn't + * validate the checksum. This bit is valid only when + * l4_proto_idx indicates TCP/UDP packet, and, + * ipv4_frag is not set + * 15 : ipv4_frag - Indicates IPv4 fragmented packet + * 23:16 : reserved16 + * 24 : phase + * 25 : l3_csum2 - second checksum engine result + * 26 : first - Indicates first descriptor in + * transaction + * 27 : last - Indicates last descriptor in + * transaction + * 29:28 : reserved28 + * 30 : buffer - 0: Metadata descriptor. 1: Buffer + * Descriptor was used + * 31 : reserved31 + */ + u32 status; + + u16 length; + + u16 req_id; + + /* 32-bit hash result */ + u32 hash; + + u16 sub_qid; + + u16 reserved; +}; + +/* 8-word format */ +struct ena_eth_io_rx_cdesc_ext { + struct ena_eth_io_rx_cdesc_base base; + + u32 buff_addr_lo; + + u16 buff_addr_hi; + + u16 reserved16; + + u32 reserved_w6; + + u32 reserved_w7; +}; + +struct ena_eth_io_intr_reg { + /* 14:0 : rx_intr_delay + * 29:15 : tx_intr_delay + * 30 : intr_unmask + * 31 : reserved + */ + u32 intr_control; +}; + +struct ena_eth_io_numa_node_cfg_reg { + /* 7:0 : numa + * 30:8 : reserved + * 31 : enabled + */ + u32 numa_cfg; +}; + +/* tx_desc */ +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) + +/* tx_meta_desc */ +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 
+#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) + +/* tx_cdesc */ +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) + +/* rx_desc */ +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) + +/* rx_cdesc_base */ +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) + +/* intr_reg */ +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) + +/* numa_node_cfg_reg */ +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) + +#endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c new file mode 100644 index 000000000000..67b2338f8fb3 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -0,0 +1,895 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/pci.h> + +#include "ena_netdev.h" + +struct ena_stats { + char name[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define ENA_STAT_ENA_COM_ENTRY(stat) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ +} + +#define ENA_STAT_ENTRY(stat, stat_type) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ +} + +#define ENA_STAT_RX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, rx) + +#define ENA_STAT_TX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, tx) + +#define ENA_STAT_GLOBAL_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, dev) + +static const struct ena_stats ena_stats_global_strings[] = { + ENA_STAT_GLOBAL_ENTRY(tx_timeout), + ENA_STAT_GLOBAL_ENTRY(io_suspend), + ENA_STAT_GLOBAL_ENTRY(io_resume), + ENA_STAT_GLOBAL_ENTRY(wd_expired), + ENA_STAT_GLOBAL_ENTRY(interface_up), + ENA_STAT_GLOBAL_ENTRY(interface_down), + ENA_STAT_GLOBAL_ENTRY(admin_q_pause), +}; + +static const struct ena_stats ena_stats_tx_strings[] = { + ENA_STAT_TX_ENTRY(cnt), + ENA_STAT_TX_ENTRY(bytes), + ENA_STAT_TX_ENTRY(queue_stop), + ENA_STAT_TX_ENTRY(queue_wakeup), + ENA_STAT_TX_ENTRY(dma_mapping_err), + ENA_STAT_TX_ENTRY(linearize), + ENA_STAT_TX_ENTRY(linearize_failed), + ENA_STAT_TX_ENTRY(napi_comp), + ENA_STAT_TX_ENTRY(tx_poll), + ENA_STAT_TX_ENTRY(doorbells), + ENA_STAT_TX_ENTRY(prepare_ctx_err), + ENA_STAT_TX_ENTRY(missing_tx_comp), + ENA_STAT_TX_ENTRY(bad_req_id), +}; + +static const struct ena_stats ena_stats_rx_strings[] = { + ENA_STAT_RX_ENTRY(cnt), + ENA_STAT_RX_ENTRY(bytes), + ENA_STAT_RX_ENTRY(refil_partial), + ENA_STAT_RX_ENTRY(bad_csum), + ENA_STAT_RX_ENTRY(page_alloc_fail), + ENA_STAT_RX_ENTRY(skb_alloc_fail), + ENA_STAT_RX_ENTRY(dma_mapping_err), + ENA_STAT_RX_ENTRY(bad_desc_num), + ENA_STAT_RX_ENTRY(rx_copybreak_pkt), +}; + +static const struct ena_stats ena_stats_ena_com_strings[] = { + ENA_STAT_ENA_COM_ENTRY(aborted_cmd), + ENA_STAT_ENA_COM_ENTRY(submitted_cmd), + ENA_STAT_ENA_COM_ENTRY(completed_cmd), + ENA_STAT_ENA_COM_ENTRY(out_of_space), + ENA_STAT_ENA_COM_ENTRY(no_completion), +}; + +#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) +#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) +#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) +#define ENA_STATS_ARRAY_ENA_COM 
ARRAY_SIZE(ena_stats_ena_com_strings) + +static void ena_safe_update_stat(u64 *src, u64 *dst, + struct u64_stats_sync *syncp) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(syncp); + *(dst) = *src; + } while (u64_stats_fetch_retry_irq(syncp, start)); +} + +static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) +{ + const struct ena_stats *ena_stats; + struct ena_ring *ring; + + u64 *ptr; + int i, j; + + for (i = 0; i < adapter->num_queues; i++) { + /* Tx stats */ + ring = &adapter->tx_ring[i]; + + for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { + ena_stats = &ena_stats_tx_strings[j]; + + ptr = (u64 *)((uintptr_t)&ring->tx_stats + + (uintptr_t)ena_stats->stat_offset); + + ena_safe_update_stat(ptr, (*data)++, &ring->syncp); + } + + /* Rx stats */ + ring = &adapter->rx_ring[i]; + + for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { + ena_stats = &ena_stats_rx_strings[j]; + + ptr = (u64 *)((uintptr_t)&ring->rx_stats + + (uintptr_t)ena_stats->stat_offset); + + ena_safe_update_stat(ptr, (*data)++, &ring->syncp); + } + } +} + +static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) +{ + const struct ena_stats *ena_stats; + u32 *ptr; + int i; + + for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { + ena_stats = &ena_stats_ena_com_strings[i]; + + ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats + + (uintptr_t)ena_stats->stat_offset); + + *(*data)++ = *ptr; + } +} + +static void ena_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + const struct ena_stats *ena_stats; + u64 *ptr; + int i; + + for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { + ena_stats = &ena_stats_global_strings[i]; + + ptr = (u64 *)((uintptr_t)&adapter->dev_stats + + (uintptr_t)ena_stats->stat_offset); + + ena_safe_update_stat(ptr, data++, &adapter->syncp); + } + + ena_queue_stats(adapter, &data); + ena_dev_admin_queue_stats(adapter, &data); +} + +int ena_get_sset_count(struct net_device *netdev, int sset) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; +} + +static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) +{ + const struct ena_stats *ena_stats; + int i, j; + + for (i = 0; i < adapter->num_queues; i++) { + /* Tx stats */ + for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { + ena_stats = &ena_stats_tx_strings[j]; + + snprintf(*data, ETH_GSTRING_LEN, + "queue_%u_tx_%s", i, ena_stats->name); + (*data) += ETH_GSTRING_LEN; + } + /* Rx stats */ + for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { + ena_stats = &ena_stats_rx_strings[j]; + + snprintf(*data, ETH_GSTRING_LEN, + "queue_%u_rx_%s", i, ena_stats->name); + (*data) += ETH_GSTRING_LEN; + } + } +} + +static void ena_com_dev_strings(u8 **data) +{ + const struct ena_stats *ena_stats; + int i; + + for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { + ena_stats = &ena_stats_ena_com_strings[i]; + + snprintf(*data, ETH_GSTRING_LEN, + "ena_admin_q_%s", ena_stats->name); + (*data) += ETH_GSTRING_LEN; + } +} + +static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + const struct ena_stats *ena_stats; + int i; + + if (sset != ETH_SS_STATS) + return; + + for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { + ena_stats = &ena_stats_global_strings[i]; + + memcpy(data, ena_stats->name, 
ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	ena_queue_strings(adapter, &data);
+	ena_com_dev_strings(&data);
+}
+
+static int ena_get_link_ksettings(struct net_device *netdev,
+				  struct ethtool_link_ksettings *link_ksettings)
+{
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	struct ena_admin_get_feature_link_desc *link;
+	struct ena_admin_get_feat_resp feat_resp;
+	int rc;
+
+	rc = ena_com_get_link_params(ena_dev, &feat_resp);
+	if (rc)
+		return rc;
+
+	link = &feat_resp.u.link;
+	link_ksettings->base.speed = link->speed;
+
+	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
+		ethtool_link_ksettings_add_link_mode(link_ksettings,
+						     supported, Autoneg);
+		ethtool_link_ksettings_add_link_mode(link_ksettings,
+						     advertising, Autoneg);
+	}
+
+	link_ksettings->base.autoneg =
+		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
+		AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	link_ksettings->base.duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static int ena_get_coalesce(struct net_device *net_dev,
+			    struct ethtool_coalesce *coalesce)
+{
+	struct ena_adapter *adapter = netdev_priv(net_dev);
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	struct ena_intr_moder_entry intr_moder_entry;
+
+	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+		/* the device doesn't support interrupt moderation */
+		return -EOPNOTSUPP;
+	}
+	coalesce->tx_coalesce_usecs =
+		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) /
+			ena_dev->intr_delay_resolution;
+	if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) {
+		coalesce->rx_coalesce_usecs =
+			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
+			/ ena_dev->intr_delay_resolution;
+	} else {
+		ena_com_get_intr_moderation_entry(adapter->ena_dev,
+						  ENA_INTR_MODER_LOWEST,
+						  &intr_moder_entry);
+		coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval;
+		coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval;
+
+		ena_com_get_intr_moderation_entry(adapter->ena_dev,
+						  ENA_INTR_MODER_MID,
+						  &intr_moder_entry);
+		coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval;
+		coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval;
+
+		ena_com_get_intr_moderation_entry(adapter->ena_dev,
+						  ENA_INTR_MODER_HIGHEST,
+						  &intr_moder_entry);
+		coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval;
+		coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval;
+	}
+	coalesce->use_adaptive_rx_coalesce =
+		ena_com_get_adaptive_moderation_enabled(ena_dev);
+
+	return 0;
+}
+
+static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
+{
+	unsigned int val;
+	int i;
+
+	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
+
+	for (i = 0; i < adapter->num_queues; i++)
+		adapter->tx_ring[i].smoothed_interval = val;
+}
+
+static int ena_set_coalesce(struct net_device *net_dev,
+			    struct ethtool_coalesce *coalesce)
+{
+	struct ena_adapter *adapter = netdev_priv(net_dev);
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	struct ena_intr_moder_entry intr_moder_entry;
+	int rc;
+
+	if (!ena_com_interrupt_moderation_supported(ena_dev)) {
+		/* the device doesn't support interrupt moderation */
+		return -EOPNOTSUPP;
+	}
+
+	if (coalesce->rx_coalesce_usecs_irq ||
+	    coalesce->rx_max_coalesced_frames_irq ||
+	    coalesce->tx_coalesce_usecs_irq ||
+	    coalesce->tx_max_coalesced_frames ||
+	    coalesce->tx_max_coalesced_frames_irq ||
+	    coalesce->stats_block_coalesce_usecs ||
coalesce->use_adaptive_tx_coalesce || + coalesce->pkt_rate_low || + coalesce->tx_coalesce_usecs_low || + coalesce->tx_max_coalesced_frames_low || + coalesce->pkt_rate_high || + coalesce->tx_coalesce_usecs_high || + coalesce->tx_max_coalesced_frames_high || + coalesce->rate_sample_interval) + return -EINVAL; + + rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, + coalesce->tx_coalesce_usecs); + if (rc) + return rc; + + ena_update_tx_rings_intr_moderation(adapter); + + if (ena_com_get_adaptive_moderation_enabled(ena_dev)) { + if (!coalesce->use_adaptive_rx_coalesce) { + ena_com_disable_adaptive_moderation(ena_dev); + rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, + coalesce->rx_coalesce_usecs); + return rc; + } + } else { /* was in non-adaptive mode */ + if (coalesce->use_adaptive_rx_coalesce) { + ena_com_enable_adaptive_moderation(ena_dev); + } else { + rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, + coalesce->rx_coalesce_usecs); + return rc; + } + } + + intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low; + intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low; + intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; + ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry); + + intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs; + intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames; + intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; + ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry); + + intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high; + intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high; + intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED; + ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry); + + return 0; +} + +static u32 ena_get_msglevel(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void ena_set_msglevel(struct net_device *netdev, u32 value) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = value; +} + +static void ena_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ena_adapter *adapter = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static void ena_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_ring *tx_ring = &adapter->tx_ring[0]; + struct ena_ring *rx_ring = &adapter->rx_ring[0]; + + ring->rx_max_pending = rx_ring->ring_size; + ring->tx_max_pending = tx_ring->ring_size; + ring->rx_pending = rx_ring->ring_size; + ring->tx_pending = tx_ring->ring_size; +} + +static u32 ena_flow_hash_to_flow_type(u16 hash_fields) +{ + u32 data = 0; + + if (hash_fields & ENA_ADMIN_RSS_L2_DA) + data |= RXH_L2DA; + + if (hash_fields & ENA_ADMIN_RSS_L3_DA) + data |= RXH_IP_DST; + + if (hash_fields & ENA_ADMIN_RSS_L3_SA) + data |= RXH_IP_SRC; + + if (hash_fields & ENA_ADMIN_RSS_L4_DP) + data |= RXH_L4_B_2_3; + + if (hash_fields & ENA_ADMIN_RSS_L4_SP) + data |= RXH_L4_B_0_1; + + return data; +} + +static u16 
ena_flow_data_to_flow_hash(u32 hash_fields)
+{
+	u16 data = 0;
+
+	if (hash_fields & RXH_L2DA)
+		data |= ENA_ADMIN_RSS_L2_DA;
+
+	if (hash_fields & RXH_IP_DST)
+		data |= ENA_ADMIN_RSS_L3_DA;
+
+	if (hash_fields & RXH_IP_SRC)
+		data |= ENA_ADMIN_RSS_L3_SA;
+
+	if (hash_fields & RXH_L4_B_2_3)
+		data |= ENA_ADMIN_RSS_L4_DP;
+
+	if (hash_fields & RXH_L4_B_0_1)
+		data |= ENA_ADMIN_RSS_L4_SP;
+
+	return data;
+}
+
+static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
+			    struct ethtool_rxnfc *cmd)
+{
+	enum ena_admin_flow_hash_proto proto;
+	u16 hash_fields;
+	int rc;
+
+	cmd->data = 0;
+
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		proto = ENA_ADMIN_RSS_TCP4;
+		break;
+	case UDP_V4_FLOW:
+		proto = ENA_ADMIN_RSS_UDP4;
+		break;
+	case TCP_V6_FLOW:
+		proto = ENA_ADMIN_RSS_TCP6;
+		break;
+	case UDP_V6_FLOW:
+		proto = ENA_ADMIN_RSS_UDP6;
+		break;
+	case IPV4_FLOW:
+		proto = ENA_ADMIN_RSS_IP4;
+		break;
+	case IPV6_FLOW:
+		proto = ENA_ADMIN_RSS_IP6;
+		break;
+	case ETHER_FLOW:
+		proto = ENA_ADMIN_RSS_NOT_IP;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+		return -EOPNOTSUPP;
+	default:
+		return -EINVAL;
+	}
+
+	rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
+	if (rc) {
+		/* If the device doesn't have permission, return unsupported */
+		if (rc == -EPERM)
+			rc = -EOPNOTSUPP;
+		return rc;
+	}
+
+	cmd->data = ena_flow_hash_to_flow_type(hash_fields);
+
+	return 0;
+}
+
+static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
+			    struct ethtool_rxnfc *cmd)
+{
+	enum ena_admin_flow_hash_proto proto;
+	u16 hash_fields;
+
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		proto = ENA_ADMIN_RSS_TCP4;
+		break;
+	case UDP_V4_FLOW:
+		proto = ENA_ADMIN_RSS_UDP4;
+		break;
+	case TCP_V6_FLOW:
+		proto = ENA_ADMIN_RSS_TCP6;
+		break;
+	case UDP_V6_FLOW:
+		proto = ENA_ADMIN_RSS_UDP6;
+		break;
+	case IPV4_FLOW:
+		proto = ENA_ADMIN_RSS_IP4;
+		break;
+	case IPV6_FLOW:
+		proto = ENA_ADMIN_RSS_IP6;
+		break;
+	case ETHER_FLOW:
+		proto = ENA_ADMIN_RSS_NOT_IP;
+		break;
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+		return -EOPNOTSUPP;
+	default:
+		return -EINVAL;
+	}
+
+	hash_fields = ena_flow_data_to_flow_hash(cmd->data);
+
+	return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
+}
+
+static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	int rc = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXFH:
+		rc = ena_set_rss_hash(adapter->ena_dev, info);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+	case ETHTOOL_SRXCLSRLINS:
+	default:
+		netif_err(adapter, drv, netdev,
+			  "Command parameter %d is not supported\n", info->cmd);
+		rc = -EOPNOTSUPP;
+	}
+
+	return (rc == -EPERM) ? -EOPNOTSUPP : rc;
+}
+
+static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+			 u32 *rules)
+{
+	struct ena_adapter *adapter = netdev_priv(netdev);
+	int rc = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = adapter->num_queues;
+		rc = 0;
+		break;
+	case ETHTOOL_GRXFH:
+		rc = ena_get_rss_hash(adapter->ena_dev, info);
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+	case ETHTOOL_GRXCLSRULE:
+	case ETHTOOL_GRXCLSRLALL:
+	default:
+		netif_err(adapter, drv, netdev,
+			  "Command parameter %d is not supported\n", info->cmd);
+		rc = -EOPNOTSUPP;
+	}
+
+	return (rc == -EPERM) ?
-EOPNOTSUPP : rc; +} + +static u32 ena_get_rxfh_indir_size(struct net_device *netdev) +{ + return ENA_RX_RSS_TABLE_SIZE; +} + +static u32 ena_get_rxfh_key_size(struct net_device *netdev) +{ + return ENA_HASH_KEY_SIZE; +} + +static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + enum ena_admin_hash_functions ena_func; + u8 func; + int rc; + + rc = ena_com_indirect_table_get(adapter->ena_dev, indir); + if (rc) + return rc; + + rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key); + if (rc) + return rc; + + switch (ena_func) { + case ENA_ADMIN_TOEPLITZ: + func = ETH_RSS_HASH_TOP; + break; + case ENA_ADMIN_CRC32: + func = ETH_RSS_HASH_XOR; + break; + default: + netif_err(adapter, drv, netdev, + "Command parameter is not supported\n"); + return -EOPNOTSUPP; + } + + if (hfunc) + *hfunc = func; + + return rc; +} + +static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_com_dev *ena_dev = adapter->ena_dev; + enum ena_admin_hash_functions func; + int rc, i; + + if (indir) { + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + rc = ena_com_indirect_table_fill_entry(ena_dev, + ENA_IO_RXQ_IDX(indir[i]), + i); + if (unlikely(rc)) { + netif_err(adapter, drv, netdev, + "Cannot fill indirect table (index is too large)\n"); + return rc; + } + } + + rc = ena_com_indirect_table_set(ena_dev); + if (rc) { + netif_err(adapter, drv, netdev, + "Cannot set indirect table\n"); + return rc == -EPERM ? -EOPNOTSUPP : rc; + } + } + + switch (hfunc) { + case ETH_RSS_HASH_TOP: + func = ENA_ADMIN_TOEPLITZ; + break; + case ETH_RSS_HASH_XOR: + func = ENA_ADMIN_CRC32; + break; + default: + netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n", + hfunc); + return -EOPNOTSUPP; + } + + if (key) { + rc = ena_com_fill_hash_function(ena_dev, func, key, + ENA_HASH_KEY_SIZE, + 0xFFFFFFFF); + if (unlikely(rc)) { + netif_err(adapter, drv, netdev, "Cannot fill key\n"); + return rc == -EPERM ? 
-EOPNOTSUPP : rc; + } + } + + return 0; +} + +static void ena_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = ENA_MAX_NUM_IO_QUEUES; + channels->max_tx = ENA_MAX_NUM_IO_QUEUES; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = adapter->num_queues; + channels->tx_count = adapter->num_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + +static int ena_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, void *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = adapter->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ena_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int ret = 0; + u32 len; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + len = *(u32 *)data; + if (len > adapter->netdev->mtu) { + ret = -EINVAL; + break; + } + adapter->rx_copybreak = len; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct ethtool_ops ena_ethtool_ops = { + .get_link_ksettings = ena_get_link_ksettings, + .get_drvinfo = ena_get_drvinfo, + .get_msglevel = ena_get_msglevel, + .set_msglevel = ena_set_msglevel, + .get_link = ethtool_op_get_link, + .get_coalesce = ena_get_coalesce, + .set_coalesce = ena_set_coalesce, + .get_ringparam = ena_get_ringparam, + .get_sset_count = ena_get_sset_count, + .get_strings = ena_get_strings, + .get_ethtool_stats = ena_get_ethtool_stats, + .get_rxnfc = ena_get_rxnfc, + .set_rxnfc = ena_set_rxnfc, + .get_rxfh_indir_size = ena_get_rxfh_indir_size, + .get_rxfh_key_size = ena_get_rxfh_key_size, + .get_rxfh = ena_get_rxfh, + .set_rxfh = ena_set_rxfh, + .get_channels = ena_get_channels, + .get_tunable = ena_get_tunable, + .set_tunable = ena_set_tunable, +}; + +void ena_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ena_ethtool_ops; +} + +static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf) +{ + struct net_device *netdev = adapter->netdev; + u8 *strings_buf; + u64 *data_buf; + int strings_num; + int i, rc; + + strings_num = ena_get_sset_count(netdev, ETH_SS_STATS); + if (strings_num <= 0) { + netif_err(adapter, drv, netdev, "Can't get stats num\n"); + return; + } + + strings_buf = devm_kzalloc(&adapter->pdev->dev, + strings_num * ETH_GSTRING_LEN, + GFP_ATOMIC); + if (!strings_buf) { + netif_err(adapter, drv, netdev, + "failed to alloc strings_buf\n"); + return; + } + + data_buf = devm_kzalloc(&adapter->pdev->dev, + strings_num * sizeof(u64), + GFP_ATOMIC); + if (!data_buf) { + netif_err(adapter, drv, netdev, + "failed to allocate data buf\n"); + devm_kfree(&adapter->pdev->dev, strings_buf); + return; + } + + ena_get_strings(netdev, ETH_SS_STATS, strings_buf); + ena_get_ethtool_stats(netdev, NULL, data_buf); + + /* If there is a buffer, dump stats, otherwise print them to dmesg */ + if (buf) + for (i = 0; i < strings_num; i++) { + rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64), + "%s %llu\n", + strings_buf + i * ETH_GSTRING_LEN, + data_buf[i]); + buf += rc; + } + else + for (i = 0; i < strings_num; i++) + netif_err(adapter, drv, netdev, "%s: %llu\n", + strings_buf + i * ETH_GSTRING_LEN, + data_buf[i]); + + devm_kfree(&adapter->pdev->dev, strings_buf); + 
devm_kfree(&adapter->pdev->dev, data_buf); +} + +void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf) +{ + if (!buf) + return; + + ena_dump_stats_ex(adapter, buf); +} + +void ena_dump_stats_to_dmesg(struct ena_adapter *adapter) +{ + ena_dump_stats_ex(adapter, NULL); +} diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c new file mode 100644 index 000000000000..bfeaec5bd7b9 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -0,0 +1,3272 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#ifdef CONFIG_RFS_ACCEL +#include <linux/cpu_rmap.h> +#endif /* CONFIG_RFS_ACCEL */ +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/numa.h> +#include <linux/pci.h> +#include <linux/utsname.h> +#include <linux/version.h> +#include <linux/vmalloc.h> +#include <net/ip.h> + +#include "ena_netdev.h" +#include "ena_pci_id_tbl.h" + +static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n"; + +MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); +MODULE_DESCRIPTION(DEVICE_NAME); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (5 * HZ) + +#define ENA_NAPI_BUDGET 64 + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ + NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static struct ena_aenq_handlers aenq_handlers; + +static struct workqueue_struct *ena_wq; + +MODULE_DEVICE_TABLE(pci, ena_pci_tbl); + +static int ena_rss_init_default(struct ena_adapter *adapter); + +static void ena_tx_timeout(struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.tx_timeout++; + u64_stats_update_end(&adapter->syncp); + + netif_err(adapter, tx_err, dev, "Transmit time out\n"); + + /* Change the state of the device to trigger reset */ + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); +} + +static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + adapter->rx_ring[i].mtu = mtu; +} + +static int ena_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ena_adapter *adapter = netdev_priv(dev); + int ret; + + if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { + netif_err(adapter, drv, dev, + "Invalid MTU setting. new_mtu: %d\n", new_mtu); + + return -EINVAL; + } + + ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); + if (!ret) { + netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu); + update_rx_ring_mtu(adapter, new_mtu); + dev->mtu = new_mtu; + } else { + netif_err(adapter, drv, dev, "Failed to set MTU to %d\n", + new_mtu); + } + + return ret; +} + +static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) +{ +#ifdef CONFIG_RFS_ACCEL + u32 i; + int rc; + + adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues); + if (!adapter->netdev->rx_cpu_rmap) + return -ENOMEM; + for (i = 0; i < adapter->num_queues; i++) { + int irq_idx = ENA_IO_IRQ_IDX(i); + + rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, + adapter->msix_entries[irq_idx].vector); + if (rc) { + free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); + adapter->netdev->rx_cpu_rmap = NULL; + return rc; + } + } +#endif /* CONFIG_RFS_ACCEL */ + return 0; +} + +static void ena_init_io_rings_common(struct ena_adapter *adapter, + struct ena_ring *ring, u16 qid) +{ + ring->qid = qid; + ring->pdev = adapter->pdev; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->napi = &adapter->ena_napi[qid].napi; + ring->adapter = adapter; + ring->ena_dev = adapter->ena_dev; + ring->per_napi_packets = 0; + ring->per_napi_bytes = 0; + ring->cpu = 0; + u64_stats_init(&ring->syncp); +} + +static void ena_init_io_rings(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev; + struct ena_ring *txr, *rxr; + int i; + + ena_dev = adapter->ena_dev; + + for (i = 0; i < adapter->num_queues; i++) { + txr = &adapter->tx_ring[i]; + rxr = &adapter->rx_ring[i]; + + /* TX/RX common ring state */ + ena_init_io_rings_common(adapter, txr, i); + ena_init_io_rings_common(adapter, rxr, i); + + /* TX specific ring state */ + txr->ring_size = adapter->tx_ring_size; + txr->tx_max_header_size = ena_dev->tx_max_header_size; + txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; + txr->sgl_size = adapter->max_tx_sgl_size; + txr->smoothed_interval = + ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); + + /* RX specific ring state */ + rxr->ring_size = adapter->rx_ring_size; + rxr->rx_copybreak = 
adapter->rx_copybreak; + rxr->sgl_size = adapter->max_rx_sgl_size; + rxr->smoothed_interval = + ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); + } +} + +/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors) + * @adapter: network interface device structure + * @qid: queue index + * + * Return 0 on success, negative on failure + */ +static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) +{ + struct ena_ring *tx_ring = &adapter->tx_ring[qid]; + struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; + int size, i, node; + + if (tx_ring->tx_buffer_info) { + netif_err(adapter, ifup, + adapter->netdev, "tx_buffer_info is not NULL"); + return -EEXIST; + } + + size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; + node = cpu_to_node(ena_irq->cpu); + + tx_ring->tx_buffer_info = vzalloc_node(size, node); + if (!tx_ring->tx_buffer_info) { + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + return -ENOMEM; + } + + size = sizeof(u16) * tx_ring->ring_size; + tx_ring->free_tx_ids = vzalloc_node(size, node); + if (!tx_ring->free_tx_ids) { + tx_ring->free_tx_ids = vzalloc(size); + if (!tx_ring->free_tx_ids) { + vfree(tx_ring->tx_buffer_info); + return -ENOMEM; + } + } + + /* Req id ring for TX out of order completions */ + for (i = 0; i < tx_ring->ring_size; i++) + tx_ring->free_tx_ids[i] = i; + + /* Reset tx statistics */ + memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cpu = ena_irq->cpu; + return 0; +} + +/* ena_free_tx_resources - Free I/O Tx Resources per Queue + * @adapter: network interface device structure + * @qid: queue index + * + * Free all transmit software resources + */ +static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) +{ + struct ena_ring *tx_ring = &adapter->tx_ring[qid]; + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + vfree(tx_ring->free_tx_ids); + tx_ring->free_tx_ids = NULL; +} + +/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for all queues + * @adapter: private structure + * + * Return 0 on success, negative on failure + */ +static int ena_setup_all_tx_resources(struct ena_adapter *adapter) +{ + int i, rc = 0; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_setup_tx_resources(adapter, i); + if (rc) + goto err_setup_tx; + } + + return 0; + +err_setup_tx: + + netif_err(adapter, ifup, adapter->netdev, + "Tx queue %d: allocation failed\n", i); + + /* rewind the index freeing the rings as we go */ + while (i--) + ena_free_tx_resources(adapter, i); + return rc; +} + +/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + ena_free_tx_resources(adapter, i); +} + +/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors) + * @adapter: network interface device structure + * @qid: queue index + * + * Returns 0 on success, negative on failure + */ +static int ena_setup_rx_resources(struct ena_adapter *adapter, + u32 qid) +{ + struct ena_ring *rx_ring = &adapter->rx_ring[qid]; + struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; + int size, node; + + if (rx_ring->rx_buffer_info) { + netif_err(adapter, ifup, adapter->netdev, + "rx_buffer_info is not NULL"); + return -EEXIST; + } + + /* alloc an extra element so that in the rx path + * we can always prefetch rx_info + 1 + */ + size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); + node = cpu_to_node(ena_irq->cpu); + + rx_ring->rx_buffer_info = vzalloc_node(size, node); + if (!rx_ring->rx_buffer_info) { + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + return -ENOMEM; + } + + /* Reset rx statistics */ + memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cpu = ena_irq->cpu; + + return 0; +} + +/* ena_free_rx_resources - Free I/O Rx Resources + * @adapter: network interface device structure + * @qid: queue index + * + * Free all receive software resources + */ +static void ena_free_rx_resources(struct ena_adapter *adapter, + u32 qid) +{ + struct ena_ring *rx_ring = &adapter->rx_ring[qid]; + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; +} + +/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int ena_setup_all_rx_resources(struct ena_adapter *adapter) +{ + int i, rc = 0; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_setup_rx_resources(adapter, i); + if (rc) + goto err_setup_rx; + } + + return 0; + +err_setup_rx: + + netif_err(adapter, ifup, adapter->netdev, + "Rx queue %d: allocation failed\n", i); + + /* rewind the index freeing the rings as we go */ + while (i--) + ena_free_rx_resources(adapter, i); + return rc; +} + +/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + */ +static void ena_free_all_io_rx_resources(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + ena_free_rx_resources(adapter, i); +} + +static inline int ena_alloc_rx_page(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info, gfp_t gfp) +{ + struct ena_com_buf *ena_buf; + struct page *page; + dma_addr_t dma; + + /* if the previously allocated page is not used */ + if (unlikely(rx_info->page)) + return 0; + + page = alloc_page(gfp); + if (unlikely(!page)) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.page_alloc_fail++; + u64_stats_update_end(&rx_ring->syncp); + return -ENOMEM; + } + + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.dma_mapping_err++; + u64_stats_update_end(&rx_ring->syncp); + + __free_page(page); + return -EIO; + } + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "alloc page %p, rx_info %p\n", page, rx_info); + + rx_info->page = page; + rx_info->page_offset = 0; + ena_buf = &rx_info->ena_buf; + ena_buf->paddr = dma; + ena_buf->len = PAGE_SIZE; + + return 0; +} + +static void ena_free_rx_page(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info) +{ + struct page *page = rx_info->page; + struct ena_com_buf *ena_buf = &rx_info->ena_buf; + + if (unlikely(!page)) { + netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, + "Trying to free unallocated buffer\n"); + return; + } + + dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, + DMA_FROM_DEVICE); + + __free_page(page); + rx_info->page = NULL; +} + +static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) +{ + u16 next_to_use; + u32 i; + int rc; + + next_to_use = rx_ring->next_to_use; + + for (i = 0; i < num; i++) { +
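/* Take the buffer slot at next_to_use; if the page allocation or the descriptor add below fails, we stop early and the refill is accounted as partial. */ +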
struct ena_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[next_to_use]; + + rc = ena_alloc_rx_page(rx_ring, rx_info, + __GFP_COLD | GFP_ATOMIC | __GFP_COMP); + if (unlikely(rc < 0)) { + netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, + "failed to alloc buffer for rx queue %d\n", + rx_ring->qid); + break; + } + rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, + &rx_info->ena_buf, + next_to_use); + if (unlikely(rc)) { + netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, + "failed to add buffer for rx queue %d\n", + rx_ring->qid); + break; + } + next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, + rx_ring->ring_size); + } + + if (unlikely(i < num)) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.refil_partial++; + u64_stats_update_end(&rx_ring->syncp); + netdev_warn(rx_ring->netdev, + "refilled rx qid %d with only %d buffers (from %d)\n", + rx_ring->qid, i, num); + } + + if (likely(i)) { + /* Add a memory barrier to make sure the descriptors were written + * before issuing the doorbell + */ + wmb(); + ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); + } + + rx_ring->next_to_use = next_to_use; + + return i; +} + +static void ena_free_rx_bufs(struct ena_adapter *adapter, + u32 qid) +{ + struct ena_ring *rx_ring = &adapter->rx_ring[qid]; + u32 i; + + for (i = 0; i < rx_ring->ring_size; i++) { + struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; + + if (rx_info->page) + ena_free_rx_page(rx_ring, rx_info); + } +} + +/* ena_refill_all_rx_bufs - allocate all queues Rx buffers + * @adapter: board private structure + * + */ +static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) +{ + struct ena_ring *rx_ring; + int i, rc, bufs_num; + + for (i = 0; i < adapter->num_queues; i++) { + rx_ring = &adapter->rx_ring[i]; + bufs_num = rx_ring->ring_size - 1; + rc = ena_refill_rx_bufs(rx_ring, bufs_num); + + if (unlikely(rc != bufs_num)) + netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, + "refilling Queue %d failed.
allocated %d buffers from: %d\n", + i, rc, bufs_num); + } +} + +static void ena_free_all_rx_bufs(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + ena_free_rx_bufs(adapter, i); +} + +/* ena_free_tx_bufs - Free Tx Buffers per Queue + * @tx_ring: TX ring for which buffers be freed + */ +static void ena_free_tx_bufs(struct ena_ring *tx_ring) +{ + u32 i; + + for (i = 0; i < tx_ring->ring_size; i++) { + struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; + struct ena_com_buf *ena_buf; + int nr_frags; + int j; + + if (!tx_info->skb) + continue; + + netdev_notice(tx_ring->netdev, + "free uncompleted tx skb qid %d idx 0x%x\n", + tx_ring->qid, i); + + ena_buf = tx_info->bufs; + dma_unmap_single(tx_ring->dev, + ena_buf->paddr, + ena_buf->len, + DMA_TO_DEVICE); + + /* unmap remaining mapped pages */ + nr_frags = tx_info->num_of_bufs - 1; + for (j = 0; j < nr_frags; j++) { + ena_buf++; + dma_unmap_page(tx_ring->dev, + ena_buf->paddr, + ena_buf->len, + DMA_TO_DEVICE); + } + + dev_kfree_skb_any(tx_info->skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->qid)); +} + +static void ena_free_all_tx_bufs(struct ena_adapter *adapter) +{ + struct ena_ring *tx_ring; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + tx_ring = &adapter->tx_ring[i]; + ena_free_tx_bufs(tx_ring); + } +} + +static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) +{ + u16 ena_qid; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + ena_qid = ENA_IO_TXQ_IDX(i); + ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); + } +} + +static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) +{ + u16 ena_qid; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + ena_qid = ENA_IO_RXQ_IDX(i); + ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); + } +} + +static void ena_destroy_all_io_queues(struct ena_adapter *adapter) +{ + ena_destroy_all_tx_queues(adapter); + ena_destroy_all_rx_queues(adapter); +} + +static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) +{ + struct ena_tx_buffer *tx_info = NULL; + + if (likely(req_id < tx_ring->ring_size)) { + tx_info = &tx_ring->tx_buffer_info[req_id]; + if (likely(tx_info->skb)) + return 0; + } + + if (tx_info) + netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_info doesn't have valid skb\n"); + else + netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, + "Invalid req_id: %hu\n", req_id); + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.bad_req_id++; + u64_stats_update_end(&tx_ring->syncp); + + /* Trigger device reset */ + set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags); + return -EFAULT; +} + +static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) +{ + struct netdev_queue *txq; + bool above_thresh; + u32 tx_bytes = 0; + u32 total_done = 0; + u16 next_to_clean; + u16 req_id; + int tx_pkts = 0; + int rc; + + next_to_clean = tx_ring->next_to_clean; + txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); + + while (tx_pkts < budget) { + struct ena_tx_buffer *tx_info; + struct sk_buff *skb; + struct ena_com_buf *ena_buf; + int i, nr_frags; + + rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, + &req_id); + if (rc) + break; + + rc = validate_tx_req_id(tx_ring, req_id); + if (rc) + break; + + tx_info = &tx_ring->tx_buffer_info[req_id]; + skb = tx_info->skb; + + /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ + prefetch(&skb->end); + + tx_info->skb = NULL; + tx_info->last_jiffies = 0; + + if 
(likely(tx_info->num_of_bufs != 0)) { + ena_buf = tx_info->bufs; + + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + + /* unmap remaining mapped pages */ + nr_frags = tx_info->num_of_bufs - 1; + for (i = 0; i < nr_frags; i++) { + ena_buf++; + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + } + } + + netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_poll: q %d skb %p completed\n", tx_ring->qid, + skb); + + tx_bytes += skb->len; + dev_kfree_skb(skb); + tx_pkts++; + total_done += tx_info->tx_descs; + + tx_ring->free_tx_ids[next_to_clean] = req_id; + next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, + tx_ring->ring_size); + } + + tx_ring->next_to_clean = next_to_clean; + ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); + ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); + + netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); + + netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, + "tx_poll: q %d done. total pkts: %d\n", + tx_ring->qid, tx_pkts); + + /* need to make the ring's circular update visible to + * ena_start_xmit() before checking for netif_queue_stopped(). + */ + smp_mb(); + + above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > + ENA_TX_WAKEUP_THRESH; + if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { + __netif_tx_lock(txq, smp_processor_id()); + above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > + ENA_TX_WAKEUP_THRESH; + if (netif_tx_queue_stopped(txq) && above_thresh) { + netif_tx_wake_queue(txq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_wakeup++; + u64_stats_update_end(&tx_ring->syncp); + } + __netif_tx_unlock(txq); + } + + tx_ring->per_napi_bytes += tx_bytes; + tx_ring->per_napi_packets += tx_pkts; + + return tx_pkts; +} + +static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, + struct ena_com_rx_buf_info *ena_bufs, + u32 descs, + u16 *next_to_clean) +{ + struct sk_buff *skb; + struct ena_rx_buffer *rx_info = + &rx_ring->rx_buffer_info[*next_to_clean]; + u32 len; + u32 buf = 0; + void *va; + + len = ena_bufs[0].len; + if (unlikely(!rx_info->page)) { + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "Page is NULL\n"); + return NULL; + } + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx_info %p page %p\n", + rx_info, rx_info->page); + + /* save virt address of first buffer */ + va = page_address(rx_info->page) + rx_info->page_offset; + prefetch(va + NET_IP_ALIGN); + + if (len <= rx_ring->rx_copybreak) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_copybreak); + if (unlikely(!skb)) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.skb_alloc_fail++; + u64_stats_update_end(&rx_ring->syncp); + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "Failed to allocate skb\n"); + return NULL; + } + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx allocated small packet. len %d.
data_len %d\n", + skb->len, skb->data_len); + + /* sync this buffer for CPU use */ + dma_sync_single_for_cpu(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr), + len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, va, len); + dma_sync_single_for_device(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr), + len, + DMA_FROM_DEVICE); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, + rx_ring->ring_size); + return skb; + } + + skb = napi_get_frags(rx_ring->napi); + if (unlikely(!skb)) { + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "Failed allocating skb\n"); + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.skb_alloc_fail++; + u64_stats_update_end(&rx_ring->syncp); + return NULL; + } + + do { + dma_unmap_page(rx_ring->dev, + dma_unmap_addr(&rx_info->ena_buf, paddr), + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, + rx_info->page_offset, len, PAGE_SIZE); + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx skb updated. len %d. data_len %d\n", + skb->len, skb->data_len); + + rx_info->page = NULL; + *next_to_clean = + ENA_RX_RING_IDX_NEXT(*next_to_clean, + rx_ring->ring_size); + if (likely(--descs == 0)) + break; + rx_info = &rx_ring->rx_buffer_info[*next_to_clean]; + len = ena_bufs[++buf].len; + } while (1); + + return skb; +} + +/* ena_rx_checksum - indicate in skb if hw indicated a good cksum + * @adapter: structure containing adapter specific data + * @ena_rx_ctx: received packet context/metadata + * @skb: skb currently being received and modified + */ +static inline void ena_rx_checksum(struct ena_ring *rx_ring, + struct ena_com_rx_ctx *ena_rx_ctx, + struct sk_buff *skb) +{ + /* Rx csum disabled */ + if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + /* For fragmented packets the checksum isn't valid */ + if (ena_rx_ctx->frag) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + /* if IP and error */ + if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && + (ena_rx_ctx->l3_csum_err))) { + /* ipv4 checksum error */ + skb->ip_summed = CHECKSUM_NONE; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bad_csum++; + u64_stats_update_end(&rx_ring->syncp); + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "RX IPv4 header checksum error\n"); + return; + } + + /* if TCP/UDP */ + if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || + (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { + if (unlikely(ena_rx_ctx->l4_csum_err)) { + /* TCP/UDP checksum error */ + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bad_csum++; + u64_stats_update_end(&rx_ring->syncp); + netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, + "RX L4 checksum error\n"); + skb->ip_summed = CHECKSUM_NONE; + return; + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +static void ena_set_rx_hash(struct ena_ring *rx_ring, + struct ena_com_rx_ctx *ena_rx_ctx, + struct sk_buff *skb) +{ + enum pkt_hash_types hash_type; + + if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { + if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || + (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) + + hash_type = PKT_HASH_TYPE_L4; + else + hash_type = PKT_HASH_TYPE_NONE; + + /* Override hash type if the packet is fragmented */ + if (ena_rx_ctx->frag) + hash_type = PKT_HASH_TYPE_NONE; + + skb_set_hash(skb, 
ena_rx_ctx->hash, hash_type); + } +} + +/* ena_clean_rx_irq - Cleanup RX irq + * @rx_ring: RX ring to clean + * @napi: napi handler + * @budget: how many packets driver is allowed to clean + * + * Returns the number of cleaned buffers. + */ +static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, + u32 budget) +{ + u16 next_to_clean = rx_ring->next_to_clean; + u32 res_budget, work_done; + + struct ena_com_rx_ctx ena_rx_ctx; + struct ena_adapter *adapter; + struct sk_buff *skb; + int refill_required; + int refill_threshold; + int rc = 0; + int total_len = 0; + int rx_copybreak_pkt = 0; + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "%s qid %d\n", __func__, rx_ring->qid); + res_budget = budget; + + do { + ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; + ena_rx_ctx.max_bufs = rx_ring->sgl_size; + ena_rx_ctx.descs = 0; + rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, + rx_ring->ena_com_io_sq, + &ena_rx_ctx); + if (unlikely(rc)) + goto error; + + if (unlikely(ena_rx_ctx.descs == 0)) + break; + + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, + "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", + rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, + ena_rx_ctx.l4_proto, ena_rx_ctx.hash); + + /* allocate skb and fill it */ + skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, + &next_to_clean); + + /* exit if we failed to retrieve a buffer */ + if (unlikely(!skb)) { + next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean, + ena_rx_ctx.descs, + rx_ring->ring_size); + break; + } + + ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); + + ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); + + skb_record_rx_queue(skb, rx_ring->qid); + + if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { + total_len += rx_ring->ena_bufs[0].len; + rx_copybreak_pkt++; + napi_gro_receive(napi, skb); + } else { + total_len += skb->len; + napi_gro_frags(napi); + } + + res_budget--; + } while (likely(res_budget)); + + work_done = budget - res_budget; + rx_ring->per_napi_bytes += total_len; + rx_ring->per_napi_packets += work_done; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bytes += total_len; + rx_ring->rx_stats.cnt += work_done; + rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; + u64_stats_update_end(&rx_ring->syncp); + + rx_ring->next_to_clean = next_to_clean; + + refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; + + /* Optimization, try to batch new rx buffers */ + if (refill_required > refill_threshold) { + ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); + ena_refill_rx_bufs(rx_ring, refill_required); + } + + return work_done; + +error: + adapter = netdev_priv(rx_ring->netdev); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.bad_desc_num++; + u64_stats_update_end(&rx_ring->syncp); + + /* Too many desc from the device. Trigger reset */ + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + + return 0; +} + +inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, + struct ena_ring *tx_ring) +{ + /* We apply adaptive moderation on Rx path only. + * Tx uses static interrupt moderation. 
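+ * The ena_com helper below consumes the per-NAPI packet and byte counters and returns an updated smoothed interval and moderation table index.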
+ */ + ena_com_calculate_interrupt_delay(rx_ring->ena_dev, + rx_ring->per_napi_packets, + rx_ring->per_napi_bytes, + &rx_ring->smoothed_interval, + &rx_ring->moder_tbl_idx); + + /* Reset per napi packets/bytes */ + tx_ring->per_napi_packets = 0; + tx_ring->per_napi_bytes = 0; + rx_ring->per_napi_packets = 0; + rx_ring->per_napi_bytes = 0; +} + +static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, + struct ena_ring *rx_ring) +{ + int cpu = get_cpu(); + int numa_node; + + /* Check only one ring since the 2 rings are running on the same cpu */ + if (likely(tx_ring->cpu == cpu)) + goto out; + + numa_node = cpu_to_node(cpu); + put_cpu(); + + if (numa_node != NUMA_NO_NODE) { + ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); + ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); + } + + tx_ring->cpu = cpu; + rx_ring->cpu = cpu; + + return; +out: + put_cpu(); +} + +static int ena_io_poll(struct napi_struct *napi, int budget) +{ + struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); + struct ena_ring *tx_ring, *rx_ring; + struct ena_eth_io_intr_reg intr_reg; + + u32 tx_work_done; + u32 rx_work_done; + int tx_budget; + int napi_comp_call = 0; + int ret; + + tx_ring = ena_napi->tx_ring; + rx_ring = ena_napi->rx_ring; + + tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; + + if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { + napi_complete_done(napi, 0); + return 0; + } + + tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); + rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); + + if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { + napi_complete_done(napi, rx_work_done); + + napi_comp_call = 1; + /* Tx and Rx share the same interrupt vector */ + if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) + ena_adjust_intr_moderation(rx_ring, tx_ring); + + /* Update intr register: rx intr delay, tx intr delay and + * interrupt unmask + */ + ena_com_update_intr_reg(&intr_reg, + rx_ring->smoothed_interval, + tx_ring->smoothed_interval, + true); + + /* It is a shared MSI-X. Tx and Rx CQ have pointer to it. 
+ * So we use one of them to reach the intr reg + */ + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); + + ena_update_ring_numa_node(tx_ring, rx_ring); + + ret = rx_work_done; + } else { + ret = budget; + } + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.napi_comp += napi_comp_call; + tx_ring->tx_stats.tx_poll++; + u64_stats_update_end(&tx_ring->syncp); + + return ret; +} + +static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + + ena_com_admin_q_comp_intr_handler(adapter->ena_dev); + + /* Don't call the aenq handler before probe is done */ + if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) + ena_com_aenq_intr_handler(adapter->ena_dev, data); + + return IRQ_HANDLED; +} + +/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx + * @irq: interrupt number + * @data: pointer to a network interface private napi device structure + */ +static irqreturn_t ena_intr_msix_io(int irq, void *data) +{ + struct ena_napi *ena_napi = data; + + napi_schedule(&ena_napi->napi); + + return IRQ_HANDLED; +} + +static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) +{ + int i, msix_vecs, rc; + + if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { + netif_err(adapter, probe, adapter->netdev, + "Error, MSI-X is already enabled\n"); + return -EPERM; + } + + /* Reserved the max msix vectors we might need */ + msix_vecs = ENA_MAX_MSIX_VEC(num_queues); + + netif_dbg(adapter, probe, adapter->netdev, + "trying to enable MSI-X, vectors %d\n", msix_vecs); + + adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry)); + + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_vecs; i++) + adapter->msix_entries[i].entry = i; + + rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs); + if (rc != 0) { + netif_err(adapter, probe, adapter->netdev, + "Failed to enable MSI-X, vectors %d rc %d\n", + msix_vecs, rc); + return -ENOSPC; + } + + netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n", + msix_vecs); + + if (msix_vecs >= 1) { + if (ena_init_rx_cpu_rmap(adapter)) + netif_warn(adapter, probe, adapter->netdev, + "Failed to map IRQs to CPUs\n"); + } + + adapter->msix_vecs = msix_vecs; + set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); + + return 0; +} + +static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) +{ + u32 cpu; + + snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, + ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", + pci_name(adapter->pdev)); + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = + ena_intr_msix_mgmnt; + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = + adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; + cpu = cpumask_first(cpu_online_mask); + adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; + cpumask_set_cpu(cpu, + &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); +} + +static void ena_setup_io_intr(struct ena_adapter *adapter) +{ + struct net_device *netdev; + int irq_idx, i, cpu; + + netdev = adapter->netdev; + + for (i = 0; i < adapter->num_queues; i++) { + irq_idx = ENA_IO_IRQ_IDX(i); + cpu = i % num_online_cpus(); + + snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, + "%s-Tx-Rx-%d", netdev->name, i); + adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; + adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; + adapter->irq_tbl[irq_idx].vector = + adapter->msix_entries[irq_idx].vector; + adapter->irq_tbl[irq_idx].cpu = cpu; + + 
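/* Record the chosen CPU in the affinity hint mask; the hint is applied via irq_set_affinity_hint() when ena_request_io_irq() requests the vector. */ +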
cpumask_set_cpu(cpu, + &adapter->irq_tbl[irq_idx].affinity_hint_mask); + } +} + +static int ena_request_mgmnt_irq(struct ena_adapter *adapter) +{ + unsigned long flags = 0; + struct ena_irq *irq; + int rc; + + irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + irq->data); + if (rc) { + netif_err(adapter, probe, adapter->netdev, + "failed to request admin irq\n"); + return rc; + } + + netif_dbg(adapter, probe, adapter->netdev, + "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", + irq->affinity_hint_mask.bits[0], irq->vector); + + irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); + + return rc; +} + +static int ena_request_io_irq(struct ena_adapter *adapter) +{ + unsigned long flags = 0; + struct ena_irq *irq; + int rc = 0, i, k; + + if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to request I/O IRQ: MSI-X is not enabled\n"); + return -EINVAL; + } + + for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + irq = &adapter->irq_tbl[i]; + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + irq->data); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to request I/O IRQ. index %d rc %d\n", + i, rc); + goto err; + } + + netif_dbg(adapter, ifup, adapter->netdev, + "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n", + i, irq->affinity_hint_mask.bits[0], irq->vector); + + irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); + } + + return rc; + +err: + for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { + irq = &adapter->irq_tbl[k]; + free_irq(irq->vector, irq->data); + } + + return rc; +} + +static void ena_free_mgmnt_irq(struct ena_adapter *adapter) +{ + struct ena_irq *irq; + + irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; + synchronize_irq(irq->vector); + irq_set_affinity_hint(irq->vector, NULL); + free_irq(irq->vector, irq->data); +} + +static void ena_free_io_irq(struct ena_adapter *adapter) +{ + struct ena_irq *irq; + int i; + +#ifdef CONFIG_RFS_ACCEL + if (adapter->msix_vecs >= 1) { + free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); + adapter->netdev->rx_cpu_rmap = NULL; + } +#endif /* CONFIG_RFS_ACCEL */ + + for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { + irq = &adapter->irq_tbl[i]; + irq_set_affinity_hint(irq->vector, NULL); + free_irq(irq->vector, irq->data); + } +} + +static void ena_disable_msix(struct ena_adapter *adapter) +{ + if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) + pci_disable_msix(adapter->pdev); + + if (adapter->msix_entries) + vfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static void ena_disable_io_intr_sync(struct ena_adapter *adapter) +{ + int i; + + if (!netif_running(adapter->netdev)) + return; + + for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) + synchronize_irq(adapter->irq_tbl[i].vector); +} + +static void ena_del_napi(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + netif_napi_del(&adapter->ena_napi[i].napi); +} + +static void ena_init_napi(struct ena_adapter *adapter) +{ + struct ena_napi *napi; + int i; + + for (i = 0; i < adapter->num_queues; i++) { + napi = &adapter->ena_napi[i]; + + netif_napi_add(adapter->netdev, + &adapter->ena_napi[i].napi, + ena_io_poll, + ENA_NAPI_BUDGET); + napi->rx_ring = &adapter->rx_ring[i]; + napi->tx_ring = &adapter->tx_ring[i]; + napi->qid = i; + } +} + +static void ena_napi_disable_all(struct ena_adapter *adapter) +{ + int i; + + 
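/* napi_disable() returns only once any in-flight poll on the queue has finished, so the IO path is fully quiesced when the loop completes. */ +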
for (i = 0; i < adapter->num_queues; i++) + napi_disable(&adapter->ena_napi[i].napi); +} + +static void ena_napi_enable_all(struct ena_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + napi_enable(&adapter->ena_napi[i].napi); +} + +static void ena_restore_ethtool_params(struct ena_adapter *adapter) +{ + adapter->tx_usecs = 0; + adapter->rx_usecs = 0; + adapter->tx_frames = 1; + adapter->rx_frames = 1; +} + +/* Configure the Rx forwarding */ +static int ena_rss_configure(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int rc; + + /* In case the RSS table wasn't initialized by probe */ + if (!ena_dev->rss.tbl_log_size) { + rc = ena_rss_init_default(adapter); + if (rc && (rc != -EPERM)) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to init RSS rc: %d\n", rc); + return rc; + } + } + + /* Set indirect table */ + rc = ena_com_indirect_table_set(ena_dev); + if (unlikely(rc && rc != -EPERM)) + return rc; + + /* Configure hash function (if supported) */ + rc = ena_com_set_hash_function(ena_dev); + if (unlikely(rc && (rc != -EPERM))) + return rc; + + /* Configure hash inputs (if supported) */ + rc = ena_com_set_hash_ctrl(ena_dev); + if (unlikely(rc && (rc != -EPERM))) + return rc; + + return 0; +} + +static int ena_up_complete(struct ena_adapter *adapter) +{ + int rc, i; + + rc = ena_rss_configure(adapter); + if (rc) + return rc; + + ena_init_napi(adapter); + + ena_change_mtu(adapter->netdev, adapter->netdev->mtu); + + ena_refill_all_rx_bufs(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + ena_restore_ethtool_params(adapter); + + ena_napi_enable_all(adapter); + + /* schedule napi in case we had pending packets + * from the last time we disable napi + */ + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); + + return 0; +} + +static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) +{ + struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_dev *ena_dev; + struct ena_ring *tx_ring; + u32 msix_vector; + u16 ena_qid; + int rc; + + ena_dev = adapter->ena_dev; + + tx_ring = &adapter->tx_ring[qid]; + msix_vector = ENA_IO_IRQ_IDX(qid); + ena_qid = ENA_IO_TXQ_IDX(qid); + + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; + ctx.qid = ena_qid; + ctx.mem_queue_type = ena_dev->tx_mem_queue_type; + ctx.msix_vector = msix_vector; + ctx.queue_size = adapter->tx_ring_size; + ctx.numa_node = cpu_to_node(tx_ring->cpu); + + rc = ena_com_create_io_queue(ena_dev, &ctx); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to create I/O TX queue num %d rc: %d\n", + qid, rc); + return rc; + } + + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &tx_ring->ena_com_io_sq, + &tx_ring->ena_com_io_cq); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to get TX queue handlers. 
TX queue num %d rc: %d\n", + qid, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + } + + ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); + return rc; +} + +static int ena_create_all_io_tx_queues(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int rc, i; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_create_io_tx_queue(adapter, i); + if (rc) + goto create_err; + } + + return 0; + +create_err: + while (i--) + ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); + + return rc; +} + +static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) +{ + struct ena_com_dev *ena_dev; + struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_ring *rx_ring; + u32 msix_vector; + u16 ena_qid; + int rc; + + ena_dev = adapter->ena_dev; + + rx_ring = &adapter->rx_ring[qid]; + msix_vector = ENA_IO_IRQ_IDX(qid); + ena_qid = ENA_IO_RXQ_IDX(qid); + + ctx.qid = ena_qid; + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; + ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + ctx.msix_vector = msix_vector; + ctx.queue_size = adapter->rx_ring_size; + ctx.numa_node = cpu_to_node(rx_ring->cpu); + + rc = ena_com_create_io_queue(ena_dev, &ctx); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to create I/O RX queue num %d rc: %d\n", + qid, rc); + return rc; + } + + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &rx_ring->ena_com_io_sq, + &rx_ring->ena_com_io_cq); + if (rc) { + netif_err(adapter, ifup, adapter->netdev, + "Failed to get RX queue handlers. RX queue num %d rc: %d\n", + qid, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + } + + ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); + + return rc; +} + +static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int rc, i; + + for (i = 0; i < adapter->num_queues; i++) { + rc = ena_create_io_rx_queue(adapter, i); + if (rc) + goto create_err; + } + + return 0; + +create_err: + while (i--) + ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); + + return rc; +} + +static int ena_up(struct ena_adapter *adapter) +{ + int rc; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + ena_setup_io_intr(adapter); + + rc = ena_request_io_irq(adapter); + if (rc) + goto err_req_irq; + + /* allocate transmit descriptors */ + rc = ena_setup_all_tx_resources(adapter); + if (rc) + goto err_setup_tx; + + /* allocate receive descriptors */ + rc = ena_setup_all_rx_resources(adapter); + if (rc) + goto err_setup_rx; + + /* Create TX queues */ + rc = ena_create_all_io_tx_queues(adapter); + if (rc) + goto err_create_tx_queues; + + /* Create RX queues */ + rc = ena_create_all_io_rx_queues(adapter); + if (rc) + goto err_create_rx_queues; + + rc = ena_up_complete(adapter); + if (rc) + goto err_up; + + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.interface_up++; + u64_stats_update_end(&adapter->syncp); + + set_bit(ENA_FLAG_DEV_UP, &adapter->flags); + + return rc; + +err_up: + ena_destroy_all_rx_queues(adapter); +err_create_rx_queues: + ena_destroy_all_tx_queues(adapter); +err_create_tx_queues: + ena_free_all_io_rx_resources(adapter); +err_setup_rx: + ena_free_all_io_tx_resources(adapter); +err_setup_tx: + ena_free_io_irq(adapter); +err_req_irq: + + return rc; +} + +static void ena_down(struct ena_adapter *adapter) +{ + netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); + + 
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.interface_down++; + u64_stats_update_end(&adapter->syncp); + + /* After this point the napi handler won't enable the tx queue */ + ena_napi_disable_all(adapter); + netif_carrier_off(adapter->netdev); + netif_tx_disable(adapter->netdev); + + /* After destroying the queues there won't be any new interrupts */ + ena_destroy_all_io_queues(adapter); + + ena_disable_io_intr_sync(adapter); + ena_free_io_irq(adapter); + ena_del_napi(adapter); + + ena_free_all_tx_bufs(adapter); + ena_free_all_rx_bufs(adapter); + ena_free_all_io_tx_resources(adapter); + ena_free_all_io_rx_resources(adapter); +} + +/* ena_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int ena_open(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int rc; + + /* Notify the stack of the actual queue counts. */ + rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues); + if (rc) { + netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); + return rc; + } + + rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues); + if (rc) { + netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); + return rc; + } + + return ena_up(adapter); +} + +/* ena_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed.
+ */ +static int ena_close(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + + netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); + + if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + ena_down(adapter); + + return 0; +} + +static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb) +{ + u32 mss = skb_shinfo(skb)->gso_size; + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + u8 l4_protocol = 0; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { + ena_tx_ctx->l4_csum_enable = 1; + if (mss) { + ena_tx_ctx->tso_enable = 1; + ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; + ena_tx_ctx->l4_csum_partial = 0; + } else { + ena_tx_ctx->tso_enable = 0; + ena_meta->l4_hdr_len = 0; + ena_tx_ctx->l4_csum_partial = 1; + } + + switch (ip_hdr(skb)->version) { + case IPVERSION: + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; + if (ip_hdr(skb)->frag_off & htons(IP_DF)) + ena_tx_ctx->df = 1; + if (mss) + ena_tx_ctx->l3_csum_enable = 1; + l4_protocol = ip_hdr(skb)->protocol; + break; + case 6: + ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; + l4_protocol = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + + if (l4_protocol == IPPROTO_TCP) + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; + else + ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; + + ena_meta->mss = mss; + ena_meta->l3_hdr_len = skb_network_header_len(skb); + ena_meta->l3_hdr_offset = skb_network_offset(skb); + ena_tx_ctx->meta_valid = 1; + + } else { + ena_tx_ctx->meta_valid = 0; + } +} + +static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, + struct sk_buff *skb) +{ + int num_frags, header_len, rc; + + num_frags = skb_shinfo(skb)->nr_frags; + header_len = skb_headlen(skb); + + if (num_frags < tx_ring->sgl_size) + return 0; + + if ((num_frags == tx_ring->sgl_size) && + (header_len < tx_ring->tx_max_header_size)) + return 0; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.linearize++; + u64_stats_update_end(&tx_ring->syncp); + + rc = skb_linearize(skb); + if (unlikely(rc)) { + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.linearize_failed++; + u64_stats_update_end(&tx_ring->syncp); + } + + return rc; +} + +/* Called with netif_tx_lock. 
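(the stack already holds the per-queue xmit lock when it calls ndo_start_xmit)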
*/ +static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + struct ena_tx_buffer *tx_info; + struct ena_com_tx_ctx ena_tx_ctx; + struct ena_ring *tx_ring; + struct netdev_queue *txq; + struct ena_com_buf *ena_buf; + void *push_hdr; + u32 len, last_frag; + u16 next_to_use; + u16 req_id; + u16 push_len; + u16 header_len; + dma_addr_t dma; + int qid, rc, nb_hw_desc; + int i = -1; + + netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); + /* Determine which tx ring we will be placed on */ + qid = skb_get_queue_mapping(skb); + tx_ring = &adapter->tx_ring[qid]; + txq = netdev_get_tx_queue(dev, qid); + + rc = ena_check_and_linearize_skb(tx_ring, skb); + if (unlikely(rc)) + goto error_drop_packet; + + skb_tx_timestamp(skb); + len = skb_headlen(skb); + + next_to_use = tx_ring->next_to_use; + req_id = tx_ring->free_tx_ids[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); + ena_buf = tx_info->bufs; + tx_info->skb = skb; + + if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* prepared the push buffer */ + push_len = min_t(u32, len, tx_ring->tx_max_header_size); + header_len = push_len; + push_hdr = skb->data; + } else { + push_len = 0; + header_len = min_t(u32, len, tx_ring->tx_max_header_size); + push_hdr = NULL; + } + + netif_dbg(adapter, tx_queued, dev, + "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, + push_hdr, push_len); + + if (len > push_len) { + dma = dma_map_single(tx_ring->dev, skb->data + push_len, + len - push_len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto error_report_dma_error; + + ena_buf->paddr = dma; + ena_buf->len = len - push_len; + + ena_buf++; + tx_info->num_of_bufs++; + } + + last_frag = skb_shinfo(skb)->nr_frags; + + for (i = 0; i < last_frag; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto error_report_dma_error; + + ena_buf->paddr = dma; + ena_buf->len = len; + ena_buf++; + } + + tx_info->num_of_bufs += last_frag; + + memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); + ena_tx_ctx.ena_bufs = tx_info->bufs; + ena_tx_ctx.push_header = push_hdr; + ena_tx_ctx.num_bufs = tx_info->num_of_bufs; + ena_tx_ctx.req_id = req_id; + ena_tx_ctx.header_len = header_len; + + /* set flags and meta data */ + ena_tx_csum(&ena_tx_ctx, skb); + + /* prepare the packet's descriptors to dma engine */ + rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, + &nb_hw_desc); + + if (unlikely(rc)) { + netif_err(adapter, tx_queued, dev, + "failed to prepare tx bufs\n"); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_stop++; + tx_ring->tx_stats.prepare_ctx_err++; + u64_stats_update_end(&tx_ring->syncp); + netif_tx_stop_queue(txq); + goto error_unmap_dma; + } + + netdev_tx_sent_queue(txq, skb->len); + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.cnt++; + tx_ring->tx_stats.bytes += skb->len; + u64_stats_update_end(&tx_ring->syncp); + + tx_info->tx_descs = nb_hw_desc; + tx_info->last_jiffies = jiffies; + + tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, + tx_ring->ring_size); + + /* This WMB is aimed to: + * 1 - perform smp barrier before reading next_to_completion + * 2 - make sure the desc were written before trigger DB + */ + wmb(); + + /* stop 
the queue when no more space is available; the packet can take up + * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header + * (if the header is larger than tx_max_header_size). + */ + if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < + (tx_ring->sgl_size + 2))) { + netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", + __func__, qid); + + netif_tx_stop_queue(txq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_stop++; + u64_stats_update_end(&tx_ring->syncp); + + /* There is a rare condition where this function decides to + * stop the queue but meanwhile clean_tx_irq updates + * next_to_completion and terminates. + * The queue will remain stopped forever. + * To solve this issue this function performs an rmb, checks + * the wakeup condition and wakes up the queue if needed. + */ + smp_rmb(); + + if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) + > ENA_TX_WAKEUP_THRESH) { + netif_tx_wake_queue(txq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.queue_wakeup++; + u64_stats_update_end(&tx_ring->syncp); + } + } + + if (netif_xmit_stopped(txq) || !skb->xmit_more) { + /* trigger the dma engine */ + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.doorbells++; + u64_stats_update_end(&tx_ring->syncp); + } + + return NETDEV_TX_OK; + +error_report_dma_error: + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.dma_mapping_err++; + u64_stats_update_end(&tx_ring->syncp); + netdev_warn(adapter->netdev, "failed to map skb\n"); + + tx_info->skb = NULL; + +error_unmap_dma: + if (i >= 0) { + /* save value of frag that failed */ + last_frag = i; + + /* start back at beginning and unmap skb */ + tx_info->skb = NULL; + ena_buf = tx_info->bufs; + dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + ena_buf++; + dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + } + } + +error_drop_packet: + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ena_netpoll(struct net_device *netdev) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + int i; + + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + u16 qid; + /* we suspect that this is good for in-kernel network services that + * want to loop incoming skb rx to tx in normal user-generated traffic; + * most probably we will not get to this + */ + if (skb_rx_queue_recorded(skb)) + qid = skb_get_rx_queue(skb); + else + qid = fallback(dev, skb); + + return qid; +} + +static void ena_config_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_admin_host_info *host_info; + int rc; + + /* Allocate only the host info */ + rc = ena_com_allocate_host_info(ena_dev); + if (rc) { + pr_err("Cannot allocate host info\n"); + return; + } + + host_info = ena_dev->host_attr.host_info; + + host_info->os_type = ENA_ADMIN_OS_LINUX; + host_info->kernel_ver = LINUX_VERSION_CODE; + strncpy(host_info->kernel_ver_str, utsname()->version, + sizeof(host_info->kernel_ver_str) - 1); + host_info->os_dist = 0; + strncpy(host_info->os_dist_str, utsname()->release, + sizeof(host_info->os_dist_str) - 1); +
+ host_info->driver_version = + (DRV_MODULE_VER_MAJOR) | + (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | + (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { + if (rc == -EPERM) + pr_warn("Cannot set host attributes\n"); + else + pr_err("Cannot set host attributes\n"); + + goto err; + } + + return; + +err: + ena_com_delete_host_info(ena_dev); +} + +static void ena_config_debug_area(struct ena_adapter *adapter) +{ + u32 debug_area_size; + int rc, ss_count; + + ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); + if (ss_count <= 0) { + netif_err(adapter, drv, adapter->netdev, + "SS count is not positive\n"); + return; + } + + /* allocate 32 bytes for each string and 64 bits for the value */ + debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; + + rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); + if (rc) { + pr_err("Cannot allocate debug area\n"); + return; + } + + rc = ena_com_set_host_attributes(adapter->ena_dev); + if (rc) { + if (rc == -EPERM) + netif_warn(adapter, drv, adapter->netdev, + "Cannot set host attributes\n"); + else + netif_err(adapter, drv, adapter->netdev, + "Cannot set host attributes\n"); + goto err; + } + + return; +err: + ena_com_delete_debug_area(adapter->ena_dev); +} + +static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ena_adapter *adapter = netdev_priv(netdev); + struct ena_admin_basic_stats ena_stats; + int rc; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return NULL; + + rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats); + if (rc) + return NULL; + + stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) | + ena_stats.tx_bytes_low; + stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) | + ena_stats.rx_bytes_low; + + stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) | + ena_stats.rx_pkts_low; + stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) | + ena_stats.tx_pkts_low; + + stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) | + ena_stats.rx_drops_low; + + stats->multicast = 0; + stats->collisions = 0; + + stats->rx_length_errors = 0; + stats->rx_crc_errors = 0; + stats->rx_frame_errors = 0; + stats->rx_fifo_errors = 0; + stats->rx_missed_errors = 0; + stats->tx_window_errors = 0; + + stats->rx_errors = 0; + stats->tx_errors = 0; + + return stats; +} + +static const struct net_device_ops ena_netdev_ops = { + .ndo_open = ena_open, + .ndo_stop = ena_close, + .ndo_start_xmit = ena_start_xmit, + .ndo_select_queue = ena_select_queue, + .ndo_get_stats64 = ena_get_stats64, + .ndo_tx_timeout = ena_tx_timeout, + .ndo_change_mtu = ena_change_mtu, + .ndo_set_mac_address = NULL, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ena_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ +}; + +static void ena_device_io_suspend(struct work_struct *work) +{ + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, suspend_io_task); + struct net_device *netdev = adapter->netdev; + + /* ena_napi_disable_all disables only the IO handling. + * We are still subject to the AENQ keep alive watchdog. + */
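+ /* Editor's sketch (illustrative, not part of this patch): statistics in + * this driver follow the u64_stats bracketing pattern, so 64-bit counters + * read consistently on 32-bit SMP kernels: + * + * u64_stats_update_begin(&syncp); + * counter++; + * u64_stats_update_end(&syncp); + */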
+ u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.io_suspend++; + u64_stats_update_end(&adapter->syncp); + ena_napi_disable_all(adapter); + netif_tx_lock(netdev); + netif_device_detach(netdev); + netif_tx_unlock(netdev); +} + +static void ena_device_io_resume(struct work_struct *work) +{ + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, resume_io_task); + struct net_device *netdev = adapter->netdev; + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.io_resume++; + u64_stats_update_end(&adapter->syncp); + + netif_device_attach(netdev); + ena_napi_enable_all(adapter); +} + +static int ena_device_validate_params(struct ena_adapter *adapter, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + struct net_device *netdev = adapter->netdev; + int rc; + + rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, + adapter->mac_addr); + if (!rc) { + netif_err(adapter, drv, netdev, + "Error, mac addresses are different\n"); + return -EINVAL; + } + + if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) || + (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) { + netif_err(adapter, drv, netdev, + "Error, device doesn't support enough queues\n"); + return -EINVAL; + } + + if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { + netif_err(adapter, drv, netdev, + "Error, device max mtu is smaller than netdev MTU\n"); + return -EINVAL; + } + + return 0; +} + +static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, + struct ena_com_dev_get_features_ctx *get_feat_ctx, + bool *wd_state) +{ + struct device *dev = &pdev->dev; + bool readless_supported; + u32 aenq_groups; + int dma_width; + int rc; + + rc = ena_com_mmio_reg_read_request_init(ena_dev); + if (rc) { + dev_err(dev, "failed to init mmio read less\n"); + return rc; + } + + /* The PCIe configuration space revision id indicates whether mmio reg + * read is disabled + */ + readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); + ena_com_set_mmio_read_mode(ena_dev, readless_supported); + + rc = ena_com_dev_reset(ena_dev); + if (rc) { + dev_err(dev, "Can not reset device\n"); + goto err_mmio_read_less; + } + + rc = ena_com_validate_version(ena_dev); + if (rc) { + dev_err(dev, "device version is too low\n"); + goto err_mmio_read_less; + } + + dma_width = ena_com_get_dma_width(ena_dev); + if (dma_width < 0) { + dev_err(dev, "Invalid dma width value %d", dma_width); + rc = dma_width; + goto err_mmio_read_less; + } + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); + if (rc) { + dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); + goto err_mmio_read_less; + } + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); + if (rc) { + dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n", + rc); + goto err_mmio_read_less; + } + + /* ENA admin level init */ + rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); + if (rc) { + dev_err(dev, + "Can not initialize ena admin queue with device\n"); + goto err_mmio_read_less; + } + + /* To enable the msix interrupts the driver needs to know the number + * of queues, so the driver uses polling mode to retrieve this + * information. + */
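+ /* Editor's note: polling mode here means admin completions are read + * back from the completion queue in memory instead of being signalled + * by MSI-X; it is switched off again in + * ena_enable_msix_and_set_admin_interrupts() below once the management + * vector is requested. + */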
+ ena_com_set_admin_polling_mode(ena_dev, true); + + /* Get Device Attributes */ + rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); + if (rc) { + dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); + goto err_admin_init; + } + + /* Try to turn on all the available aenq groups */ + aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | + BIT(ENA_ADMIN_FATAL_ERROR) | + BIT(ENA_ADMIN_WARNING) | + BIT(ENA_ADMIN_NOTIFICATION) | + BIT(ENA_ADMIN_KEEP_ALIVE); + + aenq_groups &= get_feat_ctx->aenq.supported_groups; + + rc = ena_com_set_aenq_config(ena_dev, aenq_groups); + if (rc) { + dev_err(dev, "Cannot configure aenq groups rc=%d\n", rc); + goto err_admin_init; + } + + *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); + + ena_config_host_info(ena_dev); + + return 0; + +err_admin_init: + ena_com_admin_destroy(ena_dev); +err_mmio_read_less: + ena_com_mmio_reg_read_request_destroy(ena_dev); + + return rc; +} + +static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, + int io_vectors) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct device *dev = &adapter->pdev->dev; + int rc; + + rc = ena_enable_msix(adapter, io_vectors); + if (rc) { + dev_err(dev, "Can not reserve msix vectors\n"); + return rc; + } + + ena_setup_mgmnt_intr(adapter); + + rc = ena_request_mgmnt_irq(adapter); + if (rc) { + dev_err(dev, "Can not setup management interrupts\n"); + goto err_disable_msix; + } + + ena_com_set_admin_polling_mode(ena_dev, false); + + ena_com_admin_aenq_enable(ena_dev); + + return 0; + +err_disable_msix: + ena_disable_msix(adapter); + + return rc; +} + +static void ena_fw_reset_device(struct work_struct *work) +{ + struct ena_com_dev_get_features_ctx get_feat_ctx; + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, reset_task); + struct net_device *netdev = adapter->netdev; + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct pci_dev *pdev = adapter->pdev; + bool dev_up, wd_state; + int rc; + + del_timer_sync(&adapter->timer_service); + + rtnl_lock(); + + dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); + ena_com_set_admin_running_state(ena_dev, false); + + /* After calling ena_close the tx queues and the napi + * are disabled so no one can interfere or touch the + * data structures + */ + ena_close(netdev); + + rc = ena_com_dev_reset(ena_dev); + if (rc) { + dev_err(&pdev->dev, "Device reset failed\n"); + goto err; + } + + ena_free_mgmnt_irq(adapter); + + ena_disable_msix(adapter); + + ena_com_abort_admin_commands(ena_dev); + + ena_com_wait_for_abort_completion(ena_dev); + + ena_com_admin_destroy(ena_dev); + + ena_com_mmio_reg_read_request_destroy(ena_dev); + + /* Finish with the destroy part. Start the init part. */ +
+ rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); + if (rc) { + dev_err(&pdev->dev, "Can not initialize device\n"); + goto err; + } + adapter->wd_state = wd_state; + + rc = ena_device_validate_params(adapter, &get_feat_ctx); + if (rc) { + dev_err(&pdev->dev, "Validation of device parameters failed\n"); + goto err_device_destroy; + } + + rc = ena_enable_msix_and_set_admin_interrupts(adapter, + adapter->num_queues); + if (rc) { + dev_err(&pdev->dev, "Enable MSI-X failed\n"); + goto err_device_destroy; + } + /* If the interface was up before the reset, bring it up */ + if (dev_up) { + rc = ena_up(adapter); + if (rc) { + dev_err(&pdev->dev, "Failed to create I/O queues\n"); + goto err_disable_msix; + } + } + + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); + + rtnl_unlock(); + + dev_info(&pdev->dev, "Device reset completed successfully\n"); + + return; +err_disable_msix: + ena_free_mgmnt_irq(adapter); + ena_disable_msix(adapter); +err_device_destroy: + ena_com_admin_destroy(ena_dev); +err: + rtnl_unlock(); + + dev_err(&pdev->dev, + "Reset attempt failed. Can not reset the device\n"); +} + +static void check_for_missing_tx_completions(struct ena_adapter *adapter) +{ + struct ena_tx_buffer *tx_buf; + unsigned long last_jiffies; + struct ena_ring *tx_ring; + int i, j, budget; + u32 missed_tx; + + /* Make sure the driver doesn't turn off the device in another process */ + smp_rmb(); + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return; + + budget = ENA_MONITORED_TX_QUEUES; + + for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { + tx_ring = &adapter->tx_ring[i]; + + for (j = 0; j < tx_ring->ring_size; j++) { + tx_buf = &tx_ring->tx_buffer_info[j]; + last_jiffies = tx_buf->last_jiffies; + if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { + netif_notice(adapter, tx_err, adapter->netdev, + "Found a Tx that wasn't completed on time, qid %d, index %d.\n", + tx_ring->qid, j); + + u64_stats_update_begin(&tx_ring->syncp); + missed_tx = tx_ring->tx_stats.missing_tx_comp++; + u64_stats_update_end(&tx_ring->syncp); + + /* Clear last jiffies so the lost buffer won't + * be counted twice. + */ + tx_buf->last_jiffies = 0; + + if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { + netif_err(adapter, tx_err, adapter->netdev, + "The number of lost tx completions is above the threshold (%d > %d). 
Reset the device\n", + missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } + } + } + + budget--; + if (!budget) + break; + } + + adapter->last_monitored_tx_qid = i % adapter->num_queues; +} + +/* Check for keep alive expiration */ +static void check_for_missing_keep_alive(struct ena_adapter *adapter) +{ + unsigned long keep_alive_expired; + + if (!adapter->wd_state) + return; + + keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies + + ENA_DEVICE_KALIVE_TIMEOUT); + if (unlikely(time_is_before_jiffies(keep_alive_expired))) { + netif_err(adapter, drv, adapter->netdev, + "Keep alive watchdog timeout.\n"); + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.wd_expired++; + u64_stats_update_end(&adapter->syncp); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } +} + +static void check_for_admin_com_state(struct ena_adapter *adapter) +{ + if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { + netif_err(adapter, drv, adapter->netdev, + "ENA admin queue is not in running state!\n"); + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.admin_q_pause++; + u64_stats_update_end(&adapter->syncp); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } +} + +static void ena_update_host_info(struct ena_admin_host_info *host_info, + struct net_device *netdev) +{ + host_info->supported_network_features[0] = + netdev->features & GENMASK_ULL(31, 0); + host_info->supported_network_features[1] = + (netdev->features & GENMASK_ULL(63, 32)) >> 32; +} + +static void ena_timer_service(unsigned long data) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; + struct ena_admin_host_info *host_info = + adapter->ena_dev->host_attr.host_info; + + check_for_missing_keep_alive(adapter); + + check_for_admin_com_state(adapter); + + check_for_missing_tx_completions(adapter); + + if (debug_area) + ena_dump_stats_to_buf(adapter, debug_area); + + if (host_info) + ena_update_host_info(host_info, adapter->netdev); + + if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + netif_err(adapter, drv, adapter->netdev, + "Trigger reset is on\n"); + ena_dump_stats_to_dmesg(adapter); + queue_work(ena_wq, &adapter->reset_task); + return; + } + + /* Reset the timer */ + mod_timer(&adapter->timer_service, jiffies + HZ); +} + +static int ena_calc_io_queue_num(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + int io_sq_num, io_queue_num; + + /* In case of LLQ use the llq number in the get feature cmd */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + io_sq_num = get_feat_ctx->max_queues.max_llq_num; + + if (io_sq_num == 0) { + dev_err(&pdev->dev, + "Trying to use LLQ but llq_num is 0. 
Fall back into regular queues\n"); + + ena_dev->tx_mem_queue_type = + ENA_ADMIN_PLACEMENT_POLICY_HOST; + io_sq_num = get_feat_ctx->max_queues.max_sq_num; + } + } else { + io_sq_num = get_feat_ctx->max_queues.max_sq_num; + } + + io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES); + io_queue_num = min_t(int, io_queue_num, io_sq_num); + io_queue_num = min_t(int, io_queue_num, + get_feat_ctx->max_queues.max_cq_num); + /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */ + io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1); + if (unlikely(!io_queue_num)) { + dev_err(&pdev->dev, "The device doesn't have io queues\n"); + return -EFAULT; + } + + return io_queue_num; +} + +static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + bool has_mem_bar; + + has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); + + /* Enable push mode if device supports LLQ */ + if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + else + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; +} + +static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, + struct net_device *netdev) +{ + netdev_features_t dev_features = 0; + + /* Set offload features */ + if (feat->offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) + dev_features |= NETIF_F_IP_CSUM; + + if (feat->offload.tx & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) + dev_features |= NETIF_F_IPV6_CSUM; + + if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) + dev_features |= NETIF_F_TSO; + + if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) + dev_features |= NETIF_F_TSO6; + + if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) + dev_features |= NETIF_F_TSO_ECN; + + if (feat->offload.rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) + dev_features |= NETIF_F_RXCSUM; + + if (feat->offload.rx_supported & + ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) + dev_features |= NETIF_F_RXCSUM; + + netdev->features = + dev_features | + NETIF_F_SG | + NETIF_F_NTUPLE | + NETIF_F_RXHASH | + NETIF_F_HIGHDMA; + + netdev->hw_features |= netdev->features; + netdev->vlan_features |= netdev->features; +} + +static void ena_set_conf_feat_params(struct ena_adapter *adapter, + struct ena_com_dev_get_features_ctx *feat) +{ + struct net_device *netdev = adapter->netdev; + + /* Copy mac address */ + if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->mac_addr, netdev->dev_addr); + } else { + ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); + ether_addr_copy(netdev->dev_addr, adapter->mac_addr); + } + + /* Set offload features */ + ena_set_dev_offloads(feat, netdev); + + adapter->max_mtu = feat->dev_attr.max_mtu; +} + +static int ena_rss_init_default(struct ena_adapter *adapter) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct device *dev = &adapter->pdev->dev; + int rc, i; + u32 val; + + rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); + if (unlikely(rc)) { + dev_err(dev, "Cannot init indirect table\n"); + goto err_rss_init; + } + + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + val = ethtool_rxfh_indir_default(i, adapter->num_queues); + rc = ena_com_indirect_table_fill_entry(ena_dev, i, + ENA_IO_RXQ_IDX(val)); + if (unlikely(rc && (rc != -EPERM))) { 
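+ /* Editor's note: only real failures reach this branch; rc == -EPERM + * (apparently "feature not supported by the device") is deliberately + * tolerated by the check above. + */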
+ dev_err(dev, "Cannot fill indirect table\n"); + goto err_fill_indir; + } + } + + rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, + ENA_HASH_KEY_SIZE, 0xFFFFFFFF); + if (unlikely(rc && (rc != -EPERM))) { + dev_err(dev, "Cannot fill hash function\n"); + goto err_fill_indir; + } + + rc = ena_com_set_default_hash_ctrl(ena_dev); + if (unlikely(rc && (rc != -EPERM))) { + dev_err(dev, "Cannot fill hash control\n"); + goto err_fill_indir; + } + + return 0; + +err_fill_indir: + ena_com_rss_destroy(ena_dev); +err_rss_init: + + return rc; +} + +static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) +{ + int release_bars; + + release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; + pci_release_selected_regions(pdev, release_bars); +} + +static int ena_calc_queue_size(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + u16 *max_tx_sgl_size, + u16 *max_rx_sgl_size, + struct ena_com_dev_get_features_ctx *get_feat_ctx) +{ + u32 queue_size = ENA_DEFAULT_RING_SIZE; + + queue_size = min_t(u32, queue_size, + get_feat_ctx->max_queues.max_cq_depth); + queue_size = min_t(u32, queue_size, + get_feat_ctx->max_queues.max_sq_depth); + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + queue_size = min_t(u32, queue_size, + get_feat_ctx->max_queues.max_llq_depth); + + queue_size = rounddown_pow_of_two(queue_size); + + if (unlikely(!queue_size)) { + dev_err(&pdev->dev, "Invalid queue size\n"); + return -EFAULT; + } + + *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + get_feat_ctx->max_queues.max_packet_tx_descs); + *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, + get_feat_ctx->max_queues.max_packet_rx_descs); + + return queue_size; +} + +/* ena_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ena_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ena_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ */ +static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ena_com_dev_get_features_ctx get_feat_ctx; + static int version_printed; + struct net_device *netdev; + struct ena_adapter *adapter; + struct ena_com_dev *ena_dev = NULL; + static int adapters_found; + int io_queue_num, bars, rc; + int queue_size; + u16 tx_sgl_size = 0; + u16 rx_sgl_size = 0; + bool wd_state; + + dev_dbg(&pdev->dev, "%s\n", __func__); + + if (version_printed++ == 0) + dev_info(&pdev->dev, "%s", version); + + rc = pci_enable_device_mem(pdev); + if (rc) { + dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); + return rc; + } + + pci_set_master(pdev); + + ena_dev = vzalloc(sizeof(*ena_dev)); + if (!ena_dev) { + rc = -ENOMEM; + goto err_disable_device; + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; + rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); + if (rc) { + dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", + rc); + goto err_free_ena_dev; + } + + ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), + pci_resource_len(pdev, ENA_REG_BAR)); + if (!ena_dev->reg_bar) { + dev_err(&pdev->dev, "failed to remap regs bar\n"); + rc = -EFAULT; + goto err_free_region; + } + + ena_dev->dmadev = &pdev->dev; + + rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); + if (rc) { + dev_err(&pdev->dev, "ena device init failed\n"); + if (rc == -ETIME) + rc = -EPROBE_DEFER; + goto err_free_region; + } + + ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); + + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); + if (!ena_dev->mem_bar) { + rc = -EFAULT; + goto err_device_destroy; + } + } + + /* initial Tx interrupt delay, Assumes 1 usec granularity. + * Updated during device initialization with the real granularity + */ + ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; + io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx); + queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size, + &rx_sgl_size, &get_feat_ctx); + if ((queue_size <= 0) || (io_queue_num <= 0)) { + rc = -EFAULT; + goto err_device_destroy; + } + + dev_info(&pdev->dev, "creating %d io queues. 
queue size: %d\n", + io_queue_num, queue_size); + + /* dev zeroed in alloc_etherdev_mq */ + netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); + if (!netdev) { + dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); + rc = -ENOMEM; + goto err_device_destroy; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + pci_set_drvdata(pdev, adapter); + + adapter->ena_dev = ena_dev; + adapter->netdev = netdev; + adapter->pdev = pdev; + + ena_set_conf_feat_params(adapter, &get_feat_ctx); + + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + adapter->tx_ring_size = queue_size; + adapter->rx_ring_size = queue_size; + + adapter->max_tx_sgl_size = tx_sgl_size; + adapter->max_rx_sgl_size = rx_sgl_size; + + adapter->num_queues = io_queue_num; + adapter->last_monitored_tx_qid = 0; + + adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; + adapter->wd_state = wd_state; + + snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); + + rc = ena_com_init_interrupt_moderation(adapter->ena_dev); + if (rc) { + dev_err(&pdev->dev, + "Failed to query interrupt moderation feature\n"); + goto err_netdev_destroy; + } + ena_init_io_rings(adapter); + + netdev->netdev_ops = &ena_netdev_ops; + netdev->watchdog_timeo = TX_TIMEOUT; + ena_set_ethtool_ops(netdev); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + u64_stats_init(&adapter->syncp); + + rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); + if (rc) { + dev_err(&pdev->dev, + "Failed to enable and set the admin interrupts\n"); + goto err_worker_destroy; + } + rc = ena_rss_init_default(adapter); + if (rc && (rc != -EPERM)) { + dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); + goto err_free_msix; + } + + ena_config_debug_area(adapter); + + memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); + + netif_carrier_off(netdev); + + rc = register_netdev(netdev); + if (rc) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto err_rss; + } + + INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend); + INIT_WORK(&adapter->resume_io_task, ena_device_io_resume); + INIT_WORK(&adapter->reset_task, ena_fw_reset_device); + + adapter->last_keep_alive_jiffies = jiffies; + + init_timer(&adapter->timer_service); + adapter->timer_service.expires = round_jiffies(jiffies + HZ); + adapter->timer_service.function = ena_timer_service; + adapter->timer_service.data = (unsigned long)adapter; + + add_timer(&adapter->timer_service); + + dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", + DEVICE_NAME, (long)pci_resource_start(pdev, 0), + netdev->dev_addr, io_queue_num); + + set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + adapters_found++; + + return 0; + +err_rss: + ena_com_delete_debug_area(ena_dev); + ena_com_rss_destroy(ena_dev); +err_free_msix: + ena_com_dev_reset(ena_dev); + ena_free_mgmnt_irq(adapter); + ena_disable_msix(adapter); +err_worker_destroy: + ena_com_destroy_interrupt_moderation(ena_dev); + del_timer(&adapter->timer_service); + cancel_work_sync(&adapter->suspend_io_task); + cancel_work_sync(&adapter->resume_io_task); +err_netdev_destroy: + free_netdev(netdev); +err_device_destroy: + ena_com_delete_host_info(ena_dev); + ena_com_admin_destroy(ena_dev); +err_free_region: + ena_release_bars(ena_dev, pdev); +err_free_ena_dev: + vfree(ena_dev); +err_disable_device: + pci_disable_device(pdev); + return rc; +} + +/*****************************************************************************/
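+/* Editor's note (illustrative usage, not part of this patch): the PCI core + * invokes .sriov_configure below when userspace writes a VF count to the + * device's sriov_numvfs sysfs attribute, e.g.: + * + * echo 4 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs + * echo 0 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs + * + * The PCI address above is hypothetical. + */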
+static int ena_sriov_configure(struct pci_dev *dev, int numvfs) +{ + int rc; + + if (numvfs > 0) { + rc = pci_enable_sriov(dev, numvfs); + if (rc != 0) { + dev_err(&dev->dev, + "pci_enable_sriov failed to enable: %d vfs with the error: %d\n", + numvfs, rc); + return rc; + } + + return numvfs; + } + + if (numvfs == 0) { + pci_disable_sriov(dev); + return 0; + } + + return -EINVAL; +} + +/*****************************************************************************/ +/*****************************************************************************/ + +/* ena_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ena_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. + */ +static void ena_remove(struct pci_dev *pdev) +{ + struct ena_adapter *adapter = pci_get_drvdata(pdev); + struct ena_com_dev *ena_dev; + struct net_device *netdev; + + if (!adapter) + /* This device didn't load properly and its resources were + * already released; nothing to do + */ + return; + + ena_dev = adapter->ena_dev; + netdev = adapter->netdev; + +#ifdef CONFIG_RFS_ACCEL + if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; + } +#endif /* CONFIG_RFS_ACCEL */ + + unregister_netdev(netdev); + del_timer_sync(&adapter->timer_service); + + cancel_work_sync(&adapter->reset_task); + + cancel_work_sync(&adapter->suspend_io_task); + + cancel_work_sync(&adapter->resume_io_task); + + ena_com_dev_reset(ena_dev); + + ena_free_mgmnt_irq(adapter); + + ena_disable_msix(adapter); + + free_netdev(netdev); + + ena_com_mmio_reg_read_request_destroy(ena_dev); + + ena_com_abort_admin_commands(ena_dev); + + ena_com_wait_for_abort_completion(ena_dev); + + ena_com_admin_destroy(ena_dev); + + ena_com_rss_destroy(ena_dev); + + ena_com_delete_debug_area(ena_dev); + + ena_com_delete_host_info(ena_dev); + + ena_release_bars(ena_dev, pdev); + + pci_disable_device(pdev); + + ena_com_destroy_interrupt_moderation(ena_dev); + + vfree(ena_dev); +} + +static struct pci_driver ena_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = ena_pci_tbl, + .probe = ena_probe, + .remove = ena_remove, + .sriov_configure = ena_sriov_configure, +}; + +static int __init ena_init(void) +{ + pr_info("%s", version); + + ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); + if (!ena_wq) { + pr_err("Failed to create workqueue\n"); + return -ENOMEM; + } + + return pci_register_driver(&ena_pci_driver); +} + +static void __exit ena_cleanup(void) +{ + pci_unregister_driver(&ena_pci_driver); + + if (ena_wq) { + destroy_workqueue(ena_wq); + ena_wq = NULL; + } +} + +/****************************************************************************** + ******************************** AENQ Handlers ******************************* + *****************************************************************************/ +/* ena_update_on_link_change: + * Notify the network interface about the change in link status + */ +static void ena_update_on_link_change(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + struct ena_admin_aenq_link_change_desc *aenq_desc = + (struct ena_admin_aenq_link_change_desc *)aenq_e; + int status = aenq_desc->flags & + ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; + + if (status) { + netdev_dbg(adapter->netdev, "%s\n", __func__); + set_bit(ENA_FLAG_LINK_UP, &adapter->flags); + netif_carrier_on(adapter->netdev); + } else { + clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
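+ /* Editor's note: netif_carrier_off() below reports loss of carrier to + * the networking core; the interface stays NO-CARRIER until a link-up + * event calls netif_carrier_on() (general kernel behavior, not + * ENA-specific). + */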
+ netif_carrier_off(adapter->netdev); + } +} + +static void ena_keep_alive_wd(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + + adapter->last_keep_alive_jiffies = jiffies; +} + +static void ena_notification(void *adapter_data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + + WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, + "Invalid group(%x) expected %x\n", + aenq_e->aenq_common_desc.group, + ENA_ADMIN_NOTIFICATION); + + switch (aenq_e->aenq_common_desc.syndrom) { + case ENA_ADMIN_SUSPEND: + /* Suspend just the IO queues. + * We deliberately don't suspend admin so the timer and + * the keep_alive events should remain. + */ + queue_work(ena_wq, &adapter->suspend_io_task); + break; + case ENA_ADMIN_RESUME: + queue_work(ena_wq, &adapter->resume_io_task); + break; + default: + netif_err(adapter, drv, adapter->netdev, + "Invalid aenq notification link state %d\n", + aenq_e->aenq_common_desc.syndrom); + } +} + +/* This handler will be called for unknown event groups or unimplemented + * handlers. + */ +static void unimplemented_aenq_handler(void *data, + struct ena_admin_aenq_entry *aenq_e) +{ + struct ena_adapter *adapter = (struct ena_adapter *)data; + + netif_err(adapter, drv, adapter->netdev, + "Unknown event was received or event with unimplemented handler\n"); +} + +static struct ena_aenq_handlers aenq_handlers = { + .handlers = { + [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, + [ENA_ADMIN_NOTIFICATION] = ena_notification, + [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, + }, + .unimplemented_handler = unimplemented_aenq_handler +}; + +module_init(ena_init); +module_exit(ena_cleanup); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h new file mode 100644 index 000000000000..69d7e9ed5bc8 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -0,0 +1,324 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_H +#define ENA_H + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/inetdevice.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> + +#include "ena_com.h" +#include "ena_eth_com.h" + +#define DRV_MODULE_VER_MAJOR 1 +#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_SUBMINOR 2 + +#define DRV_MODULE_NAME "ena" +#ifndef DRV_MODULE_VERSION +#define DRV_MODULE_VERSION \ + __stringify(DRV_MODULE_VER_MAJOR) "." \ + __stringify(DRV_MODULE_VER_MINOR) "." \ + __stringify(DRV_MODULE_VER_SUBMINOR) +#endif + +#define DEVICE_NAME "Elastic Network Adapter (ENA)" + +/* 1 for AENQ + ADMIN */ +#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues)) + +#define ENA_REG_BAR 0 +#define ENA_MEM_BAR 2 +#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR)) + +#define ENA_DEFAULT_RING_SIZE (1024) + +#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) +#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) + +/* limit the buffer size to 600 bytes to handle MTU changes from very + * small to very large, in which case the number of buffers per packet + * could exceed ENA_PKT_MAX_BUFS + */ +#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600 + +#define ENA_MIN_MTU 128 + +#define ENA_NAME_MAX_LEN 20 +#define ENA_IRQNAME_SIZE 40 + +#define ENA_PKT_MAX_BUFS 19 + +#define ENA_RX_RSS_TABLE_LOG_SIZE 7 +#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) + +#define ENA_HASH_KEY_SIZE 40 + +/* The number of tx packet completions that will be handled each NAPI poll + * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER. + */ +#define ENA_TX_POLL_BUDGET_DIVIDER 4 + +/* Refill Rx queue when number of available descriptors is below + * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER + */ +#define ENA_RX_REFILL_THRESH_DIVIDER 8 + +/* Number of queues to check for missing tx completions per timer service run */ +#define ENA_MONITORED_TX_QUEUES 4 +/* Max timed-out packets before device reset */ +#define MAX_NUM_OF_TIMEOUTED_PACKETS 32 + +#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) + +#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1)) +#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \ + (((idx) + (n)) & ((ring_size) - 1)) + +#define ENA_IO_TXQ_IDX(q) (2 * (q)) +#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) + +#define ENA_MGMNT_IRQ_IDX 0 +#define ENA_IO_IRQ_FIRST_IDX 1 +#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) + +/* The ENA device should send a keep alive msg every 1 sec. + * We wait for 3 sec just to be on the safe side. + */
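+/* Editor's note (worked example): HZ is ticks per second, so the timeout + * below is three seconds' worth of jiffies at any HZ; with the device + * reporting every second, roughly three keep alive messages must go + * missing before check_for_missing_keep_alive() triggers a reset. + */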
+#define ENA_DEVICE_KALIVE_TIMEOUT (3 * HZ) + +#define ENA_MMIO_DISABLE_REG_READ BIT(0) + +struct ena_irq { + irq_handler_t handler; + void *data; + int cpu; + u32 vector; + cpumask_t affinity_hint_mask; + char name[ENA_IRQNAME_SIZE]; +}; + +struct ena_napi { + struct napi_struct napi ____cacheline_aligned; + struct ena_ring *tx_ring; + struct ena_ring *rx_ring; + u32 qid; +}; + +struct ena_tx_buffer { + struct sk_buff *skb; + /* num of ena desc for this specific skb + * (includes data desc and metadata desc) + */ + u32 tx_descs; + /* num of buffers used by this skb */ + u32 num_of_bufs; + /* Save the last jiffies to detect missing tx packets */ + unsigned long last_jiffies; + struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; +} ____cacheline_aligned; + +struct ena_rx_buffer { + struct sk_buff *skb; + struct page *page; + u32 page_offset; + struct ena_com_buf ena_buf; +} ____cacheline_aligned; + +struct ena_stats_tx { + u64 cnt; + u64 bytes; + u64 queue_stop; + u64 prepare_ctx_err; + u64 queue_wakeup; + u64 dma_mapping_err; + u64 linearize; + u64 linearize_failed; + u64 napi_comp; + u64 tx_poll; + u64 doorbells; + u64 missing_tx_comp; + u64 bad_req_id; +}; + +struct ena_stats_rx { + u64 cnt; + u64 bytes; + u64 refil_partial; + u64 bad_csum; + u64 page_alloc_fail; + u64 skb_alloc_fail; + u64 dma_mapping_err; + u64 bad_desc_num; + u64 rx_copybreak_pkt; +}; + +struct ena_ring { + /* Holds the empty requests for TX out of order completions */ + u16 *free_tx_ids; + union { + struct ena_tx_buffer *tx_buffer_info; + struct ena_rx_buffer *rx_buffer_info; + }; + + /* cache ptr to avoid using the adapter */ + struct device *dev; + struct pci_dev *pdev; + struct napi_struct *napi; + struct net_device *netdev; + struct ena_com_dev *ena_dev; + struct ena_adapter *adapter; + struct ena_com_io_cq *ena_com_io_cq; + struct ena_com_io_sq *ena_com_io_sq; + + u16 next_to_use; + u16 next_to_clean; + u16 rx_copybreak; + u16 qid; + u16 mtu; + u16 sgl_size; + + /* The maximum header length the device can handle */ + u8 tx_max_header_size; + + /* cpu for TPH */ + int cpu; + /* number of tx/rx_buffer_info's entries */ + int ring_size; + + enum ena_admin_placement_policy_type tx_mem_queue_type; + + struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]; + u32 smoothed_interval; + u32 per_napi_packets; + u32 per_napi_bytes; + enum ena_intr_moder_level moder_tbl_idx; + struct u64_stats_sync syncp; + union { + struct ena_stats_tx tx_stats; + struct ena_stats_rx rx_stats; + }; +} ____cacheline_aligned; + +struct ena_stats_dev { + u64 tx_timeout; + u64 io_suspend; + u64 io_resume; + u64 wd_expired; + u64 interface_up; + u64 interface_down; + u64 admin_q_pause; +}; + +enum ena_flags_t { + ENA_FLAG_DEVICE_RUNNING, + ENA_FLAG_DEV_UP, + ENA_FLAG_LINK_UP, + ENA_FLAG_MSIX_ENABLED, + ENA_FLAG_TRIGGER_RESET +}; + +/* adapter specific private data structure */ +struct ena_adapter { + struct ena_com_dev *ena_dev; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* rx packets shorter than this len will be copied to the skb + * header + */ + u32 rx_copybreak; + u32 max_mtu; + + int num_queues; + + struct msix_entry *msix_entries; + int msix_vecs; + + u32 tx_usecs, rx_usecs; /* interrupt moderation */ + u32 tx_frames, rx_frames; /* interrupt moderation */ + + u32 tx_ring_size; + u32 rx_ring_size; + + u32 msg_enable; + + u16 max_tx_sgl_size; + u16 max_rx_sgl_size; + + u8 mac_addr[ETH_ALEN]; + + char name[ENA_NAME_MAX_LEN]; + + unsigned long flags; + /* TX */ + struct ena_ring 
tx_ring[ENA_MAX_NUM_IO_QUEUES] + ____cacheline_aligned_in_smp; + + /* RX */ + struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES] + ____cacheline_aligned_in_smp; + + struct ena_napi ena_napi[ENA_MAX_NUM_IO_QUEUES]; + + struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)]; + + /* timer service */ + struct work_struct reset_task; + struct work_struct suspend_io_task; + struct work_struct resume_io_task; + struct timer_list timer_service; + + bool wd_state; + unsigned long last_keep_alive_jiffies; + + struct u64_stats_sync syncp; + struct ena_stats_dev dev_stats; + + /* last queue index that was checked for uncompleted tx packets */ + u32 last_monitored_tx_qid; +}; + +void ena_set_ethtool_ops(struct net_device *netdev); + +void ena_dump_stats_to_dmesg(struct ena_adapter *adapter); + +void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); + +int ena_get_sset_count(struct net_device *netdev, int sset); + +#endif /* !(ENA_H) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h new file mode 100644 index 000000000000..f80d2a47fa94 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h @@ -0,0 +1,67 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ENA_PCI_ID_TBL_H_ +#define ENA_PCI_ID_TBL_H_ + +#ifndef PCI_VENDOR_ID_AMAZON +#define PCI_VENDOR_ID_AMAZON 0x1d0f +#endif + +#ifndef PCI_DEV_ID_ENA_PF +#define PCI_DEV_ID_ENA_PF 0x0ec2 +#endif + +#ifndef PCI_DEV_ID_ENA_LLQ_PF +#define PCI_DEV_ID_ENA_LLQ_PF 0x1ec2 +#endif + +#ifndef PCI_DEV_ID_ENA_VF +#define PCI_DEV_ID_ENA_VF 0xec20 +#endif + +#ifndef PCI_DEV_ID_ENA_LLQ_VF +#define PCI_DEV_ID_ENA_LLQ_VF 0xec21 +#endif + +#define ENA_PCI_ID_TABLE_ENTRY(devid) \ + {PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)}, + +static const struct pci_device_id ena_pci_tbl[] = { + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF) + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF) + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF) + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_VF) + { } +}; + +#endif /* ENA_PCI_ID_TBL_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h new file mode 100644 index 000000000000..26097a2b6030 --- /dev/null +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -0,0 +1,133 @@ +/* + * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _ENA_REGS_H_ +#define _ENA_REGS_H_ + +/* ena_registers offsets */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* version register */ +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 + +/* controller_version register */ +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 + +/* caps register */ +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 + +/* aq_caps register */ +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 + +/* acq_caps register */ +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 + +/* aenq_caps register */ +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 + +/* dev_ctl register */ +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 + +/* dev_sts register */ +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define 
ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 + +/* mmio_reg_read register */ +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 + +/* rss_ind_entry_update register */ +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 + +#endif /*_ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 3eee3201b58f..9de078819aa6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -861,9 +861,15 @@ static int xgbe_resume(struct device *dev) pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); - if (netif_running(netdev)) + if (netif_running(netdev)) { ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT); + /* Schedule a restart in case the link or phy state changed + * while we were powered down. + */ + schedule_work(&pdata->restart_work); + } + DBGPR("<--xgbe_resume\n"); return ret; diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index 300e3b5c54e0..afccb033177b 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -4,6 +4,7 @@ config NET_XGENE depends on ARCH_XGENE || COMPILE_TEST select PHYLIB select MDIO_XGENE + select GPIOLIB help This is the Ethernet driver for the on-chip ethernet interface on the APM X-Gene SoC. diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c index 472c0fb3f4c4..23d72af83d82 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -32,12 +32,19 @@ static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver, SET_VAL(SB_HDRLEN, len); } -static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel, +static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata, + u32 dstqid, u32 fpsel, u32 nfpsel, u32 *idt_reg) { - *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | - SET_VAL(IDT_FPSEL, fpsel) | - SET_VAL(IDT_NFPSEL, nfpsel); + if (pdata->enet_id == XGENE_ENET1) { + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL1, fpsel) | + SET_VAL(IDT_NFPSEL1, nfpsel); + } else { + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL, fpsel) | + SET_VAL(IDT_NFPSEL, nfpsel); + } } static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, @@ -344,7 +351,7 @@ static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata) nfpsel = 0; idt_reg = 0; - xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg); + xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg); ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i, RSS_IDT, CLE_CMD_WR); if (ret) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h index 33c5f6b25824..9ac9f8e145ec 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -196,9 +196,13 @@ enum xgene_cle_ptree_dbptrs { #define IDT_DSTQID_POS 0 #define IDT_DSTQID_LEN 12 #define IDT_FPSEL_POS 12 -#define IDT_FPSEL_LEN 4 -#define IDT_NFPSEL_POS 16 -#define IDT_NFPSEL_LEN 4 +#define IDT_FPSEL_LEN 5 +#define IDT_NFPSEL_POS 17 +#define IDT_NFPSEL_LEN 5 +#define IDT_FPSEL1_POS 12 +#define 
IDT_FPSEL1_LEN 4 +#define IDT_NFPSEL1_POS 16 +#define IDT_NFPSEL1_LEN 4 struct xgene_cle_ptree_branch { bool valid; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 18bb9556dd00..321fb197621e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -761,18 +761,18 @@ int xgene_enet_phy_connect(struct net_device *ndev) if (dev->of_node) { for (i = 0 ; i < 2; i++) { np = of_parse_phandle(dev->of_node, "phy-handle", i); - if (np) - break; - } - if (!np) { - netdev_dbg(ndev, "No phy-handle found in DT\n"); - return -ENODEV; + if (!np) + continue; + + phy_dev = of_phy_connect(ndev, np, + &xgene_enet_adjust_link, + 0, pdata->phy_mode); + of_node_put(np); + if (phy_dev) + break; } - phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link, - 0, pdata->phy_mode); - of_node_put(np); if (!phy_dev) { netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 179a44dceb29..8a8d05500894 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -124,6 +124,12 @@ enum xgene_enet_rm { #define MAC_READ_REG_OFFSET 0x0c #define MAC_COMMAND_DONE_REG_OFFSET 0x10 +#define PCS_ADDR_REG_OFFSET 0x00 +#define PCS_COMMAND_REG_OFFSET 0x04 +#define PCS_WRITE_REG_OFFSET 0x08 +#define PCS_READ_REG_OFFSET 0x0c +#define PCS_COMMAND_DONE_REG_OFFSET 0x10 + #define MII_MGMT_CONFIG_ADDR 0x20 #define MII_MGMT_COMMAND_ADDR 0x24 #define MII_MGMT_ADDRESS_ADDR 0x28 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d1d6b5eeb613..b8b9495e6da6 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -19,6 +19,7 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/gpio.h> #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_sgmac.h" @@ -72,7 +73,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, skb = netdev_alloc_skb_ip_align(ndev, len); if (unlikely(!skb)) return -ENOMEM; - buf_pool->rx_skb[tail] = skb; dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma_addr)) { @@ -81,6 +81,8 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, return -EINVAL; } + buf_pool->rx_skb[tail] = skb; + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | SET_VAL(BUFDATALEN, bufdatalen) | SET_BIT(COHERENT)); @@ -102,12 +104,21 @@ static u8 xgene_enet_hdr_len(const void *data) static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) { + struct device *dev = ndev_to_dev(buf_pool->ndev); + struct xgene_enet_raw_desc16 *raw_desc; + dma_addr_t dma_addr; int i; /* Free up the buffers held by hardware */ for (i = 0; i < buf_pool->slots; i++) { - if (buf_pool->rx_skb[i]) + if (buf_pool->rx_skb[i]) { dev_kfree_skb_any(buf_pool->rx_skb[i]); + + raw_desc = &buf_pool->raw_desc16[i]; + dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)); + dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU, + DMA_FROM_DEVICE); + } } } @@ -452,7 +463,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, struct xgene_enet_raw_desc *raw_desc) { struct net_device *ndev; - struct xgene_enet_pdata *pdata; struct device *dev; struct xgene_enet_desc_ring *buf_pool; u32 datalen, skb_index; @@ -461,7 +471,6 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, int ret = 0; ndev = rx_ring->ndev; - pdata = netdev_priv(ndev); dev = ndev_to_dev(rx_ring->ndev); buf_pool = rx_ring->buf_pool; @@ -1312,6 +1321,18 @@ static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) return 0; } +static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + + if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) + return; + + pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN); + if (IS_ERR(pdata->sfp_rdy)) + pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN); +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1401,6 +1422,8 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; + xgene_enet_gpiod_get(pdata); + pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { /* Firmware may have set up the clock already. 
*/ @@ -1425,6 +1448,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) } else { pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; + pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET; } pdata->rx_buff_cnt = NUM_PKT_BUF; @@ -1454,10 +1478,8 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) buf_pool = pdata->rx_ring[i]->buf_pool; xgene_enet_init_bufpool(buf_pool); ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); - if (ret) { - xgene_enet_delete_desc_rings(pdata); - return ret; - } + if (ret) + goto err; } dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); @@ -1474,7 +1496,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) ret = pdata->cle_ops->cle_init(pdata); if (ret) { netdev_err(ndev, "Preclass Tree init error\n"); - return ret; + goto err; } } else { pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); @@ -1484,6 +1506,10 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) pdata->mac_ops->init(pdata); return ret; + +err: + xgene_enet_delete_desc_rings(pdata); + return ret; } static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) @@ -1631,8 +1657,8 @@ static int xgene_enet_probe(struct platform_device *pdev) } #endif if (!pdata->enet_id) { - free_netdev(ndev); - return -ENODEV; + ret = -ENODEV; + goto err; } ret = xgene_enet_get_resources(pdata); @@ -1655,7 +1681,7 @@ static int xgene_enet_probe(struct platform_device *pdev) ret = xgene_enet_init_hw(pdata); if (ret) - goto err_netdev; + goto err; link_state = pdata->mac_ops->link_state; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { @@ -1665,21 +1691,32 @@ static int xgene_enet_probe(struct platform_device *pdev) ret = xgene_enet_mdio_config(pdata); else INIT_DELAYED_WORK(&pdata->link_work, link_state); + + if (ret) + goto err1; } - if (ret) - goto err; xgene_enet_napi_add(pdata); ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); - goto err; + goto err2; } return 0; -err_netdev: - unregister_netdev(ndev); +err2: + /* + * If necessary, free_netdev() will call netif_napi_del() and undo + * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). 
+ */ + + if (pdata->mdio_driver) + xgene_enet_phy_disconnect(pdata); + else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + xgene_enet_mdio_remove(pdata); +err1: + xgene_enet_delete_desc_rings(pdata); err: free_netdev(ndev); return ret; @@ -1688,11 +1725,9 @@ err: static int xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; - const struct xgene_mac_ops *mac_ops; struct net_device *ndev; pdata = platform_get_drvdata(pdev); - mac_ops = pdata->mac_ops; ndev = pdata->ndev; rtnl_lock(); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 217546e5714a..b339fc1e8841 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -196,6 +196,7 @@ struct xgene_enet_pdata { void __iomem *mcx_mac_addr; void __iomem *mcx_mac_csr_addr; void __iomem *base_addr; + void __iomem *pcs_addr; void __iomem *ring_csr_addr; void __iomem *ring_cmd_addr; int phy_mode; @@ -216,6 +217,7 @@ struct xgene_enet_pdata { u8 tx_delay; u8 rx_delay; bool mdio_driver; + struct gpio_desc *sfp_rdy; }; struct xgene_indirect_ctl { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 9c6ad0dce00f..279ee27004f7 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -18,6 +18,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/of_gpio.h> +#include <linux/gpio.h> #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_xgmac.h" @@ -84,6 +86,21 @@ static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, wr_addr); } +static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET; + wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET; + cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET; + cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n", + wr_addr); +} + static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { @@ -122,6 +139,7 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, return true; } + static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr, u32 *rd_data) { @@ -137,6 +155,25 @@ static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, rd_addr); } +static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata, + u32 rd_addr, u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + bool success; + + addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET; + rd = pdata->pcs_addr + PCS_READ_REG_OFFSET; + cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET; + cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET; + + success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data); + if (!success) + netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n", + rd_addr); + + return success; +} + static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; @@ -171,6 +208,17 @@ static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata) xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0); } +static void xgene_pcs_reset(struct xgene_enet_pdata *pdata) +{ + u32 data; + + if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, 
&data)) + return; + + xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST); + xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST); +} + static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) { u32 addr0, addr1; @@ -216,12 +264,12 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); - xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); - xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); data |= BIT(12); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82); + xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); + xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); } static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata) @@ -359,14 +407,17 @@ static void xgene_enet_link_state(struct work_struct *work) { struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), struct xgene_enet_pdata, link_work); + struct gpio_desc *sfp_rdy = pdata->sfp_rdy; struct net_device *ndev = pdata->ndev; u32 link_status, poll_interval; link_status = xgene_enet_link_status(pdata); + if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy)) + link_status = 0; + if (link_status) { if (!netif_carrier_ok(ndev)) { netif_carrier_on(ndev); - xgene_xgmac_init(pdata); xgene_xgmac_rx_enable(pdata); xgene_xgmac_tx_enable(pdata); netdev_info(ndev, "Link is Up - 10Gbps\n"); @@ -380,6 +431,8 @@ static void xgene_enet_link_state(struct work_struct *work) netdev_info(ndev, "Link is Down\n"); } poll_interval = PHY_POLL_LINK_OFF; + + xgene_pcs_reset(pdata); } schedule_delayed_work(&pdata->link_work, poll_interval); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index f1ea485f916b..360ccbd95566 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -24,6 +24,7 @@ #define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000 #define BLOCK_AXG_MAC_OFFSET 0x0800 #define BLOCK_AXG_MAC_CSR_OFFSET 0x2000 +#define BLOCK_PCS_OFFSET 0x3800 #define XGENET_CONFIG_REG_ADDR 0x20 #define XGENET_SRST_ADDR 0x00 @@ -72,6 +73,9 @@ #define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0 #define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8 +#define PCS_CONTROL_1 0x0000 +#define PCS_CTRL_PCS_RST BIT(15) + extern const struct xgene_mac_ops xgene_xgmac_ops; extern const struct xgene_port_ops xgene_xgport_ops; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 4eb17daefc4f..d29a4f3102d6 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -993,6 +993,18 @@ static void alx_reset(struct work_struct *work) rtnl_unlock(); } +static int alx_tpd_req(struct sk_buff *skb) +{ + int num; + + num = skb_shinfo(skb)->nr_frags + 1; + /* we need one extra descriptor for LSOv2 */ + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + num++; + + return num; +} + static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) { u8 cso, css; @@ -1012,6 +1024,45 @@ static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) return 0; } +static int alx_tso(struct sk_buff *skb, struct alx_txd *first) +{ + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if 
(err < 0) + return err; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + first->word1 |= 1 << TPD_IPV4_SHIFT; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + /* LSOv2: the first TPD only provides the packet length */ + first->adrl.l.pkt_len = skb->len; + first->word1 |= 1 << TPD_LSO_V2_SHIFT; + } + + first->word1 |= 1 << TPD_LSO_EN_SHIFT; + first->word1 |= (skb_transport_offset(skb) & + TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT; + first->word1 |= (skb_shinfo(skb)->gso_size & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + return 1; +} + static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) { struct alx_tx_queue *txq = &alx->txq; @@ -1022,6 +1073,16 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) first_tpd = &txq->tpd[txq->write_idx]; tpd = first_tpd; + if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) { + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + + tpd = &txq->tpd[txq->write_idx]; + tpd->len = first_tpd->len; + tpd->vlan_tag = first_tpd->vlan_tag; + tpd->word1 = first_tpd->word1; + } + maplen = skb_headlen(skb); dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, DMA_TO_DEVICE); @@ -1082,9 +1143,9 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb, struct alx_priv *alx = netdev_priv(netdev); struct alx_tx_queue *txq = &alx->txq; struct alx_txd *first; - int tpdreq = skb_shinfo(skb)->nr_frags + 1; + int tso; - if (alx_tpd_avail(alx) < tpdreq) { + if (alx_tpd_avail(alx) < alx_tpd_req(skb)) { netif_stop_queue(alx->dev); goto drop; } @@ -1092,7 +1153,10 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb, first = &txq->tpd[txq->write_idx]; memset(first, 0, sizeof(*first)); - if (alx_tx_csum(skb, first)) + tso = alx_tso(skb, first); + if (tso < 0) + goto drop; + else if (!tso && alx_tx_csum(skb, first)) goto drop; if (alx_map_tx_skb(alx, skb) < 0) @@ -1351,7 +1415,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6; if (alx_get_perm_macaddr(hw, hw->perm_addr)) { dev_warn(&pdev->dev, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b2d30863caeb..2059911014db 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -58,8 +58,8 @@ BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ u32 mask) \ { \ - intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ priv->irq##which##_mask &= ~(mask); \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ } \ static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ u32 mask) \ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 625235db644f..c16ec3a51876 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -92,6 +92,7 @@ MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { + struct bcma_chipinfo *ci = 
&core->bus->chipinfo; struct ssb_sprom *sprom = &core->bus->sprom; struct mii_bus *mii_bus; struct bgmac *bgmac; @@ -157,7 +158,8 @@ static int bgmac_probe(struct bcma_device *core) dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr, bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); - if (!bgmac_is_bcm4707_family(core)) { + if (!bgmac_is_bcm4707_family(core) && + !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); @@ -230,6 +232,21 @@ static int bgmac_probe(struct bcma_device *core) bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; break; + case BCMA_CHIP_ID_BCM53573: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + if (core->core_unit == 0) { + bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= + BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII; + } else if (core->core_unit == 1) { + bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6; + bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII; + } + break; default: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index c4751ece76f6..6ea0e5ff1e44 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -932,7 +932,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac) et_swtype <<= 4; sw_type = et_swtype; } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { - sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; + sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII | + BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | BGMAC_CHIPCTL_1_SW_TYPE_RGMII; @@ -940,6 +941,27 @@ static void bgmac_chip_reset(struct bgmac *bgmac) bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | BGMAC_CHIPCTL_1_SW_TYPE_MASK), sw_type); + } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) { + u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII | + BGMAC_CHIPCTL_4_SW_TYPE_EPHY; + u8 et_swtype = 0; + char buf[4]; + + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { + if (kstrtou8(buf, 0, &et_swtype)) + dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", + buf); + sw_type = (et_swtype & 0x0f) << 12; + } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) { + sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII | + BGMAC_CHIPCTL_4_SW_TYPE_RGMII; + } + bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK | + BGMAC_CHIPCTL_4_SW_TYPE_MASK), + sw_type); + } else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) { + bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK, + BGMAC_CHIPCTL_7_IF_TYPE_RGMII); } if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) @@ -1467,6 +1489,10 @@ int bgmac_enet_probe(struct bgmac *info) */ bgmac_clk_enable(bgmac, 0); + /* This seems to be fixing IRQ by assigning OOB #6 to the core */ + if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) + bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); + bgmac_chip_reset(bgmac); err = bgmac_dma_alloc(bgmac); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 24a250267b88..80836b4c9f38 100644 --- 
a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -369,6 +369,21 @@ #define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0 #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000 +#define BGMAC_CHIPCTL_4_IF_TYPE_MASK 0x00003000 +#define BGMAC_CHIPCTL_4_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_4_IF_TYPE_MII 0x00001000 +#define BGMAC_CHIPCTL_4_IF_TYPE_RGMII 0x00002000 +#define BGMAC_CHIPCTL_4_SW_TYPE_MASK 0x0000C000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHY 0x00000000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYMII 0x00004000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYRMII 0x00008000 +#define BGMAC_CHIPCTL_4_SW_TYPE_RGMII 0x0000C000 + +#define BGMAC_CHIPCTL_7_IF_TYPE_MASK 0x000000C0 +#define BGMAC_CHIPCTL_7_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040 +#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080 + #define BGMAC_WEIGHT 64 #define ETHER_MAX_LEN 1518 @@ -390,6 +405,10 @@ #define BGMAC_FEAT_NO_CLR_MIB BIT(13) #define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14) #define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15) +#define BGMAC_FEAT_IRQ_ID_OOB_6 BIT(16) +#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17) +#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18) +#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19) struct bgmac_slot_info { union { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 97e892511666..de2d32690394 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12560,8 +12560,10 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); struct netdev_hw_addr *ha; - if (!mc_mac) + if (!mc_mac) { + BNX2X_ERR("Failed to allocate mc MAC list\n"); return -ENOMEM; + } INIT_LIST_HEAD(&p->mcast_list); @@ -12632,7 +12634,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp) BNX2X_UC_LIST_MAC, &ramrod_flags); } -static int bnx2x_set_mc_list(struct bnx2x *bp) +static int bnx2x_set_mc_list_e1x(struct bnx2x *bp) { struct net_device *dev = bp->dev; struct bnx2x_mcast_ramrod_params rparam = {NULL}; @@ -12650,11 +12652,8 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) /* then, configure a new MACs list */ if (netdev_mc_count(dev)) { rc = bnx2x_init_mcast_macs_list(bp, &rparam); - if (rc) { - BNX2X_ERR("Failed to create multicast MACs list: %d\n", - rc); + if (rc) return rc; - } /* Now add the new MACs */ rc = bnx2x_config_mcast(bp, &rparam, @@ -12669,6 +12668,42 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) return rc; } +static int bnx2x_set_mc_list(struct bnx2x *bp) +{ + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct net_device *dev = bp->dev; + int rc = 0; + + /* On older adapters, we need to flush and re-add filters */ + if (CHIP_IS_E1x(bp)) + return bnx2x_set_mc_list_e1x(bp); + + rparam.mcast_obj = &bp->mcast_obj; + + if (netdev_mc_count(dev)) { + rc = bnx2x_init_mcast_macs_list(bp, &rparam); + if (rc) + return rc; + + /* Override the currently configured set of mc filters */ + rc = bnx2x_config_mcast(bp, &rparam, + BNX2X_MCAST_CMD_SET); + if (rc < 0) + BNX2X_ERR("Failed to set a new multicast configuration: %d\n", + rc); + + bnx2x_free_mcast_macs_list(&rparam); + } else { + /* If no mc addresses are required, flush the configuration */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc) + BNX2X_ERR("Failed to clear multicast configuration %d\n", + rc); + } + + return rc; +} + /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ static void
bnx2x_set_rx_mode(struct net_device *dev) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index ff702a707a91..d468380c2a23 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2600,6 +2600,12 @@ struct bnx2x_mcast_mac_elem { u8 pad[2]; /* For a natural alignment of the following buffer */ }; +struct bnx2x_mcast_bin_elem { + struct list_head link; + int bin; + int type; /* BNX2X_MCAST_CMD_SET_{ADD, DEL} */ +}; + struct bnx2x_pending_mcast_cmd { struct list_head link; int type; /* BNX2X_MCAST_CMD_X */ @@ -2609,6 +2615,11 @@ struct bnx2x_pending_mcast_cmd { int next_bin; /* Needed for RESTORE flow with aprox match */ } data; + bool set_convert; /* in case type == BNX2X_MCAST_CMD_SET, this is set + * when macs_head has been converted to a list of + * bnx2x_mcast_bin_elem. + */ + bool done; /* set to true, when the command has been handled, * practically used in 57712 handling only, where one pending * command may be handled in a few operations. As long as for @@ -2636,15 +2647,30 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, struct bnx2x_pending_mcast_cmd *new_cmd; struct bnx2x_mcast_mac_elem *cur_mac = NULL; struct bnx2x_mcast_list_elem *pos; - int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? - p->mcast_list_len : 0); + int macs_list_len = 0, macs_list_len_size; + + /* When adding MACs we'll need to store their values */ + if (cmd == BNX2X_MCAST_CMD_ADD || cmd == BNX2X_MCAST_CMD_SET) + macs_list_len = p->mcast_list_len; /* If the command is empty ("handle pending commands only"), break */ if (!p->mcast_list_len) return 0; - total_sz = sizeof(*new_cmd) + - macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); + /* For a set command, we need to allocate sufficient memory for all + * the bins, since we can't analyze at this point how much memory would + * be required.
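The buffer allocated from this size is later reinterpreted in place: it first holds bnx2x_mcast_mac_elem entries and, after the SET conversion, bnx2x_mcast_bin_elem entries, so it must be sized for the larger of the two layouts. A condensed restatement of the sizing rule (a sketch using the structures above, not driver code):

static size_t pending_cmd_size(int n_macs, bool is_set)
{
	size_t payload = n_macs * sizeof(struct bnx2x_mcast_mac_elem);

	if (is_set) {
		size_t bins = BNX2X_MCAST_BINS_NUM *
			      sizeof(struct bnx2x_mcast_bin_elem);

		/* SET reuses the same memory for bin elements, so take
		 * whichever representation is larger.
		 */
		if (bins > payload)
			payload = bins;
	}

	return sizeof(struct bnx2x_pending_mcast_cmd) + payload;
}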
+ */ + macs_list_len_size = macs_list_len * + sizeof(struct bnx2x_mcast_mac_elem); + if (cmd == BNX2X_MCAST_CMD_SET) { + int bin_size = BNX2X_MCAST_BINS_NUM * + sizeof(struct bnx2x_mcast_bin_elem); + + if (bin_size > macs_list_len_size) + macs_list_len_size = bin_size; + } + total_sz = sizeof(*new_cmd) + macs_list_len_size; /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ new_cmd = kzalloc(total_sz, GFP_ATOMIC); @@ -2662,6 +2688,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, switch (cmd) { case BNX2X_MCAST_CMD_ADD: + case BNX2X_MCAST_CMD_SET: cur_mac = (struct bnx2x_mcast_mac_elem *) ((u8 *)new_cmd + sizeof(*new_cmd)); @@ -2771,7 +2798,8 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); int bin; - if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE) || + (cmd == BNX2X_MCAST_CMD_SET_ADD)) rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; data->rules[idx].cmd_general_data |= rx_tx_add_flag; @@ -2797,6 +2825,16 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, bin = cfg_data->bin; break; + case BNX2X_MCAST_CMD_SET_ADD: + bin = cfg_data->bin; + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case BNX2X_MCAST_CMD_SET_DEL: + bin = cfg_data->bin; + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin); + break; + default: BNX2X_ERR("Unknown command: %d\n", cmd); return; @@ -2932,6 +2970,102 @@ static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, cmd_pos->data.next_bin++; } +static void +bnx2x_mcast_hdl_pending_set_e2_convert(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_pending_mcast_cmd *cmd_pos) +{ + u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ]; + struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; + struct bnx2x_mcast_bin_elem *p_item; + int i, cnt = 0, mac_cnt = 0; + + memset(req, 0, sizeof(u64) * BNX2X_MCAST_VEC_SZ); + memcpy(cur, o->registry.aprox_match.vec, + sizeof(u64) * BNX2X_MCAST_VEC_SZ); + + /* Fill `req' with the required set of bins to configure */ + list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, + link) { + int bin = bnx2x_mcast_bin_from_mac(pmac_pos->mac); + + DP(BNX2X_MSG_SP, "Set contains %pM mcast MAC\n", + pmac_pos->mac); + + BIT_VEC64_SET_BIT(req, bin); + list_del(&pmac_pos->link); + mac_cnt++; + } + + /* We no longer have use for the MACs; Need to re-use memory for + * a list that will be used to configure bins. + */ + cmd_pos->set_convert = true; + p_item = (struct bnx2x_mcast_bin_elem *)(cmd_pos + 1); + INIT_LIST_HEAD(&cmd_pos->data.macs_head); + + for (i = 0; i < BNX2X_MCAST_BINS_NUM; i++) { + bool b_current = !!BIT_VEC64_TEST_BIT(cur, i); + bool b_required = !!BIT_VEC64_TEST_BIT(req, i); + + if (b_current == b_required) + continue; + + p_item->bin = i; + p_item->type = b_required ? BNX2X_MCAST_CMD_SET_ADD + : BNX2X_MCAST_CMD_SET_DEL; + list_add_tail(&p_item->link, &cmd_pos->data.macs_head); + p_item++; + cnt++; + } + + /* We now definitely know how many commands are hiding here. + * Also need to correct the disruption we've added to guarantee this + * would be enqueued.
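The conversion loop above boils down to diffing two bit vectors: a bin set in the requested vector but clear in the current registry becomes SET_ADD, the opposite becomes SET_DEL, and matching bits emit nothing. The same idea in isolation, over plain 64-bit words (the vector size is assumed for the sketch, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>

#define VEC_WORDS 4	/* assumed: 4 x 64 = 256 bins */

enum bin_op { BIN_ADD, BIN_DEL };

/* Returns how many bins actually change state. */
static int diff_bins(const uint64_t *cur, const uint64_t *req,
		     int *bin, enum bin_op *op)
{
	int i, cnt = 0;

	for (i = 0; i < VEC_WORDS * 64; i++) {
		bool c = (cur[i / 64] >> (i % 64)) & 1;
		bool r = (req[i / 64] >> (i % 64)) & 1;

		if (c == r)
			continue;	/* already in the right state */

		bin[cnt] = i;
		op[cnt++] = r ? BIN_ADD : BIN_DEL;
	}

	return cnt;
}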
+ */ + o->total_pending_num -= (o->max_cmd_len + mac_cnt); + o->total_pending_num += cnt; + + DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num); +} + +static void +bnx2x_mcast_hdl_pending_set_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_pending_mcast_cmd *cmd_pos, + int *cnt) +{ + union bnx2x_mcast_config_data cfg_data = {NULL}; + struct bnx2x_mcast_bin_elem *p_item, *p_item_n; + + /* This is actually a 2-part scheme - it starts by converting the MACs + * into a list of bins to be added/removed, and correcting the numbers + * on the object. This is now allowed, as we're now sure that all + * previously configured requests have already been applied. + * The second part is actually adding rules for the newly introduced + * entries [like all the rest of the hdl_pending functions]. + */ + if (!cmd_pos->set_convert) + bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos); + + list_for_each_entry_safe(p_item, p_item_n, &cmd_pos->data.macs_head, + link) { + cfg_data.bin = (u8)p_item->bin; + o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type); + (*cnt)++; + + list_del(&p_item->link); + + /* Break if we reached the maximum number of rules. */ + if (*cnt >= o->max_cmd_len) + break; + } + + /* if no more MACs to configure - we are done */ + if (list_empty(&cmd_pos->data.macs_head)) + cmd_pos->done = true; +} + static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) { @@ -2955,6 +3089,10 @@ static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, &cnt); break; + case BNX2X_MCAST_CMD_SET: + bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt); + break; + default: BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); return -EINVAL; @@ -3095,6 +3233,19 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, o->set_registry_size(o, reg_sz + p->mcast_list_len); break; + case BNX2X_MCAST_CMD_SET: + /* We can only learn how many commands would actually be used + * when this is being configured. So for now, simply guarantee + * the command will be enqueued [to refrain from adding logic + * that handles this and THEN learns it needs several ramrods]. + * Just like for ADD/Cont, the mcast_list_len might be an + * overestimation; or even more so, since we don't take into + * account the possibility of removal of existing bins. + */ + o->set_registry_size(o, reg_sz + p->mcast_list_len); + o->total_pending_num += o->max_cmd_len; + break; + default: BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; @@ -3108,12 +3259,16 @@ static int bnx2x_mcast_validate_e2(struct bnx2x *bp, static void bnx2x_mcast_revert_e2(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_bins) + int old_num_bins, + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; o->set_registry_size(o, old_num_bins); o->total_pending_num -= p->mcast_list_len; + + if (cmd == BNX2X_MCAST_CMD_SET) + o->total_pending_num -= o->max_cmd_len; } /** @@ -3223,9 +3378,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp, bnx2x_mcast_refresh_registry_e2(bp, o); /* If CLEAR_ONLY was requested - don't send a ramrod and clear - * RAMROD_PENDING status immediately. + * RAMROD_PENDING status immediately. Due to the SET option, it's also + * possible that after evaluating the differences there's no need for + * a ramrod. In that case, we can skip it as well.
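Because a single ramrod carries at most max_cmd_len rules, a large SET is drained across several invocations: each pass emits up to the budget, and the command is only marked done once the bin list is empty. A compact sketch of that draining loop (an array cursor stands in for the driver's list_head; emit_one_rule() is an assumed helper):

extern void emit_one_rule(int bin);	/* hypothetical */

static bool drain_pending(const int *bins, int n_bins, int *consumed,
			  int max_cmd_len)
{
	int cnt = 0;

	while (*consumed < n_bins && cnt < max_cmd_len) {
		emit_one_rule(bins[*consumed]);
		(*consumed)++;
		cnt++;
	}

	return *consumed == n_bins;	/* true: command fully handled */
}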
*/ - if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags) || !cnt) { raw->clear_pending(raw); return 0; } else { @@ -3253,6 +3410,11 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, enum bnx2x_mcast_cmd cmd) { + if (cmd == BNX2X_MCAST_CMD_SET) { + BNX2X_ERR("Can't use `set' command on e1h!\n"); + return -EINVAL; + } + /* Mark, that there is a work to do */ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) p->mcast_list_len = 1; @@ -3262,7 +3424,8 @@ static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_bins) + int old_num_bins, + enum bnx2x_mcast_cmd cmd) { /* Do nothing */ } @@ -3372,6 +3535,11 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, struct bnx2x_mcast_obj *o = p->mcast_obj; int reg_sz = o->get_registry_size(o); + if (cmd == BNX2X_MCAST_CMD_SET) { + BNX2X_ERR("Can't use `set' command on e1!\n"); + return -EINVAL; + } + switch (cmd) { /* DEL command deletes all currently configured MACs */ case BNX2X_MCAST_CMD_DEL: @@ -3422,7 +3590,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, static void bnx2x_mcast_revert_e1(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_macs) + int old_num_macs, + enum bnx2x_mcast_cmd cmd) { struct bnx2x_mcast_obj *o = p->mcast_obj; @@ -3816,7 +3985,7 @@ error_exit2: r->clear_pending(r); error_exit1: - o->revert(bp, p, old_reg_size); + o->revert(bp, p, old_reg_size, cmd); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 4048fc594cce..0bf2fd470819 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -536,6 +536,15 @@ enum bnx2x_mcast_cmd { BNX2X_MCAST_CMD_CONT, BNX2X_MCAST_CMD_DEL, BNX2X_MCAST_CMD_RESTORE, + + /* Following this, multicast configuration should roughly equal + * the set of MACs provided [i.e., remove all else].
+ * The two sub-commands are used internally to decide whether a given + * bin is to be added or removed. + */ + BNX2X_MCAST_CMD_SET, + BNX2X_MCAST_CMD_SET_ADD, + BNX2X_MCAST_CMD_SET_DEL, }; struct bnx2x_mcast_obj { @@ -635,7 +644,8 @@ struct bnx2x_mcast_obj { */ void (*revert)(struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p, - int old_num_bins); + int old_num_bins, + enum bnx2x_mcast_cmd cmd); int (*get_registry_size)(struct bnx2x_mcast_obj *o); void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 632daff117d3..6c586b045d1d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -573,17 +573,6 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, } } - /* clear existing mcasts */ - mcast.mcast_list_len = vf->mcast_list_len; - vf->mcast_list_len = mc_num; - rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); - if (rc) { - BNX2X_ERR("Failed to remove multicasts\n"); - kfree(mc); - return rc; - } - - /* update mcast list on the ramrod params */ if (mc_num) { INIT_LIST_HEAD(&mcast.mcast_list); for (i = 0; i < mc_num; i++) { @@ -594,12 +583,18 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, /* add new mcasts */ mcast.mcast_list_len = mc_num; - rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); + rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET); if (rc) - BNX2X_ERR("Faled to add multicasts\n"); - kfree(mc); + BNX2X_ERR("Failed to set multicasts\n"); + } else { + /* clear existing mcasts */ + rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); + if (rc) + BNX2X_ERR("Failed to remove multicasts\n"); } + kfree(mc); + return rc; } @@ -1583,7 +1578,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) * It needs to be initialized here so that it can be safely * handled by a subsequent FLR flow.
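For the VF path this replaces the old flush-then-add sequence, which briefly left the VF with no multicast filters and required the mcast_list_len bookkeeping, with a single converging command; an empty list still maps to a plain DEL. Condensed from the hunk above (a sketch, not a verbatim excerpt):

/* build the ramrod list from the VF's MACs, then converge in one step */
INIT_LIST_HEAD(&mcast.mcast_list);
/* ... fill mcast.mcast_list from mc[], mc_num entries ... */
mcast.mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast,
			mc_num ? BNX2X_MCAST_CMD_SET : BNX2X_MCAST_CMD_DEL);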
*/ - vf->mcast_list_len = 0; bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 0xFF, 0xFF, 0xFF, bnx2x_vf_sp(bp, vf, mcast_rdata), diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 670a581ffabc..7a6d406f4c11 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -195,7 +195,6 @@ struct bnx2x_virtf { int leading_rss; /* MCAST object */ - int mcast_list_len; struct bnx2x_mcast_obj mcast_obj; /* RSS configuration object */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 771cc267f217..f9df4b5ae90e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -54,9 +54,7 @@ MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," * Global variables */ static u32 bnad_rxqs_per_cq = 2; -static u32 bna_id; -static struct mutex bnad_list_mutex; -static LIST_HEAD(bnad_list); +static atomic_t bna_id; static const u8 bnad_bcast_addr[] __aligned(2) = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; @@ -76,23 +74,6 @@ do { \ (_res_info)->res_u.mem_info.len = (_size); \ } while (0) -static void -bnad_add_to_list(struct bnad *bnad) -{ - mutex_lock(&bnad_list_mutex); - list_add_tail(&bnad->list_entry, &bnad_list); - bnad->id = bna_id++; - mutex_unlock(&bnad_list_mutex); -} - -static void -bnad_remove_from_list(struct bnad *bnad) -{ - mutex_lock(&bnad_list_mutex); - list_del(&bnad->list_entry); - mutex_unlock(&bnad_list_mutex); -} - /* * Reinitialize completions in CQ, once Rx is taken down */ @@ -3573,14 +3554,12 @@ bnad_lock_init(struct bnad *bnad) { spin_lock_init(&bnad->bna_lock); mutex_init(&bnad->conf_mutex); - mutex_init(&bnad_list_mutex); } static void bnad_lock_uninit(struct bnad *bnad) { mutex_destroy(&bnad->conf_mutex); - mutex_destroy(&bnad_list_mutex); } /* PCI Initialization */ @@ -3653,7 +3632,7 @@ bnad_pci_probe(struct pci_dev *pdev, } bnad = netdev_priv(netdev); bnad_lock_init(bnad); - bnad_add_to_list(bnad); + bnad->id = atomic_inc_return(&bna_id) - 1; mutex_lock(&bnad->conf_mutex); /* @@ -3807,7 +3786,6 @@ pci_uninit: bnad_pci_uninit(pdev); unlock_mutex: mutex_unlock(&bnad->conf_mutex); - bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); free_netdev(netdev); return err; @@ -3845,7 +3823,6 @@ bnad_pci_remove(struct pci_dev *pdev) bnad_disable_msix(bnad); bnad_pci_uninit(pdev); mutex_unlock(&bnad->conf_mutex); - bnad_remove_from_list(bnad); bnad_lock_uninit(bnad); /* Remove the debugfs node for this bnad */ kfree(bnad->regdata); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index f4ed816b93ee..46f7b842b39c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -288,7 +288,6 @@ struct bnad_rx_unmap_q { struct bnad { struct net_device *netdev; u32 id; - struct list_head list_entry; /* Data path */ struct bnad_tx_info tx_info[BNAD_MAX_TX]; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 89c0cfa9719f..32568392b9f9 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -541,6 +541,14 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) } } +static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) +{ + desc->addr = (u32)addr; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + desc->addrh = (u32)(addr >> 32); +#endif +} + static void 
macb_tx_error_task(struct work_struct *work) { struct macb_queue *queue = container_of(work, struct macb_queue, @@ -621,14 +629,17 @@ static void macb_tx_error_task(struct work_struct *work) /* Set end of TX queue */ desc = macb_tx_desc(queue, 0); - desc->addr = 0; + macb_set_addr(desc, 0); desc->ctrl = MACB_BIT(TX_USED); /* Make descriptor updates visible to hardware */ wmb(); /* Reinitialize the TX desc queue */ - queue_writel(queue, TBQP, queue->tx_ring_dma); + queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); +#endif /* Make TX ring reflect state of hardware */ queue->tx_head = 0; queue->tx_tail = 0; @@ -750,7 +761,7 @@ static void gem_rx_refill(struct macb *bp) if (entry == RX_RING_SIZE - 1) paddr |= MACB_BIT(RX_WRAP); - bp->rx_ring[entry].addr = paddr; + macb_set_addr(&(bp->rx_ring[entry]), paddr); bp->rx_ring[entry].ctrl = 0; /* properly align Ethernet header */ @@ -798,7 +809,9 @@ static int gem_rx(struct macb *bp, int budget) int count = 0; while (count < budget) { - u32 addr, ctrl; + u32 ctrl; + dma_addr_t addr; + bool rxused; entry = macb_rx_ring_wrap(bp->rx_tail); desc = &bp->rx_ring[entry]; @@ -806,10 +819,14 @@ static int gem_rx(struct macb *bp, int budget) /* Make hw descriptor updates visible to CPU */ rmb(); - addr = desc->addr; + rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + addr |= ((u64)(desc->addrh) << 32); +#endif ctrl = desc->ctrl; - if (!(addr & MACB_BIT(RX_USED))) + if (!rxused) break; bp->rx_tail++; @@ -835,7 +852,6 @@ static int gem_rx(struct macb *bp, int budget) netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); skb_put(skb, len); - addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, DMA_FROM_DEVICE); @@ -1299,7 +1315,7 @@ static unsigned int macb_tx_map(struct macb *bp, ctrl |= MACB_BIT(TX_WRAP); /* Set TX buffer descriptor */ - desc->addr = tx_skb->mapping; + macb_set_addr(desc, tx_skb->mapping); /* desc->addr must be visible to hardware before clearing * 'TX_USED' bit in desc->ctrl. 
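With CONFIG_ARCH_DMA_ADDR_T_64BIT the descriptor grows to four words and a DMA address is split across addr/addrh; for example, 0x0000000987654320 is stored as addr = 0x87654320 and addrh = 0x00000009. The RX side reassembles it exactly the way the hunks above do; a sketch of that read-back (the low two bits of an RX address carry the RX_USED/RX_WRAP flags, which the driver strips via the RX_WADDR field):

static inline dma_addr_t macb_get_rx_addr_sketch(struct macb_dma_desc *desc)
{
	dma_addr_t addr = desc->addr & ~0x3UL;	/* drop flag bits */

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	addr |= (u64)desc->addrh << 32;		/* upper 32 bits */
#endif
	return addr;
}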
*/ @@ -1422,6 +1438,9 @@ static void gem_free_rx_buffers(struct macb *bp) desc = &bp->rx_ring[i]; addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + addr |= ((u64)(desc->addrh) << 32); +#endif dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); @@ -1547,7 +1566,7 @@ static void gem_init_rings(struct macb *bp) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { for (i = 0; i < TX_RING_SIZE; i++) { - queue->tx_ring[i].addr = 0; + macb_set_addr(&(queue->tx_ring[i]), 0); queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); } queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); @@ -1694,6 +1713,10 @@ static void macb_configure_dma(struct macb *bp) dmacfg |= GEM_BIT(TXCOEN); else dmacfg &= ~GEM_BIT(TXCOEN); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + dmacfg |= GEM_BIT(ADDR64); +#endif netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", dmacfg); gem_writel(bp, DMACFG, dmacfg); @@ -1739,9 +1762,15 @@ static void macb_init_hw(struct macb *bp) macb_configure_dma(bp); /* Initialize TX and RX buffers */ - macb_writel(bp, RBQP, bp->rx_ring_dma); + macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); +#endif for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, TBQP, queue->tx_ring_dma); + queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); +#endif /* Enable interrupts */ queue_writel(queue, IER, @@ -2303,7 +2332,8 @@ static void macb_probe_queues(void __iomem *mem, } static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, - struct clk **hclk, struct clk **tx_clk) + struct clk **hclk, struct clk **tx_clk, + struct clk **rx_clk) { int err; @@ -2325,6 +2355,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, if (IS_ERR(*tx_clk)) *tx_clk = NULL; + *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); + if (IS_ERR(*rx_clk)) + *rx_clk = NULL; + err = clk_prepare_enable(*pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); @@ -2343,8 +2377,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, goto err_disable_hclk; } + err = clk_prepare_enable(*rx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); + goto err_disable_txclk; + } + return 0; +err_disable_txclk: + clk_disable_unprepare(*tx_clk); + err_disable_hclk: clk_disable_unprepare(*hclk); @@ -2379,6 +2422,9 @@ static int macb_init(struct platform_device *pdev) queue->IDR = GEM_IDR(hw_q - 1); queue->IMR = GEM_IMR(hw_q - 1); queue->TBQP = GEM_TBQP(hw_q - 1); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue->TBQPH = GEM_TBQPH(hw_q - 1); +#endif } else { /* queue0 uses legacy registers */ queue->ISR = MACB_ISR; @@ -2386,6 +2432,9 @@ static int macb_init(struct platform_device *pdev) queue->IDR = MACB_IDR; queue->IMR = MACB_IMR; queue->TBQP = MACB_TBQP; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue->TBQPH = MACB_TBQPH; +#endif } /* get irq: here we use the linux queue index, not the hardware @@ -2728,12 +2777,14 @@ static const struct net_device_ops at91ether_netdev_ops = { }; static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, - struct clk **hclk, struct clk **tx_clk) + struct clk **hclk, struct clk **tx_clk, + struct clk **rx_clk) { int err; *hclk = NULL; *tx_clk = NULL; + *rx_clk = NULL;
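rx_clk is treated as optional throughout: a failed devm_clk_get() is normalized to NULL, and the clk API accepts a NULL clock as a harmless no-op, so platforms without a separate RX clock need no special-casing. The pattern in isolation (assuming a device with an optional clock named "foo"):

struct clk *foo;
int err;

foo = devm_clk_get(&pdev->dev, "foo");
if (IS_ERR(foo))
	foo = NULL;			/* clock simply absent */

err = clk_prepare_enable(foo);		/* NULL clk: returns 0, no-op */
if (err)
	dev_err(&pdev->dev, "failed to enable foo (%u)\n", err);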
*pclk = devm_clk_get(&pdev->dev, "ether_clk"); if (IS_ERR(*pclk)) @@ -2857,13 +2908,13 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids); static int macb_probe(struct platform_device *pdev) { int (*clk_init)(struct platform_device *, struct clk **, - struct clk **, struct clk **) + struct clk **, struct clk **, struct clk **) = macb_clk_init; int (*init)(struct platform_device *) = macb_init; struct device_node *np = pdev->dev.of_node; struct device_node *phy_node; const struct macb_config *macb_config = NULL; - struct clk *pclk, *hclk = NULL, *tx_clk = NULL; + struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; @@ -2891,7 +2942,7 @@ static int macb_probe(struct platform_device *pdev) } } - err = clk_init(pdev, &pclk, &hclk, &tx_clk); + err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); if (err) return err; @@ -2927,6 +2978,7 @@ static int macb_probe(struct platform_device *pdev) bp->pclk = pclk; bp->hclk = hclk; bp->tx_clk = tx_clk; + bp->rx_clk = rx_clk; if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; @@ -2935,6 +2987,11 @@ static int macb_probe(struct platform_device *pdev) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) + dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); +#endif + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -2945,7 +3002,7 @@ static int macb_probe(struct platform_device *pdev) dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) { err = dev->irq; - goto err_disable_clocks; + goto err_out_free_netdev; } mac = of_get_mac_address(np); @@ -3020,6 +3077,7 @@ err_disable_clocks: clk_disable_unprepare(tx_clk); clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); + clk_disable_unprepare(rx_clk); return err; } @@ -3046,6 +3104,7 @@ static int macb_remove(struct platform_device *pdev) clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); + clk_disable_unprepare(bp->rx_clk); free_netdev(dev); } @@ -3069,6 +3128,7 @@ static int __maybe_unused macb_suspend(struct device *dev) clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); + clk_disable_unprepare(bp->rx_clk); } return 0; @@ -3088,6 +3148,7 @@ static int __maybe_unused macb_resume(struct device *dev) clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); + clk_prepare_enable(bp->rx_clk); } netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index b6fcf10621b6..8bed4b52fef5 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -66,6 +66,8 @@ #define MACB_USRIO 0x00c0 #define MACB_WOL 0x00c4 #define MACB_MID 0x00fc +#define MACB_TBQPH 0x04C8 +#define MACB_RBQPH 0x04D4 /* GEM register offsets. 
*/ #define GEM_NCFGR 0x0004 /* Network Config */ @@ -139,6 +141,7 @@ #define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) #define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) +#define GEM_TBQPH(hw_q) (0x04C8) #define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) #define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) #define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) @@ -249,6 +252,8 @@ #define GEM_RXBS_SIZE 8 #define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */ #define GEM_DDRP_SIZE 1 +#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */ +#define GEM_ADDR64_SIZE 1 /* Bitfields in NSR */ @@ -474,6 +479,10 @@ struct macb_dma_desc { u32 addr; u32 ctrl; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + u32 addrh; + u32 resvd; +#endif }; /* DMA descriptor bitfields */ @@ -763,7 +772,8 @@ struct macb_config { u32 caps; unsigned int dma_burst_length; int (*clk_init)(struct platform_device *pdev, struct clk **pclk, - struct clk **hclk, struct clk **tx_clk); + struct clk **hclk, struct clk **tx_clk, + struct clk **rx_clk); int (*init)(struct platform_device *pdev); int jumbo_max_len; }; @@ -777,6 +787,7 @@ struct macb_queue { unsigned int IDR; unsigned int IMR; unsigned int TBQP; + unsigned int TBQPH; unsigned int tx_head, tx_tail; struct macb_dma_desc *tx_ring; @@ -809,6 +820,7 @@ struct macb { struct clk *pclk; struct clk *hclk; struct clk *tx_clk; + struct clk *rx_clk; struct net_device *dev; struct napi_struct napi; struct net_device_stats stats; diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 0ef232d3331e..e1b78b500309 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -36,10 +36,20 @@ config THUNDER_NIC_BGX depends on 64BIT select PHYLIB select MDIO_THUNDER + select THUNDER_NIC_RGX ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. +config THUNDER_NIC_RGX + tristate "Thunder MAC interface driver (RGX)" + depends on 64BIT + select PHYLIB + select MDIO_THUNDER + ---help--- + This driver supports configuring XCV block of RGX interface + present on CN81XX chip. 
+ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 20d6942edf40..f659a95ffc94 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3190,8 +3190,8 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, * OCTNET_CMD_RXCSUM_DISABLE * @returns SUCCESS or FAILURE */ -int liquidio_set_rxcsum_command(struct net_device *netdev, int command, - u8 rx_cmd) +static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile index 5c4615ccaa14..6b4d4add7353 100644 --- a/drivers/net/ethernet/cavium/thunder/Makefile +++ b/drivers/net/ethernet/cavium/thunder/Makefile @@ -2,6 +2,7 @@ # Makefile for Cavium's Thunder ethernet device # +obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 83025bb4737c..dd63f961827a 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -20,6 +20,17 @@ #define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034 #define PCI_DEVICE_ID_THUNDER_BGX 0xA026 +/* Subsystem device IDs */ +#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E +#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E +#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E + +#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E +#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134 +#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234 +#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334 + + /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 0 #define PCI_MSIX_REG_BAR_NUM 4 @@ -41,40 +52,8 @@ /* Max pkinds */ #define NIC_MAX_PKIND 16 -/* Rx Channels */ -/* Receive channel configuration in TNS bypass mode - * Below is configuration in TNS bypass mode - * BGX0-LMAC0-CHAN0 - VNIC CHAN0 - * BGX0-LMAC1-CHAN0 - VNIC CHAN16 - * ... - * BGX1-LMAC0-CHAN0 - VNIC CHAN128 - * ... 
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174 - */ -#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */ -#define NIC_CHANS_PER_INF 128 -#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF) -#define NIC_CPI_COUNT 2048 /* No of channel parse indices */ - -/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */ -#define NIC_MAX_BGX MAX_BGX_PER_CN88XX -#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX) -#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */ -#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX) - -/* Tx scheduling */ -#define NIC_MAX_TL4 1024 -#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */ -#define NIC_MAX_TL3 256 -#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */ -#define NIC_MAX_TL2 64 -#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */ -#define NIC_MAX_TL1 2 - -/* TNS bypass mode */ -#define NIC_TL2_PER_BGX 32 -#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX) -#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF) +/* Max when CPI_ALG is IP diffserv */ +#define NIC_MAX_CPI_PER_LMAC 64 /* NIC VF Interrupts */ #define NICVF_INTR_CQ 0 @@ -148,7 +127,6 @@ struct nicvf_cq_poll { struct napi_struct napi; }; -#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */ #define NIC_MAX_RSS_HASH_BITS 8 #define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) #define RSS_HASH_KEY_SIZE 5 /* 320 bit key */ @@ -273,6 +251,7 @@ struct nicvf { struct net_device *netdev; struct pci_dev *pdev; void __iomem *reg_base; +#define MAX_QUEUES_PER_QSET 8 struct queue_set *qs; struct nicvf_cq_poll *napi[8]; u8 vf_id; @@ -368,6 +347,7 @@ struct nicvf { #define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */ #define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */ #define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ +#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ @@ -484,6 +464,31 @@ struct set_loopback { bool enable; }; +/* Reset statistics counters */ +struct reset_stat_cfg { + u8 msg; + /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */ + u16 rx_stat_mask; + /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */ + u8 tx_stat_mask; + /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1) + * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1) + * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1) + * .. + * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1) + * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1) + */ + u16 rq_stat_mask; + /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1) + * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1) + * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1) + * .. 
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1) + * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1) + */ + u16 sq_stat_mask; +}; + /* 128 bit shared memory between PF and each VF */ union nic_mbx { struct { u8 msg; } msg; @@ -501,6 +506,7 @@ union nic_mbx { struct sqs_alloc sqs_alloc; struct nicvf_ptr nicvf; struct set_loopback lbk; + struct reset_stat_cfg reset_stat; }; #define NIC_NODE_ID_MASK 0x03 @@ -514,7 +520,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev) static inline bool pass1_silicon(struct pci_dev *pdev) { - return pdev->revision < 8; + return (pdev->revision < 8) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF); +} + +static inline bool pass2_silicon(struct pci_dev *pdev) +{ + return (pdev->revision >= 8) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF); } int nicvf_set_real_num_queues(struct net_device *netdev, diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 16ed20357c5c..25618d203931 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -20,8 +20,25 @@ #define DRV_NAME "thunder-nic" #define DRV_VERSION "1.0" +struct hw_info { + u8 bgx_cnt; + u8 chans_per_lmac; + u8 chans_per_bgx; /* Rx/Tx chans */ + u8 chans_per_rgx; + u8 chans_per_lbk; + u16 cpi_cnt; + u16 rssi_cnt; + u16 rss_ind_tbl_size; + u16 tl4_cnt; + u16 tl3_cnt; + u8 tl2_cnt; + u8 tl1_cnt; + bool tl1_per_bgx; /* TL1 per BGX or per LMAC */ +}; + struct nicpf { struct pci_dev *pdev; + struct hw_info *hw; u8 node; unsigned int flags; u8 num_vf_en; /* No of VF enabled */ @@ -36,22 +53,22 @@ struct nicpf { #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) - u8 vf_lmac_map[MAX_LMAC]; + u8 *vf_lmac_map; struct delayed_work dwork; struct workqueue_struct *check_link; - u8 link[MAX_LMAC]; - u8 duplex[MAX_LMAC]; - u32 speed[MAX_LMAC]; + u8 *link; + u8 *duplex; + u32 *speed; u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; - u16 rss_ind_tbl_size; bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; /* MSI-X */ bool msix_enabled; u8 num_vec; - struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS]; + struct msix_entry *msix_entries; bool irq_allocated[NIC_PF_MSIX_VECTORS]; + char irq_name[NIC_PF_MSIX_VECTORS][20]; }; /* Supported devices */ @@ -89,9 +106,22 @@ static u64 nic_reg_read(struct nicpf *nic, u64 offset) /* PF -> VF mailbox communication APIs */ static void nic_enable_mbx_intr(struct nicpf *nic) { - /* Enable mailbox interrupt for all 128 VFs */ - nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull); - nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull); + int vf_cnt = pci_sriov_get_totalvfs(nic->pdev); + +#define INTR_MASK(vfs) ((vfs < 64) ? 
(BIT_ULL(vfs) - 1) : (~0ull)) + + /* Clear it, to avoid spurious interrupts (if any) */ + nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt)); + + /* Enable mailbox interrupt for all VFs */ + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt)); + /* One mailbox intr enable reg per 64 VFs */ + if (vf_cnt > 64) { + nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64), + INTR_MASK(vf_cnt - 64)); + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), + INTR_MASK(vf_cnt - 64)); + } } static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg) @@ -144,7 +174,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf) mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; - if (vf < MAX_LMAC) { + if (vf < nic->num_vf_en) { bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); @@ -155,7 +185,7 @@ static void nic_mbx_send_ready(struct nicpf *nic, int vf) mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false; mbx.nic_cfg.node_id = nic->node; - mbx.nic_cfg.loopback_supported = vf < MAX_LMAC; + mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en; nic_send_msg_to_vf(nic, vf, &mbx); } @@ -248,14 +278,22 @@ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) /* Set minimum transmit packet size */ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) { - int lmac; + int lmac, max_lmac; + u16 sdevid; u64 lmac_cfg; /* Max value that can be set is 60 */ if (size > 60) size = 60; - for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); + /* 81xx's RGX has only one LMAC */ + if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF) + max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1; + else + max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX; + + for (lmac = 0; lmac < max_lmac; lmac++) { lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); lmac_cfg &= ~(0xF << 2); lmac_cfg |= ((size / 4) << 2); @@ -275,7 +313,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic->num_vf_en = 0; - for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { + for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) { if (!(bgx_map & (1 << bgx))) continue; lmac_cnt = bgx_get_lmac_count(nic->node, bgx); @@ -295,28 +333,125 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credit); + + /* On CN81XX there are only 8 VFs but the max possible no. of + * interfaces is 9.
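INTR_MASK() yields a per-64-VF enable mask, so only mailboxes that can actually exist are unmasked. Worked through: for an 8-VF part, INTR_MASK(8) = BIT_ULL(8) - 1 = 0xff; for a 128-VF part, the first register gets INTR_MASK(128) = ~0ull and the second gets INTR_MASK(128 - 64) = ~0ull as well. Restated outside the driver (with parentheses added around the macro argument):

#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))
#define INTR_MASK(vfs)	(((vfs) < 64) ? (BIT_ULL(vfs) - 1) : (~0ULL))

/* INTR_MASK(8)   == 0x00000000000000ff  (8-VF part)
 * INTR_MASK(128) == 0xffffffffffffffff  (first 64 VFs)
 * INTR_MASK(64)  == 0xffffffffffffffff  (remaining 64 VFs)
 */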
+ */ + if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) { + nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev); + break; + } } } +static void nic_free_lmacmem(struct nicpf *nic) +{ + kfree(nic->vf_lmac_map); + kfree(nic->link); + kfree(nic->duplex); + kfree(nic->speed); +} + +static int nic_get_hw_info(struct nicpf *nic) +{ + u8 max_lmac; + u16 sdevid; + struct hw_info *hw = nic->hw; + + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); + + switch (sdevid) { + case PCI_SUBSYS_DEVID_88XX_NIC_PF: + hw->bgx_cnt = MAX_BGX_PER_CN88XX; + hw->chans_per_lmac = 16; + hw->chans_per_bgx = 128; + hw->cpi_cnt = 2048; + hw->rssi_cnt = 4096; + hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + hw->tl3_cnt = 256; + hw->tl2_cnt = 64; + hw->tl1_cnt = 2; + hw->tl1_per_bgx = true; + break; + case PCI_SUBSYS_DEVID_81XX_NIC_PF: + hw->bgx_cnt = MAX_BGX_PER_CN81XX; + hw->chans_per_lmac = 8; + hw->chans_per_bgx = 32; + hw->chans_per_rgx = 8; + hw->chans_per_lbk = 24; + hw->cpi_cnt = 512; + hw->rssi_cnt = 256; + hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */ + hw->tl3_cnt = 64; + hw->tl2_cnt = 16; + hw->tl1_cnt = 10; + hw->tl1_per_bgx = false; + break; + case PCI_SUBSYS_DEVID_83XX_NIC_PF: + hw->bgx_cnt = MAX_BGX_PER_CN83XX; + hw->chans_per_lmac = 8; + hw->chans_per_bgx = 32; + hw->chans_per_lbk = 64; + hw->cpi_cnt = 2048; + hw->rssi_cnt = 1024; + hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */ + hw->tl3_cnt = 256; + hw->tl2_cnt = 64; + hw->tl1_cnt = 18; + hw->tl1_per_bgx = false; + break; + } + hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev); + + /* Allocate memory for LMAC tracking elements */ + max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX; + nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->vf_lmac_map) + goto error; + nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->link) + goto error; + nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->duplex) + goto error; + nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL); + if (!nic->speed) + goto error; + return 0; + +error: + nic_free_lmacmem(nic); + return -ENOMEM; +} + #define BGX0_BLOCK 8 #define BGX1_BLOCK 9 -static void nic_init_hw(struct nicpf *nic) +static int nic_init_hw(struct nicpf *nic) { - int i; + int i, err; u64 cqm_cfg; + /* Get HW capability info */ + err = nic_get_hw_info(nic); + if (err) + return err; + /* Enable NIC HW block */ nic_reg_write(nic, NIC_PF_CFG, 0x3); /* Enable backpressure */ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); - /* Disable TNS mode on both interfaces */ - nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, - (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); - nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), - (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + /* TNS and TNS bypass modes are present only on 88xx */ + if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) { + /* Disable TNS mode on both interfaces */ + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, + (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), + (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + } + nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | BGX0_BLOCK); nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8), @@ -346,11 +481,14 @@ static void nic_init_hw(struct nicpf *nic) cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); + + return 0; } /* Channel parse 
index configuration */ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) { + struct hw_info *hw = nic->hw; u32 vnic, bgx, lmac, chan; u32 padd, cpi_count = 0; u64 cpi_base, cpi, rssi_base, rssi; @@ -360,9 +498,9 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); - chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); - cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX); - rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX); + chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx); + cpi_base = vnic * NIC_MAX_CPI_PER_LMAC; + rssi_base = vnic * hw->rss_ind_tbl_size; /* Rx channel configuration */ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3), @@ -434,7 +572,7 @@ static void nic_send_rss_size(struct nicpf *nic, int vf) msg = (u64 *)&mbx; mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; - mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size; + mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size; nic_send_msg_to_vf(nic, vf, &mbx); } @@ -481,7 +619,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) /* 4 level transmit side scheduler configuration * for TNS bypass mode * - * Sample configuration for SQ0 + * Sample configuration for SQ0 on 88xx * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0 * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0 * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0 @@ -494,6 +632,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, struct sq_cfg_msg *sq) { + struct hw_info *hw = nic->hw; u32 bgx, lmac, chan; u32 tl2, tl3, tl4; u32 rr_quantum; @@ -512,21 +651,28 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, /* 24 bytes for FCS, IPG and preamble */ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); - if (!sq->sqs_mode) { - tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); - } else { - for (svf = 0; svf < MAX_SQS_PER_VF; svf++) { - if (nic->vf_sqs[pqs_vnic][svf] == vnic) - break; + /* For 88xx 0-511 TL4 transmits via BGX0 and + * 512-1023 TL4s transmit via BGX1. + */ + if (hw->tl1_per_bgx) { + tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt); + if (!sq->sqs_mode) { + tl4 += (lmac * MAX_QUEUES_PER_QSET); + } else { + for (svf = 0; svf < MAX_SQS_PER_VF; svf++) { + if (nic->vf_sqs[pqs_vnic][svf] == vnic) + break; + } + tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET); + tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF); + tl4 += (svf * MAX_QUEUES_PER_QSET); } - tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC); - tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF); - tl4 += (svf * NIC_TL4_PER_LMAC); - tl4 += (bgx * NIC_TL4_PER_BGX); + } else { + tl4 = (vnic * MAX_QUEUES_PER_QSET); } tl4 += sq_idx; - tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); + tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt); nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | ((u64)vnic << NIC_QS_ID_SHIFT) | ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4); @@ -534,8 +680,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum); nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum); - chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); - nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + + /* On 88xx 0-127 channels are for BGX0 and + * 128-255 channels for BGX1.
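+ * + * A rough worked example with the 88xx values from nic_get_hw_info() (chans_per_lmac = 16, chans_per_bgx = 128): + * + * chan = (2 * 16) + (1 * 128) = 160 for BGX1/LMAC2.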
+ * + * On 81xx/83xx the TL3_CHAN reg should be configured with the channel + * within the LMAC, i.e. 0-7, and not the actual channel number as on 88xx + */ + chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx); + if (hw->tl1_per_bgx) + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + else + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0); + /* Enable backpressure on the channel */ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1); @@ -544,6 +701,16 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum); /* No priorities as of now */ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); + + /* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to + * TL1 '1', on 81xx/83xx TL2 needs to be configured to transmit to one + * of the possible LMACs. + * + * This register doesn't exist on 88xx. + */ + if (!hw->tl1_per_bgx) + nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3), + lmac + (bgx * MAX_LMAC_PER_BGX)); } /* Send primary nicvf pointer to secondary QS's VF */ @@ -615,7 +782,7 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) { int bgx_idx, lmac_idx; - if (lbk->vf_id > MAX_LMAC) + if (lbk->vf_id >= nic->num_vf_en) return -1; bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]); @@ -626,6 +793,67 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) return 0; } +/* Reset statistics counters */ +static int nic_reset_stat_counters(struct nicpf *nic, + int vf, struct reset_stat_cfg *cfg) +{ + int i, stat, qnum; + u64 reg_addr; + + for (i = 0; i < RX_STATS_ENUM_LAST; i++) { + if (cfg->rx_stat_mask & BIT(i)) { + reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 | + (vf << NIC_QS_ID_SHIFT) | + (i << 3); + nic_reg_write(nic, reg_addr, 0); + } + } + + for (i = 0; i < TX_STATS_ENUM_LAST; i++) { + if (cfg->tx_stat_mask & BIT(i)) { + reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 | + (vf << NIC_QS_ID_SHIFT) | + (i << 3); + nic_reg_write(nic, reg_addr, 0); + } + } + + for (i = 0; i <= 15; i++) { + qnum = i >> 1; + stat = i & 1 ?
1 : 0; + reg_addr = (vf << NIC_QS_ID_SHIFT) | + (qnum << NIC_Q_NUM_SHIFT) | (stat << 3); + if (cfg->rq_stat_mask & BIT(i)) { + reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1; + nic_reg_write(nic, reg_addr, 0); + } + if (cfg->sq_stat_mask & BIT(i)) { + reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1; + nic_reg_write(nic, reg_addr, 0); + } + } + return 0; +} + +static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf) +{ + u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT; + u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) | + (IPV4_PROT_DEF) << 16 | ET_PROT_DEF; + + /* Configure tunnel parsing parameters */ + nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF, + (1ULL << 63 | UDP_GENEVE_PORT_NUM)); + nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF, + ((7ULL << 61) | prot_def)); + nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF, + ((7ULL << 61) | prot_def)); + nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1, + ((1ULL << 63) | UDP_VXLAN_PORT_NUM)); + nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF, + ((0xfULL << 60) | vxlan_prot_def)); +} + static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) { int bgx, lmac; @@ -664,18 +892,17 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) mbx_addr += sizeof(u64); } - dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n", + dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n", __func__, mbx.msg.msg, vf); switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic_mbx_send_ready(nic, vf); - if (vf < MAX_LMAC) { + if (vf < nic->num_vf_en) { nic->link[vf] = 0; nic->duplex[vf] = 0; nic->speed[vf] = 0; } - ret = 1; - break; + goto unlock; case NIC_MBOX_MSG_QS_CFG: reg_addr = NIC_PF_QSET_0_127_CFG | (mbx.qs.num << NIC_QS_ID_SHIFT); @@ -693,6 +920,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); nic_reg_write(nic, reg_addr, mbx.rq.cfg); + /* Enable CQE_RX2_S extension in CQE_RX descriptor. + * This gets appended by default on 81xx/83xx chips, + * for consistency enabling the same on 88xx pass2 + * where this is introduced. 
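+ * + * The visible effect shows up in nicvf_get_rcv_skb() later in this patch: with CQE_RX2_S present the receive-buffer pointers start one 64-bit word later in the CQE, roughly: + * + * rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); (6 without it)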
+ */ + if (pass2_silicon(nic->pdev)) + nic_reg_write(nic, NIC_PF_RX_CFG, 0x01); + if (!pass1_silicon(nic->pdev)) + nic_enable_tunnel_parsing(nic, vf); break; case NIC_MBOX_MSG_RQ_BP_CFG: reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG | @@ -717,8 +953,10 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq); break; case NIC_MBOX_MSG_SET_MAC: - if (vf >= nic->num_vf_en) + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ break; + } lmac = mbx.mac.vf_id; bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); @@ -767,25 +1005,38 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_LOOPBACK: ret = nic_config_loopback(nic, &mbx.lbk); break; + case NIC_MBOX_MSG_RESET_STAT_COUNTER: + ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat); + break; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); break; } - if (!ret) + if (!ret) { nic_mbx_send_ack(nic, vf); - else if (mbx.msg.msg != NIC_MBOX_MSG_READY) + } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) { + dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n", + mbx.msg.msg, vf); nic_mbx_send_nack(nic, vf); + } unlock: nic->mbx_lock[vf] = false; } -static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) +static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) { + struct nicpf *nic = (struct nicpf *)nic_irq; + int mbx; u64 intr; u8 vf, vf_per_mbx_reg = 64; + if (irq == nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector) + mbx = 0; + else + mbx = 1; + intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); for (vf = 0; vf < vf_per_mbx_reg; vf++) { @@ -797,23 +1048,6 @@ static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) nic_clear_mbx_intr(nic, vf, mbx); } } -} - -static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq) -{ - struct nicpf *nic = (struct nicpf *)nic_irq; - - nic_mbx_intr_handler(nic, 0); - - return IRQ_HANDLED; -} - -static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq) -{ - struct nicpf *nic = (struct nicpf *)nic_irq; - - nic_mbx_intr_handler(nic, 1); - return IRQ_HANDLED; } @@ -821,7 +1055,13 @@ static int nic_enable_msix(struct nicpf *nic) { int i, ret; - nic->num_vec = NIC_PF_MSIX_VECTORS; + nic->num_vec = pci_msix_vec_count(nic->pdev); + + nic->msix_entries = kmalloc_array(nic->num_vec, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!nic->msix_entries) + return -ENOMEM; for (i = 0; i < nic->num_vec; i++) nic->msix_entries[i].entry = i; @@ -829,8 +1069,9 @@ static int nic_enable_msix(struct nicpf *nic) ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); if (ret) { dev_err(&nic->pdev->dev, - "Request for #%d msix vectors failed\n", - nic->num_vec); + "Request for #%d msix vectors failed, returned %d\n", + nic->num_vec, ret); + kfree(nic->msix_entries); return ret; } @@ -842,6 +1083,7 @@ static void nic_disable_msix(struct nicpf *nic) { if (nic->msix_enabled) { pci_disable_msix(nic->pdev); + kfree(nic->msix_entries); nic->msix_enabled = 0; nic->num_vec = 0; } @@ -860,27 +1102,26 @@ static void nic_free_all_interrupts(struct nicpf *nic) static int nic_register_interrupts(struct nicpf *nic) { - int ret; + int i, ret; /* Enable MSI-X */ ret = nic_enable_msix(nic); if (ret) return ret; - /* Register mailbox interrupt handlers */ - ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector, - nic_mbx0_intr_handler, 0, 
"NIC Mbox0", nic); - if (ret) - goto fail; - - nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true; + /* Register mailbox interrupt handler */ + for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) { + sprintf(nic->irq_name[i], + "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); - ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector, - nic_mbx1_intr_handler, 0, "NIC Mbox1", nic); - if (ret) - goto fail; + ret = request_irq(nic->msix_entries[i].vector, + nic_mbx_intr_handler, 0, + nic->irq_name[i], nic); + if (ret) + goto fail; - nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true; + nic->irq_allocated[i] = true; + } /* Enable mailbox interrupt */ nic_enable_mbx_intr(nic); @@ -889,6 +1130,7 @@ static int nic_register_interrupts(struct nicpf *nic) fail: dev_err(&nic->pdev->dev, "Request irq failed\n"); nic_free_all_interrupts(nic); + nic_disable_msix(nic); return ret; } @@ -903,6 +1145,12 @@ static int nic_num_sqs_en(struct nicpf *nic, int vf_en) int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE; u16 total_vf; + /* Secondary Qsets are needed only if CPU count is + * morethan MAX_QUEUES_PER_QSET. + */ + if (num_online_cpus() <= MAX_QUEUES_PER_QSET) + return 0; + /* Check if its a multi-node environment */ if (nr_node_ids > 1) sqs_per_vf = MAX_SQS_PER_VF; @@ -1008,6 +1256,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!nic) return -ENOMEM; + nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL); + if (!nic->hw) { + devm_kfree(dev, nic); + return -ENOMEM; + } + pci_set_drvdata(pdev, nic); nic->pdev = pdev; @@ -1047,13 +1301,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->node = nic_get_node_id(pdev); - nic_set_lmac_vf_mapping(nic); - /* Initialize hardware */ - nic_init_hw(nic); + err = nic_init_hw(nic); + if (err) + goto err_release_regions; - /* Set RSS TBL size for each VF */ - nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + nic_set_lmac_vf_mapping(nic); /* Register interrupts */ err = nic_register_interrupts(nic); @@ -1086,6 +1339,9 @@ err_unregister_interrupts: err_release_regions: pci_release_regions(pdev); err_disable_device: + nic_free_lmacmem(nic); + devm_kfree(dev, nic->hw); + devm_kfree(dev, nic); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; @@ -1106,6 +1362,11 @@ static void nic_remove(struct pci_dev *pdev) nic_unregister_interrupts(nic); pci_release_regions(pdev); + + nic_free_lmacmem(nic); + devm_kfree(&pdev->dev, nic->hw); + devm_kfree(&pdev->dev, nic); + pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index fab35a593898..edf779f5a227 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -36,6 +36,20 @@ #define NIC_PF_MAILBOX_ENA_W1C (0x0450) #define NIC_PF_MAILBOX_ENA_W1S (0x0470) #define NIC_PF_RX_ETYPE_0_7 (0x0500) +#define NIC_PF_RX_GENEVE_DEF (0x0580) +#define UDP_GENEVE_PORT_NUM 0x17C1ULL +#define NIC_PF_RX_GENEVE_PROT_DEF (0x0588) +#define IPV6_PROT 0x86DDULL +#define IPV4_PROT 0x800ULL +#define ET_PROT 0x6558ULL +#define NIC_PF_RX_NVGRE_PROT_DEF (0x0598) +#define NIC_PF_RX_VXLAN_DEF_0_1 (0x05A0) +#define UDP_VXLAN_PORT_NUM 0x12B5 +#define NIC_PF_RX_VXLAN_PROT_DEF (0x05B0) +#define IPV6_PROT_DEF 0x2ULL +#define IPV4_PROT_DEF 0x1ULL +#define ET_PROT_DEF 0x3ULL +#define NIC_PF_RX_CFG (0x05D0) #define NIC_PF_PKIND_0_15_CFG (0x0600) #define NIC_PF_ECC0_FLIP0 (0x1000) #define NIC_PF_ECC1_FLIP0 
(0x1008) @@ -103,6 +117,7 @@ #define NIC_PF_SW_SYNC_RX_DONE (0x490008) #define NIC_PF_TL2_0_63_CFG (0x500000) #define NIC_PF_TL2_0_63_PRI (0x520000) +#define NIC_PF_TL2_LMAC (0x540000) #define NIC_PF_TL2_0_63_SH_STATUS (0x580000) #define NIC_PF_TL3A_0_63_CFG (0x5F0000) #define NIC_PF_TL3_0_255_CFG (0x600000) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a19e73f11d73..06c014edf762 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -29,10 +29,20 @@ static const struct pci_device_id nicvf_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_VF, - PCI_VENDOR_ID_CAVIUM, 0xA134) }, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_88XX_NIC_VF) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, - PCI_VENDOR_ID_CAVIUM, 0xA11E) }, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_THUNDER_NIC_VF, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_81XX_NIC_VF) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_THUNDER_NIC_VF, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_83XX_NIC_VF) }, { 0, } /* end of table */ }; @@ -134,15 +144,19 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) /* Wait for previous message to be acked, timeout 2sec */ while (!nic->pf_acked) { - if (nic->pf_nacked) + if (nic->pf_nacked) { + netdev_err(nic->netdev, + "PF NACK to mbox msg 0x%02x from VF%d\n", + (mbx->msg.msg & 0xFF), nic->vf_id); return -EINVAL; + } msleep(sleep); if (nic->pf_acked) break; timeout -= sleep; if (!timeout) { netdev_err(nic->netdev, - "PF didn't ack to mbox msg %d from VF%d\n", + "PF didn't ACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); return -EBUSY; } @@ -352,13 +366,7 @@ static int nicvf_rss_init(struct nicvf *nic) rss->enable = true; - /* Using the HW reset value for now */ - rss->key[0] = 0xFEED0BADFEED0BADULL; - rss->key[1] = 0xFEED0BADFEED0BADULL; - rss->key[2] = 0xFEED0BADFEED0BADULL; - rss->key[3] = 0xFEED0BADFEED0BADULL; - rss->key[4] = 0xFEED0BADFEED0BADULL; - + netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64)); nicvf_set_rss_key(nic); rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; @@ -507,7 +515,8 @@ static int nicvf_init_resources(struct nicvf *nic) static void nicvf_snd_pkt_handler(struct net_device *netdev, struct cmp_queue *cq, - struct cqe_send_t *cqe_tx, int cqe_type) + struct cqe_send_t *cqe_tx, + int cqe_type, int budget) { struct sk_buff *skb = NULL; struct nicvf *nic = netdev_priv(netdev); @@ -531,7 +540,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, if (skb) { nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); - dev_consume_skb_any(skb); + napi_consume_skb(skb, budget); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } else { /* In case of HW TSO, HW sends a CQE for each segment of a TSO @@ -686,7 +695,8 @@ loop: break; case CQE_TYPE_SEND: nicvf_snd_pkt_handler(netdev, cq, - (void *)cq_desc, CQE_TYPE_SEND); + (void *)cq_desc, CQE_TYPE_SEND, + budget); tx_done++; break; case CQE_TYPE_INVALID: @@ -928,16 +938,19 @@ static int nicvf_register_interrupts(struct nicvf *nic) int vector; for_each_cq_irq(irq) - sprintf(nic->irq_name[irq], "NICVF%d CQ%d", - nic->vf_id, irq); + sprintf(nic->irq_name[irq], "%s-rxtx-%d", + nic->pnicvf->netdev->name, + nicvf_netdev_qidx(nic, irq)); for_each_sq_irq(irq) - sprintf(nic->irq_name[irq], "NICVF%d 
SQ%d", - nic->vf_id, irq - NICVF_INTR_ID_SQ); + sprintf(nic->irq_name[irq], "%s-sq-%d", + nic->pnicvf->netdev->name, + nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ)); for_each_rbdr_irq(irq) - sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", - nic->vf_id, irq - NICVF_INTR_ID_RBDR); + sprintf(nic->irq_name[irq], "%s-rbdr-%d", + nic->pnicvf->netdev->name, + nic->sqs_mode ? (nic->sqs_id + 1) : 0); /* Register CQ interrupts */ for (irq = 0; irq < nic->qs->cq_cnt; irq++) { @@ -961,8 +974,9 @@ } /* Register QS error interrupt */ - sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], - "NICVF%d Qset error", nic->vf_id); + sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d", + nic->pnicvf->netdev->name, + nic->sqs_mode ? (nic->sqs_id + 1) : 0); irq = NICVF_INTR_ID_QS_ERR; ret = request_irq(nic->msix_entries[irq].vector, nicvf_qs_err_intr_handler, @@ -1191,7 +1205,7 @@ int nicvf_open(struct net_device *netdev) } /* Check if we got MAC address from PF or else generate a random MAC */ - if (is_zero_ether_addr(netdev->dev_addr)) { + if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) { eth_hw_addr_random(netdev); nicvf_hw_set_mac_addr(nic, netdev); } @@ -1527,14 +1541,13 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_release_regions; } - qcount = MAX_CMP_QUEUES_PER_QS; + qcount = netif_get_num_default_rss_queues(); /* Restrict multiqset support only for host bound VFs */ if (pdev->is_virtfn) { /* Set max number of queues per VF */ - qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS); - qcount = min(qcount, - (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS); + qcount = min_t(int, num_online_cpus(), + (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS); } netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 0ff8e60deccb..7d90856c9783 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -479,6 +479,16 @@ void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features) NIC_QSET_RQ_GEN_CFG, 0, rq_cfg); } +static void nicvf_reset_rcv_queue_stats(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + /* Reset all RXQ's stats */ + mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; + mbx.reset_stat.rq_stat_mask = 0xFFFF; + nicvf_send_msg_to_pf(nic, &mbx); +} + /* Configures receive queue */ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) @@ -762,10 +772,10 @@ int nicvf_set_qset_resources(struct nicvf *nic) nic->qs = qs; /* Set count of each queue */ - qs->rbdr_cnt = RBDR_CNT; - qs->rq_cnt = RCV_QUEUE_CNT; - qs->sq_cnt = SND_QUEUE_CNT; - qs->cq_cnt = CMP_QUEUE_CNT; + qs->rbdr_cnt = DEFAULT_RBDR_CNT; + qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus()); + qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus()); + qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt); /* Set queue lengths */ qs->rbdr_len = RCV_BUF_COUNT; @@ -812,6 +822,11 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable) nicvf_free_resources(nic); } + /* Reset RXQ's stats. + * SQ's stats will get reset automatically once SQ is reset.
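+ * + * The RXQ reset is a single mailbox request to the PF, as done by nicvf_reset_rcv_queue_stats() above: + * + * mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; + * mbx.reset_stat.rq_stat_mask = 0xFFFF; + * nicvf_send_msg_to_pf(nic, &mbx);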
+ */ + nicvf_reset_rcv_queue_stats(nic); + return 0; } @@ -1184,13 +1199,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) int frag; int payload_len = 0; struct sk_buff *skb = NULL; - struct sk_buff *skb_frag = NULL; - struct sk_buff *prev_frag = NULL; + struct page *page; + int offset; u16 *rb_lens = NULL; u64 *rb_ptrs = NULL; rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); - rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + /* Except on 88xx pass1, on all other chips CQE_RX2_S is added to + * CQE_RX at word6, hence the buffer pointers move by a word. + * + * Use the existing 'hw_tso' flag, which is set for all chips + * except 88xx pass1, instead of an additional cache line + * access (or miss) to read the pci dev's revision. + */ + if (!nic->hw_tso) + rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + else + rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64)); netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); @@ -1208,22 +1233,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) skb_put(skb, payload_len); } else { /* Add fragments */ - skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, - payload_len); - if (!skb_frag) { - dev_kfree_skb(skb); - return NULL; - } - - if (!skb_shinfo(skb)->frag_list) - skb_shinfo(skb)->frag_list = skb_frag; - else - prev_frag->next = skb_frag; - - prev_frag = skb_frag; - skb->len += payload_len; - skb->data_len += payload_len; - skb_frag->len = payload_len; + page = virt_to_page(phys_to_virt(*rb_ptrs)); + offset = phys_to_virt(*rb_ptrs) - page_address(page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + offset, payload_len, RCV_FRAG_LEN); } /* Next buffer pointer */ rb_ptrs++; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 6673e1133523..869f3386028b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -57,10 +57,7 @@ #define CMP_QUEUE_SIZE6 6ULL /* 64K entries */ /* Default queue count per QS, its lengths and threshold values */ -#define RBDR_CNT 1 -#define RCV_QUEUE_CNT 8 -#define SND_QUEUE_CNT 8 -#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ +#define DEFAULT_RBDR_CNT 1 #define SND_QSIZE SND_QUEUE_SIZE2 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 63a39ac97d53..8bbaedbb7b94 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -28,6 +28,9 @@ struct lmac { struct bgx *bgx; int dmac; u8 mac[ETH_ALEN]; + u8 lmac_type; + u8 lane_to_sds; + bool use_training; bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ @@ -43,14 +46,13 @@ struct bgx { u8 bgx_id; - u8 qlm_mode; struct lmac lmac[MAX_LMAC_PER_BGX]; int lmac_count; - int lmac_type; - int lane_to_sds; - int use_training; + u8 max_lmac; void __iomem *reg_base; struct pci_dev *pdev; + bool is_dlm; + bool is_rgx; }; static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; @@ -61,6 +63,7 @@ static int bgx_xaui_check_link(struct lmac *lmac); /* Supported devices */ static const struct pci_device_id bgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) }, { 0, } /* end of table */ }; @@ -124,8 +127,8 @@ unsigned bgx_get_map(int node)
int i; unsigned map = 0; - for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { - if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) + for (i = 0; i < MAX_BGX_PER_NODE; i++) { + if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i]) map |= (1 << i); } @@ -138,7 +141,7 @@ int bgx_get_lmac_count(int node, int bgx_idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (bgx) return bgx->lmac_count; @@ -153,7 +156,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) struct bgx *bgx; struct lmac *lmac; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return; @@ -166,7 +169,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state); const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (bgx) return bgx->lmac[lmacid].mac; @@ -177,7 +180,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac); void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return; @@ -188,11 +191,13 @@ EXPORT_SYMBOL(bgx_set_lmac_mac); void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) { - struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; + struct lmac *lmac; u64 cfg; if (!bgx) return; + lmac = &bgx->lmac[lmacid]; cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); if (enable) @@ -200,6 +205,9 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) else cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + if (bgx->is_rgx) + xcv_setup_link(enable ? 
lmac->link_up : 0, lmac->last_speed); } EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); @@ -266,9 +274,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); - /* renable lmac */ + /* Re-enable lmac */ cmr_cfg |= CMR_EN; bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); + + if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) + xcv_setup_link(lmac->link_up, lmac->last_speed); } static void bgx_lmac_handler(struct net_device *netdev) @@ -314,7 +325,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return 0; @@ -328,7 +339,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) { struct bgx *bgx; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return 0; @@ -356,7 +367,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx, struct lmac *lmac; u64 cfg; - bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx]; if (!bgx) return; @@ -379,8 +390,9 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx, } EXPORT_SYMBOL(bgx_lmac_internal_loopback); -static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) +static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) { + int lmacid = lmac->lmacid; u64 cfg; bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); @@ -409,18 +421,29 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); - if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, - PCS_MRX_STATUS_AN_CPT, false)) { - dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); - return -1; + if (lmac->lmac_type == BGX_MODE_QSGMII) { + /* Disable disparity check for QSGMII */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL); + cfg &= ~PCS_MISC_CTL_DISP_EN; + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg); + return 0; + } + + if (lmac->lmac_type == BGX_MODE_SGMII) { + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, + PCS_MRX_STATUS_AN_CPT, false)) { + dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); + return -1; + } } return 0; } -static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) +static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac) { u64 cfg; + int lmacid = lmac->lmacid; /* Reset SPU */ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); @@ -436,12 +459,14 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); /* Set interleaved running disparity for RXAUI */ - if (bgx->lmac_type != BGX_MODE_RXAUI) - bgx_reg_modify(bgx, lmacid, - BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); - else + if (lmac->lmac_type == BGX_MODE_RXAUI) bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, - SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); + SPU_MISC_CTL_INTLV_RDISP); + + /* Clear receive packet disable */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); + cfg &= ~SPU_MISC_CTL_RX_DIS; + bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); /* clear all interrupts */ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); @@ -451,7 +476,7 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) cfg = bgx_reg_read(bgx, lmacid, 
BGX_SPUX_INT); bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); - if (bgx->use_training) { + if (lmac->use_training) { bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); @@ -474,9 +499,9 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); - if (bgx->lmac_type == BGX_MODE_10G_KR) + if (lmac->lmac_type == BGX_MODE_10G_KR) cfg |= (1 << 23); - else if (bgx->lmac_type == BGX_MODE_40G_KR) + else if (lmac->lmac_type == BGX_MODE_40G_KR) cfg |= (1 << 24); else cfg &= ~((1 << 23) | (1 << 24)); @@ -511,11 +536,10 @@ static int bgx_xaui_check_link(struct lmac *lmac) { struct bgx *bgx = lmac->bgx; int lmacid = lmac->lmacid; - int lmac_type = bgx->lmac_type; + int lmac_type = lmac->lmac_type; u64 cfg; - bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); - if (bgx->use_training) { + if (lmac->use_training) { cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); if (!(cfg & (1ull << 13))) { cfg = (1ull << 13) | (1ull << 14); @@ -556,7 +580,7 @@ static int bgx_xaui_check_link(struct lmac *lmac) BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); - if (bgx->use_training) { + if (lmac->use_training) { cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); if (!(cfg & (1ull << 13))) { cfg = (1ull << 13) | (1ull << 14); @@ -584,11 +608,6 @@ static int bgx_xaui_check_link(struct lmac *lmac) return -1; } - /* Clear receive packet disable */ - cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); - cfg &= ~SPU_MISC_CTL_RX_DIS; - bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); - /* Check for MAC RX faults */ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL); /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */ @@ -599,7 +618,7 @@ static int bgx_xaui_check_link(struct lmac *lmac) /* Rx local/remote fault seen. 
* Do lmac reinit to see if condition recovers */ - bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type); + bgx_lmac_xaui_init(bgx, lmac); return -1; } @@ -623,7 +642,7 @@ static void bgx_poll_for_link(struct work_struct *work) if ((spu_link & SPU_STATUS1_RCV_LNK) && !(smu_link & SMU_RX_CTL_STATUS)) { lmac->link_up = 1; - if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) + if (lmac->lmac_type == BGX_MODE_XLAUI) lmac->last_speed = 40000; else lmac->last_speed = 10000; @@ -649,6 +668,16 @@ static void bgx_poll_for_link(struct work_struct *work) queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); } +static int phy_interface_mode(u8 lmac_type) +{ + if (lmac_type == BGX_MODE_QSGMII) + return PHY_INTERFACE_MODE_QSGMII; + if (lmac_type == BGX_MODE_RGMII) + return PHY_INTERFACE_MODE_RGMII; + + return PHY_INTERFACE_MODE_SGMII; +} + static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) { struct lmac *lmac; @@ -657,13 +686,15 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) lmac = &bgx->lmac[lmacid]; lmac->bgx = bgx; - if (bgx->lmac_type == BGX_MODE_SGMII) { + if ((lmac->lmac_type == BGX_MODE_SGMII) || + (lmac->lmac_type == BGX_MODE_QSGMII) || + (lmac->lmac_type == BGX_MODE_RGMII)) { lmac->is_sgmii = 1; - if (bgx_lmac_sgmii_init(bgx, lmacid)) + if (bgx_lmac_sgmii_init(bgx, lmac)) return -1; } else { lmac->is_sgmii = 0; - if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) + if (bgx_lmac_xaui_init(bgx, lmac)) return -1; } @@ -685,10 +716,10 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) /* Restore default cfg, incase low level firmware changed it */ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); - if ((bgx->lmac_type != BGX_MODE_XFI) && - (bgx->lmac_type != BGX_MODE_XLAUI) && - (bgx->lmac_type != BGX_MODE_40G_KR) && - (bgx->lmac_type != BGX_MODE_10G_KR)) { + if ((lmac->lmac_type != BGX_MODE_XFI) && + (lmac->lmac_type != BGX_MODE_XLAUI) && + (lmac->lmac_type != BGX_MODE_40G_KR) && + (lmac->lmac_type != BGX_MODE_10G_KR)) { if (!lmac->phydev) return -ENODEV; @@ -696,7 +727,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) if (phy_connect_direct(&lmac->netdev, lmac->phydev, bgx_lmac_handler, - PHY_INTERFACE_MODE_SGMII)) + phy_interface_mode(lmac->lmac_type))) return -ENODEV; phy_start_aneg(lmac->phydev); @@ -753,76 +784,19 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) bgx_flush_dmac_addrs(bgx, lmacid); - if ((bgx->lmac_type != BGX_MODE_XFI) && - (bgx->lmac_type != BGX_MODE_XLAUI) && - (bgx->lmac_type != BGX_MODE_40G_KR) && - (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) + if ((lmac->lmac_type != BGX_MODE_XFI) && + (lmac->lmac_type != BGX_MODE_XLAUI) && + (lmac->lmac_type != BGX_MODE_40G_KR) && + (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) phy_disconnect(lmac->phydev); lmac->phydev = NULL; } -static void bgx_set_num_ports(struct bgx *bgx) -{ - u64 lmac_count; - - switch (bgx->qlm_mode) { - case QLM_MODE_SGMII: - bgx->lmac_count = 4; - bgx->lmac_type = BGX_MODE_SGMII; - bgx->lane_to_sds = 0; - break; - case QLM_MODE_XAUI_1X4: - bgx->lmac_count = 1; - bgx->lmac_type = BGX_MODE_XAUI; - bgx->lane_to_sds = 0xE4; - break; - case QLM_MODE_RXAUI_2X2: - bgx->lmac_count = 2; - bgx->lmac_type = BGX_MODE_RXAUI; - bgx->lane_to_sds = 0xE4; - break; - case QLM_MODE_XFI_4X1: - bgx->lmac_count = 4; - bgx->lmac_type = BGX_MODE_XFI; - bgx->lane_to_sds = 0; - break; - case QLM_MODE_XLAUI_1X4: - bgx->lmac_count = 1; - bgx->lmac_type = BGX_MODE_XLAUI; - bgx->lane_to_sds = 0xE4; - break; - case QLM_MODE_10G_KR_4X1: - bgx->lmac_count = 4; - bgx->lmac_type = 
BGX_MODE_10G_KR; - bgx->lane_to_sds = 0; - bgx->use_training = 1; - break; - case QLM_MODE_40G_KR4_1X4: - bgx->lmac_count = 1; - bgx->lmac_type = BGX_MODE_40G_KR; - bgx->lane_to_sds = 0xE4; - bgx->use_training = 1; - break; - default: - bgx->lmac_count = 0; - break; - } - - /* Check if low level firmware has programmed LMAC count - * based on board type, if yes consider that otherwise - * the default static values - */ - lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; - if (lmac_count != 4) - bgx->lmac_count = lmac_count; -} - static void bgx_init_hw(struct bgx *bgx) { int i; - - bgx_set_num_ports(bgx); + struct lmac *lmac; bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) @@ -830,17 +804,9 @@ static void bgx_init_hw(struct bgx *bgx) /* Set lmac type and lane2serdes mapping */ for (i = 0; i < bgx->lmac_count; i++) { - if (bgx->lmac_type == BGX_MODE_RXAUI) { - if (i) - bgx->lane_to_sds = 0x0e; - else - bgx->lane_to_sds = 0x04; - bgx_reg_write(bgx, i, BGX_CMRX_CFG, - (bgx->lmac_type << 8) | bgx->lane_to_sds); - continue; - } + lmac = &bgx->lmac[i]; bgx_reg_write(bgx, i, BGX_CMRX_CFG, - (bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); + (lmac->lmac_type << 8) | lmac->lane_to_sds); bgx->lmac[i].lmacid_bd = lmac_count; lmac_count++; } @@ -863,55 +829,212 @@ static void bgx_init_hw(struct bgx *bgx) bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); } -static void bgx_get_qlm_mode(struct bgx *bgx) +static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) +{ + return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF); +} + +static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) { struct device *dev = &bgx->pdev->dev; - int lmac_type; - int train_en; + struct lmac *lmac; + char str[20]; + u8 dlm; - /* Read LMAC0 type to figure out QLM mode - * This is configured by low level firmware - */ - lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); - lmac_type = (lmac_type >> 8) & 0x07; + if (lmacid > bgx->max_lmac) + return; - train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & - SPU_PMD_CRTL_TRAIN_EN; + lmac = &bgx->lmac[lmacid]; + dlm = (lmacid / 2) + (bgx->bgx_id * 2); + if (!bgx->is_dlm) + sprintf(str, "BGX%d QLM mode", bgx->bgx_id); + else + sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm); - switch (lmac_type) { + switch (lmac->lmac_type) { case BGX_MODE_SGMII: - bgx->qlm_mode = QLM_MODE_SGMII; - dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); + dev_info(dev, "%s: SGMII\n", (char *)str); break; case BGX_MODE_XAUI: - bgx->qlm_mode = QLM_MODE_XAUI_1X4; - dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); + dev_info(dev, "%s: XAUI\n", (char *)str); break; case BGX_MODE_RXAUI: - bgx->qlm_mode = QLM_MODE_RXAUI_2X2; - dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); + dev_info(dev, "%s: RXAUI\n", (char *)str); break; case BGX_MODE_XFI: - if (!train_en) { - bgx->qlm_mode = QLM_MODE_XFI_4X1; - dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); - } else { - bgx->qlm_mode = QLM_MODE_10G_KR_4X1; - dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); - } + if (!lmac->use_training) + dev_info(dev, "%s: XFI\n", (char *)str); + else + dev_info(dev, "%s: 10G_KR\n", (char *)str); break; case BGX_MODE_XLAUI: - if (!train_en) { - bgx->qlm_mode = QLM_MODE_XLAUI_1X4; - dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); - } else { - bgx->qlm_mode = QLM_MODE_40G_KR4_1X4; - dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); - } + if (!lmac->use_training) + dev_info(dev, "%s: 
XLAUI\n", (char *)str); + else + dev_info(dev, "%s: 40G_KR4\n", (char *)str); + break; + case BGX_MODE_QSGMII: + if ((lmacid == 0) && + (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid)) + return; + if ((lmacid == 2) && + (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid)) + return; + dev_info(dev, "%s: QSGMII\n", (char *)str); + break; + case BGX_MODE_RGMII: + dev_info(dev, "%s: RGMII\n", (char *)str); + break; + case BGX_MODE_INVALID: + /* Nothing to do */ + break; + } +} + +static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac) +{ + switch (lmac->lmac_type) { + case BGX_MODE_SGMII: + case BGX_MODE_XFI: + lmac->lane_to_sds = lmac->lmacid; + break; + case BGX_MODE_XAUI: + case BGX_MODE_XLAUI: + case BGX_MODE_RGMII: + lmac->lane_to_sds = 0xE4; + break; + case BGX_MODE_RXAUI: + lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4; + break; + case BGX_MODE_QSGMII: + /* There is no way to determine if DLM0/2 is QSGMII or + * DLM1/3 is configured to QSGMII as bootloader will + * configure all LMACs, so take whatever is configured + * by low level firmware. + */ + lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac); break; default: - bgx->qlm_mode = QLM_MODE_SGMII; - dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); + lmac->lane_to_sds = 0; + break; + } +} + +static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid) +{ + if ((lmac->lmac_type != BGX_MODE_10G_KR) && + (lmac->lmac_type != BGX_MODE_40G_KR)) { + lmac->use_training = 0; + return; + } + + lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) & + SPU_PMD_CRTL_TRAIN_EN; +} + +static void bgx_set_lmac_config(struct bgx *bgx, u8 idx) +{ + struct lmac *lmac; + struct lmac *olmac; + u64 cmr_cfg; + u8 lmac_type; + u8 lane_to_sds; + + lmac = &bgx->lmac[idx]; + + if (!bgx->is_dlm || bgx->is_rgx) { + /* Read LMAC0 type to figure out QLM mode + * This is configured by low level firmware + */ + cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); + lmac->lmac_type = (cmr_cfg >> 8) & 0x07; + if (bgx->is_rgx) + lmac->lmac_type = BGX_MODE_RGMII; + lmac_set_training(bgx, lmac, 0); + lmac_set_lane2sds(bgx, lmac); + return; + } + + /* On 81xx BGX can be split across 2 DLMs + * firmware programs lmac_type of LMAC0 and LMAC2 + */ + if ((idx == 0) || (idx == 2)) { + cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG); + lmac_type = (u8)((cmr_cfg >> 8) & 0x07); + lane_to_sds = (u8)(cmr_cfg & 0xFF); + /* Check if config is not reset value */ + if ((lmac_type == 0) && (lane_to_sds == 0xE4)) + lmac->lmac_type = BGX_MODE_INVALID; + else + lmac->lmac_type = lmac_type; + lmac_set_training(bgx, lmac, lmac->lmacid); + lmac_set_lane2sds(bgx, lmac); + + /* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */ + olmac = &bgx->lmac[idx + 1]; + olmac->lmac_type = lmac->lmac_type; + lmac_set_training(bgx, olmac, olmac->lmacid); + lmac_set_lane2sds(bgx, olmac); + } +} + +static bool is_dlm0_in_bgx_mode(struct bgx *bgx) +{ + struct lmac *lmac; + + if (!bgx->is_dlm) + return true; + + lmac = &bgx->lmac[0]; + if (lmac->lmac_type == BGX_MODE_INVALID) + return false; + + return true; +} + +static void bgx_get_qlm_mode(struct bgx *bgx) +{ + struct lmac *lmac; + struct lmac *lmac01; + struct lmac *lmac23; + u8 idx; + + /* Init all LMAC's type to invalid */ + for (idx = 0; idx < bgx->max_lmac; idx++) { + lmac = &bgx->lmac[idx]; + lmac->lmacid = idx; + lmac->lmac_type = BGX_MODE_INVALID; + lmac->use_training = false; + } + + /* It is assumed that low level firmware sets this value */ + bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; + 
if (bgx->lmac_count > bgx->max_lmac) + bgx->lmac_count = bgx->max_lmac; + + for (idx = 0; idx < bgx->max_lmac; idx++) + bgx_set_lmac_config(bgx, idx); + + if (!bgx->is_dlm || bgx->is_rgx) { + bgx_print_qlm_mode(bgx, 0); + return; + } + + if (bgx->lmac_count) { + bgx_print_qlm_mode(bgx, 0); + bgx_print_qlm_mode(bgx, 2); + } + + /* If DLM0 is not in BGX mode then LMAC0/1 have + * to be configured with serdes lanes of DLM1 + */ + if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2)) + return; + for (idx = 0; idx < bgx->lmac_count; idx++) { + lmac01 = &bgx->lmac[idx]; + lmac23 = &bgx->lmac[idx + 2]; + lmac01->lmac_type = lmac23->lmac_type; + lmac01->lane_to_sds = lmac23->lane_to_sds; } } @@ -1042,7 +1165,7 @@ static int bgx_init_of_phy(struct bgx *bgx) } lmac++; - if (lmac == MAX_LMAC_PER_BGX) { + if (lmac == bgx->max_lmac) { of_node_put(node); break; } @@ -1087,6 +1210,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct device *dev = &pdev->dev; struct bgx *bgx = NULL; u8 lmac; + u16 sdevid; bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); if (!bgx) @@ -1115,10 +1239,30 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_release_regions; } - bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; - bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX; - bgx_vnic[bgx->bgx_id] = bgx; + pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); + if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { + bgx->bgx_id = + (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; + bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; + bgx->max_lmac = MAX_LMAC_PER_BGX; + bgx_vnic[bgx->bgx_id] = bgx; + } else { + bgx->is_rgx = true; + bgx->max_lmac = 1; + bgx->bgx_id = MAX_BGX_PER_CN81XX - 1; + bgx_vnic[bgx->bgx_id] = bgx; + xcv_init_hw(); + } + + /* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one + * BGX i.e BGX2 can be split across 2 DLMs. 
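+ * + * i.e. the check below sets is_dlm only when + * + * sdevid == PCI_SUBSYS_DEVID_81XX_BGX || + * (sdevid == PCI_SUBSYS_DEVID_83XX_BGX && bgx->bgx_id == 2)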
+ */ + pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid); + if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) || + ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2))) + bgx->is_dlm = true; + bgx_get_qlm_mode(bgx); err = bgx_init_phy(bgx); @@ -1133,6 +1277,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { dev_err(dev, "BGX%d failed to enable lmac%d\n", bgx->bgx_id, lmac); + while (lmac) + bgx_lmac_disable(bgx, --lmac); goto err_enable; } } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 42010d2e5ddf..d59c71e4a000 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -9,8 +9,20 @@ #ifndef THUNDER_BGX_H #define THUNDER_BGX_H -#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */ +/* PCI device ID */ +#define PCI_DEVICE_ID_THUNDER_BGX 0xA026 +#define PCI_DEVICE_ID_THUNDER_RGX 0xA054 + +/* Subsystem device IDs */ +#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126 +#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226 +#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326 + +#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */ #define MAX_BGX_PER_CN88XX 2 +#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */ +#define MAX_BGX_PER_CN83XX 4 +#define MAX_BGX_PER_NODE 4 #define MAX_LMAC_PER_BGX 4 #define MAX_BGX_CHANS_PER_LMAC 16 #define MAX_DMAC_PER_LMAC 8 @@ -18,8 +30,6 @@ #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 -#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX) - /* Registers */ #define BGX_CMRX_CFG 0x00 #define CMR_PKT_TX_EN BIT_ULL(13) @@ -136,6 +146,7 @@ #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 #define BGX_GMP_PCS_MISCX_CTL 0x30078 +#define PCS_MISC_CTL_DISP_EN BIT_ULL(13) #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full #define BGX_GMP_GMI_PRTX_CFG 0x38020 @@ -194,6 +205,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); void bgx_lmac_internal_loopback(int node, int bgx_idx, int lmac_idx, bool enable); +void xcv_init_hw(void); +void xcv_setup_link(bool link_up, int link_speed); + u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); #define BGX_RX_STATS_COUNT 11 @@ -213,16 +227,9 @@ enum LMAC_TYPE { BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */ -}; - -enum qlm_mode { - QLM_MODE_SGMII, /* SGMII, each lane independent */ - QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */ - QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */ - QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */ - QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */ - QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */ - QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */ + BGX_MODE_RGMII = 5, + BGX_MODE_QSGMII = 6, + BGX_MODE_INVALID = 7, }; #endif /* THUNDER_BGX_H */ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c new file mode 100644 index 000000000000..67befedef709 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2016 Cavium, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "nic.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-xcv" +#define DRV_VERSION "1.0" + +/* Register offsets */ +#define XCV_RESET 0x00 +#define PORT_EN BIT_ULL(63) +#define CLK_RESET BIT_ULL(15) +#define DLL_RESET BIT_ULL(11) +#define COMP_EN BIT_ULL(7) +#define TX_PKT_RESET BIT_ULL(3) +#define TX_DATA_RESET BIT_ULL(2) +#define RX_PKT_RESET BIT_ULL(1) +#define RX_DATA_RESET BIT_ULL(0) +#define XCV_DLL_CTL 0x10 +#define CLKRX_BYP BIT_ULL(23) +#define CLKTX_BYP BIT_ULL(15) +#define XCV_COMP_CTL 0x20 +#define DRV_BYP BIT_ULL(63) +#define XCV_CTL 0x30 +#define XCV_INT 0x40 +#define XCV_INT_W1S 0x48 +#define XCV_INT_ENA_W1C 0x50 +#define XCV_INT_ENA_W1S 0x58 +#define XCV_INBND_STATUS 0x80 +#define XCV_BATCH_CRD_RET 0x100 + +struct xcv { + void __iomem *reg_base; + struct pci_dev *pdev; +}; + +static struct xcv *xcv; + +/* Supported devices */ +static const struct pci_device_id xcv_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Cavium Inc"); +MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, xcv_id_table); + +void xcv_init_hw(void) +{ + u64 cfg; + + /* Take DLL out of reset */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg &= ~DLL_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + /* Take clock tree out of reset */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg &= ~CLK_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + /* Wait for DLL to lock */ + msleep(1); + + /* Configure DLL - enable or bypass + * TX no bypass, RX bypass + */ + cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL); + cfg &= ~0xFF03; + cfg |= CLKRX_BYP; + writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL); + + /* Enable compensation controller and force the + * write to be visible to HW by reading back.
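+ * + * i.e. the usual posted-write flush idiom: + * + * writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + * (void)readq_relaxed(xcv->reg_base + XCV_RESET);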
+ */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= COMP_EN; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + readq_relaxed(xcv->reg_base + XCV_RESET); + /* Wait for compensation state machine to lock */ + msleep(10); + + /* Enable the XCV block */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= PORT_EN; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= CLK_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); +} +EXPORT_SYMBOL(xcv_init_hw); + +void xcv_setup_link(bool link_up, int link_speed) +{ + u64 cfg; + int speed = 2; + + if (!xcv) { + pr_err("XCV init not done, probe may have failed\n"); + return; + } + + if (link_speed == 100) + speed = 1; + else if (link_speed == 10) + speed = 0; + + if (link_up) { + /* set operating speed */ + cfg = readq_relaxed(xcv->reg_base + XCV_CTL); + cfg &= ~0x03; + cfg |= speed; + writeq_relaxed(cfg, xcv->reg_base + XCV_CTL); + + /* Reset datapaths */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= TX_DATA_RESET | RX_DATA_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + /* Enable the packet flow */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg |= TX_PKT_RESET | RX_PKT_RESET; + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + + /* Return credits to RGX */ + writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET); + } else { + /* Disable packet flow */ + cfg = readq_relaxed(xcv->reg_base + XCV_RESET); + cfg &= ~(TX_PKT_RESET | RX_PKT_RESET); + writeq_relaxed(cfg, xcv->reg_base + XCV_RESET); + readq_relaxed(xcv->reg_base + XCV_RESET); + } +} +EXPORT_SYMBOL(xcv_setup_link); + +static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err; + struct device *dev = &pdev->dev; + + xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL); + if (!xcv) + return -ENOMEM; + xcv->pdev = pdev; + + pci_set_drvdata(pdev, xcv); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + goto err_kfree; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!xcv->reg_base) { + dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + return 0; + +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_kfree: + devm_kfree(dev, xcv); + xcv = NULL; + return err; +} + +static void xcv_remove(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + + if (xcv) { + devm_kfree(dev, xcv); + xcv = NULL; + } + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver xcv_driver = { + .name = DRV_NAME, + .id_table = xcv_id_table, + .probe = xcv_probe, + .remove = xcv_remove, +}; + +static int __init xcv_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&xcv_driver); +} + +static void __exit xcv_cleanup_module(void) +{ + pci_unregister_driver(&xcv_driver); +} + +module_init(xcv_init_module); +module_exit(xcv_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index ace0ab98d0f1..246129650967 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2e2aa9fec9bb..3f7b33aa5ec5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -53,6 +53,8 @@ #include "cxgb4_uld.h" #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) +extern struct list_head adapter_list; +extern struct mutex uld_mutex; enum { MAX_NPORTS = 4, /* max # of ports */ @@ -338,12 +340,14 @@ struct adapter_params { enum chip_type chip; /* chip code */ struct arch_specific_params arch; /* chip specific params */ unsigned char offload; + unsigned char crypto; /* HW capability for crypto */ unsigned char bypass; unsigned int ofldq_wr_cred; bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + unsigned int nsched_cls; /* number of traffic classes */ unsigned int max_ordird_qp; /* Max read depth per RDMA QP */ unsigned int max_ird_adapter; /* Max read depth per adapter */ }; @@ -403,7 +407,6 @@ struct fw_info { struct fw_hdr fw_hdr; }; - struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; @@ -493,6 +496,7 @@ struct port_info { #endif /* CONFIG_CHELSIO_T4_FCOE */ bool rxtstamp; /* Enable TS */ struct hwtstamp_config tstamp_config; + struct sched_table *sched_tbl; }; struct dentry; @@ -510,6 +514,10 @@ enum { /* adapter flags */ FW_OFLD_CONN = (1 << 9), }; +enum { + ULP_CRYPTO_LOOKASIDE = 1 << 0, +}; + struct rx_sw_desc; struct sge_fl { /* SGE free-buffer queue state */ @@ -680,6 +688,16 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */ u8 full; /* the Tx ring is full */ } ____cacheline_aligned_in_smp; +struct sge_uld_rxq_info { + char name[IFNAMSIZ]; /* name of ULD driver */ + struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */ + u16 *msix_tbl; /* msix_tbl for uld */ + u16 *rspq_id; /* response queue id's of rxq */ + u16 nrxq; /* # of ingress uld queues */ + u16 nciq; /* # of completion queues */ + u8 uld; /* uld type */ +}; + struct sge { struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; @@ -691,6 +709,7 @@ struct sge { struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + struct sge_uld_rxq_info **uld_rxq_info; struct sge_rspq intrq ____cacheline_aligned_in_smp; spinlock_t intrq_lock; @@ -702,6 +721,7 @@ struct sge { u16 niscsitq; /* # of available iSCST Rx queues */ u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ + u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 iscsi_rxq[MAX_OFLD_QSETS]; u16 iscsit_rxq[MAX_ISCSIT_QUEUES]; u16 rdma_rxq[MAX_RDMA_QUEUES]; @@ -757,6 +777,17 @@ struct hash_mac_addr { u8 addr[ETH_ALEN]; }; +struct uld_msix_bmap { + unsigned long 
*msix_bmap; + unsigned int mapsize; + spinlock_t lock; /* lock for acquiring bitmap */ +}; + +struct uld_msix_info { + unsigned short vec; + char desc[IFNAMSIZ + 10]; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -767,6 +798,7 @@ struct adapter { unsigned int mbox; unsigned int pf; unsigned int flags; + unsigned int adap_idx; enum chip_type chip; int msg_enable; @@ -779,6 +811,9 @@ struct adapter { unsigned short vec; char desc[IFNAMSIZ + 10]; } msix_info[MAX_INGQ + 1]; + struct uld_msix_info *msix_info_ulds; /* msix info for uld's */ + struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */ + unsigned int msi_idx; struct doorbell_stats db_stats; struct sge sge; @@ -793,7 +828,9 @@ struct adapter { unsigned int clipt_start; unsigned int clipt_end; struct clip_tbl *clipt; + struct cxgb4_pci_uld_info *uld; void *uld_handle[CXGB4_ULD_MAX]; + unsigned int num_uld; struct list_head list_node; struct list_head rcu_node; struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ @@ -824,6 +861,55 @@ struct adapter { spinlock_t win0_lock ____cacheline_aligned_in_smp; }; +/* Support for "sched-class" command to allow a TX Scheduling Class to be + * programmed with various parameters. + */ +struct ch_sched_params { + s8 type; /* packet or flow */ + union { + struct { + s8 level; /* scheduler hierarchy level */ + s8 mode; /* per-class or per-flow */ + s8 rateunit; /* bit or packet rate */ + s8 ratemode; /* %port relative or kbps absolute */ + s8 channel; /* scheduler channel [0..N] */ + s8 class; /* scheduler class [0..N] */ + s32 minrate; /* minimum rate */ + s32 maxrate; /* maximum rate */ + s16 weight; /* percent weight */ + s16 pktsize; /* average packet size */ + } params; + } u; +}; + +enum { + SCHED_CLASS_TYPE_PACKET = 0, /* class type */ +}; + +enum { + SCHED_CLASS_LEVEL_CL_RL = 0, /* class rate limiter */ +}; + +enum { + SCHED_CLASS_MODE_CLASS = 0, /* per-class scheduling */ +}; + +enum { + SCHED_CLASS_RATEUNIT_BITS = 0, /* bit rate scheduling */ +}; + +enum { + SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ +}; + +/* Support for "sched_queue" command to allow one or more NIC TX Queues + * to be bound to a TX Scheduling Class. 
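How ch_sched_params and the queue-binding structure that follows fit together is easiest to see end to end. Below is a minimal sketch, not part of the patch itself; example_cap_queue0() and the 1 Gbps figure are invented for illustration, while the other identifiers come from the sched.c/sched.h files added later in this series (the ndo_set_tx_maxrate hook in cxgb4_main.c performs the same sequence).

/* Hypothetical helper: carve out an absolute-rate 1 Gbps class and
 * bind this port's first TX queue to it.  Error handling trimmed.
 */
static int example_cap_queue0(struct net_device *dev)
{
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	struct sched_class *e;

	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;	/* class rate limiter */
	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;	/* absolute Kb/s */
	p.u.params.channel = netdev2pinfo(dev)->tx_chan;
	p.u.params.class = SCHED_CLS_NONE;	/* let the driver pick one */
	p.u.params.maxrate = 1000000;		/* 1 Gbps, in Kbps */
	p.u.params.pktsize = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	memset(&qe, 0, sizeof(qe));
	qe.queue = 0;			/* first queue set of this port */
	qe.class = e->idx;		/* bind it to the new class */
	return cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
}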
+ */ +struct ch_sched_queue { + s8 queue; /* queue index */ + s8 class; /* class index */ +}; + /* Defined bit width of user definable filter tuples */ #define ETHTYPE_BITWIDTH 16 @@ -952,6 +1038,11 @@ static inline int is_offload(const struct adapter *adap) return adap->params.offload; } +static inline int is_pci_uld(const struct adapter *adap) +{ + return adap->params.crypto; +} + static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) { return readl(adap->regs + reg_addr); @@ -1185,8 +1276,6 @@ int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); int cxgb_busy_poll(struct napi_struct *napi); -int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, - unsigned int cnt); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; @@ -1289,6 +1378,18 @@ static inline int hash_mac_addr(const u8 *addr) return a & 0x3f; } +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, + unsigned int size, unsigned int iqe_size) +{ + q->adap = adap; + cxgb4_set_rspq_intr_params(q, us, cnt); + q->iqe_len = iqe_size; + q->size = size; +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1514,6 +1615,9 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, int filter_index, int *enabled); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); +int t4_sched_params(struct adapter *adapter, int type, int level, int mode, + int rateunit, int ratemode, int channel, int class, + int minrate, int maxrate, int weight, int pktsize); void t4_sge_decode_idma_state(struct adapter *adapter, int state); void t4_free_mem(void *addr); void t4_idma_monitor_init(struct adapter *adapter, @@ -1521,4 +1625,9 @@ void t4_idma_monitor_init(struct adapter *adapter, void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks); +int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, + unsigned int naddr, u8 *addr); +void uld_mem_free(struct adapter *adap); +int uld_mem_alloc(struct adapter *adap); +void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c762a8c8c954..44019bdd526d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -76,6 +76,7 @@ #include "cxgb4_debugfs.h" #include "clip_tbl.h" #include "l2t.h" +#include "sched.h" char cxgb4_driver_name[] = KBUILD_MODNAME; @@ -223,8 +224,8 @@ MODULE_PARM_DESC(select_queue, static struct dentry *cxgb4_debugfs_root; -static LIST_HEAD(adapter_list); -static DEFINE_MUTEX(uld_mutex); +LIST_HEAD(adapter_list); +DEFINE_MUTEX(uld_mutex); /* Adapter list to be accessed from atomic context */ static LIST_HEAD(adap_rcu_list); static DEFINE_SPINLOCK(adap_rcu_lock); @@ -1066,20 +1067,20 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, */ static int setup_sge_queues(struct adapter *adap) { - int err, msi_idx, i, j; + int err, i, j; struct sge *s = &adap->sge; bitmap_zero(s->starving_fl, s->egr_sz); bitmap_zero(s->txq_maperr, s->egr_sz); if (adap->flags & USING_MSIX) - msi_idx = 1; /* vector 0 is for non-queue interrupts */ + adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ else { err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, NULL, NULL, NULL, -1); if (err) return err; - msi_idx = -((int)s->intrq.abs_id + 1); + adap->msi_idx = -((int)s->intrq.abs_id + 1); } /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, @@ -1096,7 +1097,7 @@ static int setup_sge_queues(struct adapter *adap) * new/deleted queues. */ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], - msi_idx, NULL, fwevtq_handler, NULL, -1); + adap->msi_idx, NULL, fwevtq_handler, NULL, -1); if (err) { freeout: t4_free_sge_resources(adap); return err; @@ -1109,10 +1110,10 @@ freeout: t4_free_sge_resources(adap); struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; for (j = 0; j < pi->nqsets; j++, q++) { - if (msi_idx > 0) - msi_idx++; + if (adap->msi_idx > 0) + adap->msi_idx++; err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, - msi_idx, &q->fl, + adap->msi_idx, &q->fl, t4_ethrx_handler, NULL, t4_get_mps_bg_map(adap, @@ -1141,11 +1142,11 @@ freeout: t4_free_sge_resources(adap); } #define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \ - err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \ + err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \ if (err) \ goto freeout; \ - if (msi_idx > 0) \ - msi_idx += nq; \ + if (adap->msi_idx > 0) \ + adap->msi_idx += nq; \ } while (0) ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false); @@ -2565,6 +2566,12 @@ static void detach_ulds(struct adapter *adap) CXGB4_STATE_DETACH); adap->uld_handle[i] = NULL; } + for (i = 0; i < CXGB4_PCI_ULD_MAX; i++) + if (adap->uld && adap->uld[i].handle) { + adap->uld[i].state_change(adap->uld[i].handle, + CXGB4_STATE_DETACH); + adap->uld[i].handle = NULL; + } if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; @@ -2584,6 +2591,10 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) for (i = 0; i < CXGB4_ULD_MAX; i++) if (adap->uld_handle[i]) ulds[i].state_change(adap->uld_handle[i], new_state); + for (i = 0; i < CXGB4_PCI_ULD_MAX; i++) + if (adap->uld && adap->uld[i].handle) + adap->uld[i].state_change(adap->uld[i].handle, + new_state); mutex_unlock(&uld_mutex); } @@ -2922,7 +2933,6 @@ EXPORT_SYMBOL(cxgb4_create_server_filter); int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, unsigned int queue, bool ipv6) { - int ret; struct filter_entry *f; struct adapter *adap; @@ -2936,11 +2946,7 @@ int 
cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, /* Unlock the filter */ f->locked = 0; - ret = delete_filter(adap, stid); - if (ret) - return ret; - - return 0; + return delete_filter(adap, stid); } EXPORT_SYMBOL(cxgb4_remove_server_filter); @@ -3078,6 +3084,35 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) return ret; } +#ifdef CONFIG_PCI_IOV +static int dummy_open(struct net_device *dev) +{ + /* Turn carrier off since we don't have to transmit anything on this + * interface. + */ + netif_carrier_off(dev); + return 0; +} + +static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + /* verify MAC addr is valid */ + if (!is_valid_ether_addr(mac)) { + dev_err(pi->adapter->pdev_dev, + "Invalid Ethernet address %pM for VF %d\n", + mac, vf); + return -EINVAL; + } + + dev_info(pi->adapter->pdev_dev, + "Setting MAC %pM on VF %d\n", mac, vf); + return t4_set_vf_mac_acl(adap, vf + 1, 1, mac); +} +#endif + static int cxgb_set_mac_addr(struct net_device *dev, void *p) { int ret; @@ -3114,6 +3149,87 @@ static void cxgb_netpoll(struct net_device *dev) } #endif +static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sched_class *e; + struct ch_sched_params p; + struct ch_sched_queue qe; + u32 req_rate; + int err = 0; + + if (!can_sched(dev)) + return -ENOTSUPP; + + if (index < 0 || index > pi->nqsets - 1) + return -EINVAL; + + if (!(adap->flags & FULL_INIT_DONE)) { + dev_err(adap->pdev_dev, + "Failed to rate limit on queue %d. Link Down?\n", + index); + return -EINVAL; + } + + /* Convert from Mbps to Kbps */ + req_rate = rate << 10; + + /* Max rate is 10 Gbps */ + if (req_rate >= SCHED_MAX_RATE_KBPS) { + dev_err(adap->pdev_dev, + "Invalid rate %u Mbps, Max rate is %u Gbps\n", + rate, SCHED_MAX_RATE_KBPS); + return -ERANGE; + } + + /* First unbind the queue from any existing class */ + memset(&qe, 0, sizeof(qe)); + qe.queue = index; + qe.class = SCHED_CLS_NONE; + + err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE); + if (err) { + dev_err(adap->pdev_dev, + "Unbinding Queue %d on port %d fail. Err: %d\n", + index, pi->port_id, err); + return err; + } + + /* Queue already unbound */ + if (!req_rate) + return 0; + + /* Fetch any available unused or matching scheduling class */ + memset(&p, 0, sizeof(p)); + p.type = SCHED_CLASS_TYPE_PACKET; + p.u.params.level = SCHED_CLASS_LEVEL_CL_RL; + p.u.params.mode = SCHED_CLASS_MODE_CLASS; + p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS; + p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS; + p.u.params.channel = pi->tx_chan; + p.u.params.class = SCHED_CLS_NONE; + p.u.params.minrate = 0; + p.u.params.maxrate = req_rate; + p.u.params.weight = 0; + p.u.params.pktsize = dev->mtu; + + e = cxgb4_sched_class_alloc(dev, &p); + if (!e) + return -ENOMEM; + + /* Bind the queue to a scheduling class */ + memset(&qe, 0, sizeof(qe)); + qe.queue = index; + qe.class = e->idx; + + err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE); + if (err) + dev_err(adap->pdev_dev, + "Queue rate limiting failed. 
Err: %d\n", err); + return err; +} + static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, @@ -3136,7 +3252,29 @@ static const struct net_device_ops cxgb4_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = cxgb_busy_poll, #endif + .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, +}; + +#ifdef CONFIG_PCI_IOV +static const struct net_device_ops cxgb4_mgmt_netdev_ops = { + .ndo_open = dummy_open, + .ndo_set_vf_mac = cxgb_set_vf_mac, +}; +#endif + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); + strlcpy(info->version, cxgb4_driver_version, + sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { + .get_drvinfo = get_drvinfo, }; void t4_fatal_err(struct adapter *adap) @@ -3979,6 +4117,12 @@ static int adap_init0(struct adapter *adap) adap->clipt_start = val[0]; adap->clipt_end = val[1]; + /* We don't yet have a PARAMs calls to retrieve the number of Traffic + * Classes supported by the hardware/firmware so we hard code it here + * for now. + */ + adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16; + /* query params related to active filter region */ params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); @@ -4130,6 +4274,11 @@ static int adap_init0(struct adapter *adap) adap->vres.iscsi.start = val[0]; adap->vres.iscsi.size = val[1] - val[0] + 1; } + if (caps_cmd.cryptocaps) { + /* Should query params here...TODO */ + adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; + adap->num_uld += 1; + } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV @@ -4311,16 +4460,6 @@ static inline bool is_x_10g_port(const struct link_config *lc) (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; } -static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, - unsigned int us, unsigned int cnt, - unsigned int size, unsigned int iqe_size) -{ - q->adap = adap; - cxgb4_set_rspq_intr_params(q, us, cnt); - q->iqe_len = iqe_size; - q->size = size; -} - /* * Perform default configuration of DMA queues depending on the number and type * of ports we found and the number of available CPUs. Most settings can be @@ -4337,8 +4476,12 @@ static void cfg_queues(struct adapter *adap) /* Reduce memory usage in kdump environment, disable all offload. 
*/ - if (is_kdump_kernel()) + if (is_kdump_kernel()) { adap->params.offload = 0; + adap->params.crypto = 0; + } else if (adap->num_uld && uld_mem_alloc(adap)) { + adap->params.crypto = 0; + } for_each_port(adap, i) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); @@ -4498,23 +4641,58 @@ static void reduce_ethqs(struct adapter *adap, int n) } } +static int get_msix_info(struct adapter *adap) +{ + struct uld_msix_info *msix_info; + int max_ingq = (MAX_OFLD_QSETS * adap->num_uld); + + msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); + if (!msix_info) + return -ENOMEM; + + adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), + sizeof(long), GFP_KERNEL); + if (!adap->msix_bmap_ulds.msix_bmap) { + kfree(msix_info); + return -ENOMEM; + } + spin_lock_init(&adap->msix_bmap_ulds.lock); + adap->msix_info_ulds = msix_info; + return 0; +} + +static void free_msix_info(struct adapter *adap) +{ + if (!adap->num_uld) + return; + + kfree(adap->msix_info_ulds); + kfree(adap->msix_bmap_ulds.msix_bmap); +} + /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ #define EXTRA_VECS 2 static int enable_msix(struct adapter *adap) { - int ofld_need = 0; - int i, want, need, allocated; + int ofld_need = 0, uld_need = 0; + int i, j, want, need, allocated; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; struct msix_entry *entries; + int max_ingq = MAX_INGQ; - entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), + max_ingq += (MAX_OFLD_QSETS * adap->num_uld); + entries = kmalloc(sizeof(*entries) * (max_ingq + 1), GFP_KERNEL); if (!entries) return -ENOMEM; - for (i = 0; i < MAX_INGQ + 1; ++i) + /* map for msix */ + if (is_pci_uld(adap) && get_msix_info(adap)) + adap->params.crypto = 0; + + for (i = 0; i < max_ingq + 1; ++i) entries[i].entry = i; want = s->max_ethqsets + EXTRA_VECS; @@ -4527,13 +4705,17 @@ static int enable_msix(struct adapter *adap) else ofld_need = 4 * nchan; } + if (is_pci_uld(adap)) { + want += netif_get_num_default_rss_queues() * nchan; + uld_need = nchan; + } #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for * each port. */ - need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; + need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; #else - need = adap->params.nports + EXTRA_VECS + ofld_need; + need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; #endif allocated = pci_enable_msix_range(adap->pdev, entries, need, want); if (allocated < 0) { @@ -4547,12 +4729,20 @@ static int enable_msix(struct adapter *adap) * Every group gets its minimum requirement and NIC gets top * priority for leftovers. 
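The new ULD terms in this vector arithmetic are easier to sanity-check with concrete numbers. A worked sketch, illustrative only, assuming a 2-port adapter, netif_get_num_default_rss_queues() == 8 and DCB disabled:

/* Worked example of the ULD vector accounting (not in the patch). */
static void example_uld_vector_budget(void)
{
	int nchan = 2;			/* ports on the adapter */
	int uld_want = 8 * nchan;	/* added to "want": 16 vectors */
	int uld_need = nchan;		/* added to "need": 1 per channel */

	pr_debug("uld msix: want +%d, need +%d\n", uld_want, uld_need);
	/* Full grant: s->nqs_per_uld = 8 * nchan = 16 queues per ULD.
	 * Shortfall (allocated < want): trimmed to nchan = 2, and the
	 * NIC leftover becomes
	 * i = allocated - EXTRA_VECS - ofld_need - uld_need.
	 */
}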
*/ - i = allocated - EXTRA_VECS - ofld_need; + i = allocated - EXTRA_VECS - ofld_need - uld_need; if (i < s->max_ethqsets) { s->max_ethqsets = i; if (i < s->ethqsets) reduce_ethqs(adap, i); } + if (is_pci_uld(adap)) { + if (allocated < want) + s->nqs_per_uld = nchan; + else + s->nqs_per_uld = netif_get_num_default_rss_queues() * + nchan; + } + if (is_offload(adap)) { if (allocated < want) { s->rdmaqs = nchan; @@ -4564,16 +4754,24 @@ static int enable_msix(struct adapter *adap) /* leftovers go to OFLD */ i = allocated - EXTRA_VECS - s->max_ethqsets - - s->rdmaqs - s->rdmaciqs - s->niscsitq; + s->rdmaqs - s->rdmaciqs - s->niscsitq; + if (is_pci_uld(adap)) + i -= s->nqs_per_uld * adap->num_uld; s->iscsiqsets = (i / nchan) * nchan; /* round down */ } - for (i = 0; i < allocated; ++i) + + for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i) adap->msix_info[i].vec = entries[i].vector; + if (is_pci_uld(adap)) { + for (j = 0 ; i < allocated; ++i, j++) + adap->msix_info_ulds[j].vec = entries[i].vector; + adap->msix_bmap_ulds.mapsize = j; + } dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " - "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", + "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n", allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs, - s->rdmaciqs); + s->rdmaciqs, s->nqs_per_uld); kfree(entries); return 0; @@ -4783,6 +4981,7 @@ static void free_some_resources(struct adapter *adapter) unsigned int i; t4_free_mem(adapter->l2t); + t4_cleanup_sched(adapter); t4_free_mem(adapter->tids.tid_tab); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); @@ -4834,21 +5033,59 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) } #ifdef CONFIG_PCI_IOV +static void dummy_setup(struct net_device *dev) +{ + dev->type = ARPHRD_NONE; + dev->mtu = 0; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->tx_queue_len = 0; + dev->flags |= IFF_NOARP; + dev->priv_flags |= IFF_NO_QUEUE; + + /* Initialize the device structure. 
*/ + dev->netdev_ops = &cxgb4_mgmt_netdev_ops; + dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; + dev->destructor = free_netdev; +} + +static int config_mgmt_dev(struct pci_dev *pdev) +{ + struct adapter *adap = pci_get_drvdata(pdev); + struct net_device *netdev; + struct port_info *pi; + char name[IFNAMSIZ]; + int err; + + snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf); + netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup); + if (!netdev) + return -ENOMEM; + + pi = netdev_priv(netdev); + pi->adapter = adap; + SET_NETDEV_DEV(netdev, &pdev->dev); + + adap->port[0] = netdev; + + err = register_netdev(adap->port[0]); + if (err) { + pr_info("Unable to register VF mgmt netdev %s\n", name); + free_netdev(adap->port[0]); + adap->port[0] = NULL; + return err; + } + return 0; +} + static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) { + struct adapter *adap = pci_get_drvdata(pdev); int err = 0; int current_vfs = pci_num_vf(pdev); u32 pcie_fw; - void __iomem *regs; - regs = pci_ioremap_bar(pdev, 0); - if (!regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); - return -ENOMEM; - } - - pcie_fw = readl(regs + PCIE_FW_A); - iounmap(regs); + pcie_fw = readl(adap->regs + PCIE_FW_A); /* Check if cxgb4 is the MASTER and fw is initialized */ if (!(pcie_fw & PCIE_FW_INIT_F) || !(pcie_fw & PCIE_FW_MASTER_VLD_F) || @@ -4875,6 +5112,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) */ if (!num_vfs) { pci_disable_sriov(pdev); + if (adap->port[0]) { + unregister_netdev(adap->port[0]); + adap->port[0] = NULL; + } return num_vfs; } @@ -4882,6 +5123,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) err = pci_enable_sriov(pdev, num_vfs); if (err) return err; + + err = config_mgmt_dev(pdev); + if (err) + return err; } return num_vfs; } @@ -4893,9 +5138,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct port_info *pi; bool highdma = false; struct adapter *adapter = NULL; + struct net_device *netdev; void __iomem *regs; u32 whoami, pl_rev; enum chip_type chip; + static int adap_idx = 1; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -4930,7 +5177,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? 
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); if (func != ent->driver_data) { +#ifndef CONFIG_PCI_IOV iounmap(regs); +#endif pci_disable_device(pdev); pci_save_state(pdev); /* to restore SR-IOV later */ goto sriov; @@ -4962,6 +5211,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto out_unmap_bar0; } + adap_idx++; adapter->workq = create_singlethread_workqueue("cxgb4"); if (!adapter->workq) { @@ -5048,8 +5298,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) T6_STATMODE_V(0))); for_each_port(adapter, i) { - struct net_device *netdev; - netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_ETH_QSETS); if (!netdev) { @@ -5143,6 +5391,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } #endif + + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls); + if (!pi->sched_tbl) + dev_warn(&pdev->dev, + "could not activate scheduling on port %d\n", + i); + } + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { dev_warn(&pdev->dev, "could not allocate TID table, " "continuing\n"); @@ -5168,8 +5426,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* See what interrupts we'll be using */ if (msi > 1 && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; - else if (msi > 0 && pci_enable_msi(pdev) == 0) + else if (msi > 0 && pci_enable_msi(pdev) == 0) { adapter->flags |= USING_MSI; + if (msi > 1) + free_msix_info(adapter); + } /* check for PCI Express bandwidth capabilities */ cxgb4_check_pcie_caps(adapter); @@ -5217,6 +5478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) attach_ulds(adapter); print_adapter_info(adapter); + return 0; sriov: #ifdef CONFIG_PCI_IOV @@ -5230,11 +5492,48 @@ sriov: "instantiated %u virtual functions\n", num_vf[func]); } -#endif + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto free_pci_region; + } + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); + adapter->mbox = func; + adapter->pf = func; + adapter->regs = regs; + adapter->adap_idx = adap_idx; + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto free_adapter; + } + pci_set_drvdata(pdev, adapter); + return 0; + + free_adapter: + kfree(adapter); + free_pci_region: + iounmap(regs); + pci_disable_sriov(pdev); + pci_release_regions(pdev); + return err; +#else return 0; +#endif out_free_dev: free_some_resources(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); + if (adapter->num_uld) + uld_mem_free(adapter); out_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); @@ -5258,12 +5557,12 @@ static void remove_one(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); -#ifdef CONFIG_PCI_IOV - pci_disable_sriov(pdev); - -#endif + if (!adapter) { + pci_release_regions(pdev); + return; + } - if (adapter) { + if (adapter->pf == 4) { int i; /* Tear down per-adapter Work Queue first since it can contain @@ -5296,6 +5595,10 @@ static void remove_one(struct pci_dev *pdev) if (adapter->flags & FULL_INIT_DONE) cxgb_down(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); + if (adapter->num_uld) + uld_mem_free(adapter); free_some_resources(adapter); #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); @@
-5312,8 +5615,17 @@ static void remove_one(struct pci_dev *pdev) kfree(adapter->mbox_log); synchronize_rcu(); kfree(adapter); - } else + } +#ifdef CONFIG_PCI_IOV + else { + if (adapter->port[0]) + unregister_netdev(adapter->port[0]); + iounmap(adapter->regs); + kfree(adapter); + pci_disable_sriov(pdev); pci_release_regions(pdev); + } +#endif } static struct pci_driver cxgb4_driver = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c new file mode 100644 index 000000000000..5d402bace6c1 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -0,0 +1,554 @@ +/* + * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
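The file that follows opens with a small spinlock-protected bitmap allocator that hands out ULD MSI-X vector indices. Condensed to its core pattern it looks like the sketch below; example_bmap_get() is a paraphrase of the real get_msix_idx_from_bmap(), not a drop-in replacement.

/* Condensed sketch of the vector-index allocator used in this file. */
static int example_bmap_get(unsigned long *bmap, unsigned int size,
			    spinlock_t *lock)
{
	unsigned long flags;
	unsigned int idx;

	spin_lock_irqsave(lock, flags);
	idx = find_first_zero_bit(bmap, size);
	if (idx >= size) {
		spin_unlock_irqrestore(lock, flags);
		return -ENOSPC;		/* every vector already claimed */
	}
	__set_bit(idx, bmap);		/* non-atomic op is safe under lock */
	spin_unlock_irqrestore(lock, flags);
	return idx;
}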
+ * + * Written by: Atul Gupta (atul.gupta@chelsio.com) + * Written by: Hariprasad Shenai (hariprasad@chelsio.com) + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/pci.h> + +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "t4_regs.h" +#include "t4fw_api.h" +#include "t4_msg.h" + +#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++) + +static int get_msix_idx_from_bmap(struct adapter *adap) +{ + struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; + unsigned long flags; + unsigned int msix_idx; + + spin_lock_irqsave(&bmap->lock, flags); + msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); + if (msix_idx < bmap->mapsize) { + __set_bit(msix_idx, bmap->msix_bmap); + } else { + spin_unlock_irqrestore(&bmap->lock, flags); + return -ENOSPC; + } + + spin_unlock_irqrestore(&bmap->lock, flags); + return msix_idx; +} + +static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) +{ + struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; + unsigned long flags; + + spin_lock_irqsave(&bmap->lock, flags); + __clear_bit(msix_idx, bmap->msix_bmap); + spin_unlock_irqrestore(&bmap->lock, flags); +} + +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct adapter *adap = q->adap; + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + int ret; + + /* FW can send CPLs encapsulated in a CPL_FW4_MSG */ + if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) + rsp += 2; + + if (q->flush_handler) + ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle, + rsp, gl, &q->lro_mgr, + &q->napi); + else + ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle, + rsp, gl); + + if (ret) { + rxq->stats.nomem++; + return -1; + } + + if (!gl) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static int alloc_uld_rxqs(struct adapter *adap, + struct sge_uld_rxq_info *rxq_info, + unsigned int nq, unsigned int offset, bool lro) +{ + struct sge *s = &adap->sge; + struct sge_ofld_rxq *q = rxq_info->uldrxq + offset; + unsigned short *ids = rxq_info->rspq_id + offset; + unsigned int per_chan = nq / adap->params.nports; + unsigned int msi_idx, bmap_idx; + int i, err; + + if (adap->flags & USING_MSIX) + msi_idx = 1; + else + msi_idx = -((int)s->intrq.abs_id + 1); + + for (i = 0; i < nq; i++, q++) { + if (msi_idx >= 0) { + bmap_idx = get_msix_idx_from_bmap(adap); + adap->msi_idx++; + } + err = t4_sge_alloc_rxq(adap, &q->rspq, false, + adap->port[i / per_chan], + adap->msi_idx, + q->fl.size ? &q->fl : NULL, + uldrx_handler, + NULL, + 0); + if (err) + goto freeout; + if (msi_idx >= 0) + rxq_info->msix_tbl[i + offset] = bmap_idx; + memset(&q->stats, 0, sizeof(q->stats)); + if (ids) + ids[i] = q->rspq.abs_id; + } + return 0; +freeout: + q = rxq_info->uldrxq + offset; + for ( ; i; i--, q++) { + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? &q->fl : NULL); + adap->msi_idx--; + } + + /* We need to free rxq also in case of ciq allocation failure */ + if (offset) { + q = rxq_info->uldrxq + offset; + for ( ; i; i--, q++) { + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? 
&q->fl : NULL); + adap->msi_idx--; + } + } + return err; +} + +int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + if (adap->flags & USING_MSIX) { + rxq_info->msix_tbl = kcalloc(rxq_info->nrxq + rxq_info->nciq, + sizeof(unsigned short), GFP_KERNEL); + if (!rxq_info->msix_tbl) + return -ENOMEM; + } + + return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) && + !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq, + rxq_info->nrxq, lro)); +} + +static void t4_free_uld_rxqs(struct adapter *adap, int n, + struct sge_ofld_rxq *q) +{ + for ( ; n; n--, q++) { + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? &q->fl : NULL); + adap->msi_idx--; + } +} + +void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + if (rxq_info->nciq) + t4_free_uld_rxqs(adap, rxq_info->nciq, + rxq_info->uldrxq + rxq_info->nrxq); + t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); + if (adap->flags & USING_MSIX) + kfree(rxq_info->msix_tbl); +} + +int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, + const struct cxgb4_pci_uld_info *uld_info) +{ + struct sge *s = &adap->sge; + struct sge_uld_rxq_info *rxq_info; + int i, nrxq; + + rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); + if (!rxq_info) + return -ENOMEM; + + if (uld_info->nrxq > s->nqs_per_uld) + rxq_info->nrxq = s->nqs_per_uld; + else + rxq_info->nrxq = uld_info->nrxq; + if (!uld_info->nciq) + rxq_info->nciq = 0; + else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld) + rxq_info->nciq = s->nqs_per_uld; + else + rxq_info->nciq = uld_info->nciq; + + nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */ + rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq), + GFP_KERNEL); + if (!rxq_info->uldrxq) { + kfree(rxq_info); + return -ENOMEM; + } + + rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL); + if (!rxq_info->rspq_id) { + kfree(rxq_info->uldrxq); + kfree(rxq_info); + return -ENOMEM; + } + + for (i = 0; i < rxq_info->nrxq; i++) { + struct sge_ofld_rxq *r = &rxq_info->uldrxq[i]; + + init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64); + r->rspq.uld = uld_type; + r->fl.size = 72; + } + + for (i = rxq_info->nrxq; i < nrxq; i++) { + struct sge_ofld_rxq *r = &rxq_info->uldrxq[i]; + + init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64); + r->rspq.uld = uld_type; + r->fl.size = 72; + } + + memcpy(rxq_info->name, uld_info->name, IFNAMSIZ); + adap->sge.uld_rxq_info[uld_type] = rxq_info; + + return 0; +} + +void free_queues_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + kfree(rxq_info->rspq_id); + kfree(rxq_info->uldrxq); + kfree(rxq_info); +} + +int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx, bmap_idx, err = 0; + + for_each_uldrxq(rxq_info, idx) { + bmap_idx = rxq_info->msix_tbl[idx]; + err = request_irq(adap->msix_info_ulds[bmap_idx].vec, + t4_sge_intr_msix, 0, + adap->msix_info_ulds[bmap_idx].desc, + &rxq_info->uldrxq[idx].rspq); + if (err) + goto unwind; + } + return 0; +unwind: + while (--idx >= 0) { + bmap_idx = rxq_info->msix_tbl[idx]; + free_msix_idx_in_bmap(adap, bmap_idx); + free_irq(adap->msix_info_ulds[bmap_idx].vec, + &rxq_info->uldrxq[idx].rspq); + } + return err; +} + +void free_msix_queue_irqs_uld(struct
adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) { + unsigned int bmap_idx = rxq_info->msix_tbl[idx]; + + free_msix_idx_in_bmap(adap, bmap_idx); + free_irq(adap->msix_info_ulds[bmap_idx].vec, + &rxq_info->uldrxq[idx].rspq); + } +} + +void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int n = sizeof(adap->msix_info_ulds[0].desc); + int idx; + + for_each_uldrxq(rxq_info, idx) { + unsigned int bmap_idx = rxq_info->msix_tbl[idx]; + + snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d", + adap->port[0]->name, rxq_info->name, idx); + } +} + +static void enable_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (!q) + return; + + if (q->handler) { + cxgb_busy_poll_init_lock(q); + napi_enable(&q->napi); + } + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), + SEINTARM_V(q->intr_params) | + INGRESSQID_V(q->cntxt_id)); +} + +static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (q && q->handler) { + napi_disable(&q->napi); + local_bh_disable(); + while (!cxgb_poll_lock_napi(q)) + mdelay(1); + local_bh_enable(); + } +} + +void enable_rx_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) + enable_rx(adap, &rxq_info->uldrxq[idx].rspq); +} + +void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) + quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq); +} + +static void uld_queue_init(struct adapter *adap, unsigned int uld_type, + struct cxgb4_lld_info *lli) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + lli->rxq_ids = rxq_info->rspq_id; + lli->nrxq = rxq_info->nrxq; + lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq; + lli->nciq = rxq_info->nciq; +} + +int uld_mem_alloc(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL); + if (!adap->uld) + return -ENOMEM; + + s->uld_rxq_info = kzalloc(adap->num_uld * + sizeof(struct sge_uld_rxq_info *), + GFP_KERNEL); + if (!s->uld_rxq_info) + goto err_uld; + + return 0; +err_uld: + kfree(adap->uld); + return -ENOMEM; +} + +void uld_mem_free(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + kfree(s->uld_rxq_info); + kfree(adap->uld); +} + +static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) +{ + int i; + + lld->pdev = adap->pdev; + lld->pf = adap->pf; + lld->l2t = adap->l2t; + lld->tids = &adap->tids; + lld->ports = adap->port; + lld->vr = &adap->vres; + lld->mtus = adap->params.mtus; + lld->ntxq = adap->sge.iscsiqsets; + lld->nchan = adap->params.nports; + lld->nports = adap->params.nports; + lld->wr_cred = adap->params.ofldq_wr_cred; + lld->adapter_type = adap->params.chip; + lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; + lld->udb_density = 1 << adap->params.sge.eq_qpp; + lld->ucq_density = 1 << adap->params.sge.iq_qpp; + lld->filt_mode = adap->params.tp.vlan_pri_map; + /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ + for (i = 0; i < NCHAN; i++) + lld->tx_modq[i] = i; + lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); + lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); + lld->fw_vers = 
adap->params.fw_vers; + lld->dbfifo_int_thresh = dbfifo_int_thresh; + lld->sge_ingpadboundary = adap->sge.fl_align; + lld->sge_egrstatuspagesize = adap->sge.stat_len; + lld->sge_pktshift = adap->sge.pktshift; + lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lld->max_ordird_qp = adap->params.max_ordird_qp; + lld->max_ird_adapter = adap->params.max_ird_adapter; + lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; + lld->nodeid = dev_to_node(adap->pdev_dev); +} + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + uld_init(adap, &lli); + uld_queue_init(adap, uld, &lli); + + handle = adap->uld[uld].add(&lli); + if (IS_ERR(handle)) { + dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + adap->uld[uld].name, PTR_ERR(handle)); + return; + } + + adap->uld[uld].handle = handle; + + if (adap->flags & FULL_INIT_DONE) + adap->uld[uld].state_change(handle, CXGB4_STATE_UP); +} + +int cxgb4_register_pci_uld(enum cxgb4_pci_uld type, + struct cxgb4_pci_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_PCI_ULD_MAX) + return -EINVAL; + + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) { + if (!is_pci_uld(adap)) + continue; + ret = cfg_queues_uld(adap, type, p); + if (ret) + goto out; + ret = setup_sge_queues_uld(adap, type, p->lro); + if (ret) + goto free_queues; + if (adap->flags & USING_MSIX) { + name_msix_vecs_uld(adap, type); + ret = request_msix_queue_irqs_uld(adap, type); + if (ret) + goto free_rxq; + } + if (adap->flags & FULL_INIT_DONE) + enable_rx_uld(adap, type); + if (adap->uld[type].add) { + ret = -EBUSY; + goto free_irq; + } + adap->uld[type] = *p; + uld_attach(adap, type); + } + mutex_unlock(&uld_mutex); + return 0; + +free_irq: + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); +free_rxq: + free_sge_queues_uld(adap, type); +free_queues: + free_queues_uld(adap, type); +out: + mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_pci_uld); + +int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_PCI_ULD_MAX) + return -EINVAL; + + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) { + if (!is_pci_uld(adap)) + continue; + adap->uld[type].handle = NULL; + adap->uld[type].add = NULL; + if (adap->flags & FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); + free_sge_queues_uld(adap, type); + free_queues_uld(adap, type); + } + mutex_unlock(&uld_mutex); + + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_pci_uld); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index f3c58aaa932d..ab4037222f8d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -32,8 +32,8 @@ * SOFTWARE. 
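For context, this is roughly what a lookaside-crypto client of the new interface looks like. Everything prefixed my_ is hypothetical; only cxgb4_pci_uld_info (defined in the header below), cxgb4_register_pci_uld() and the callback signatures come from the patch.

struct my_crypto_dev {
	struct pci_dev *pdev;
};

static void *my_crypto_add(const struct cxgb4_lld_info *lld)
{
	struct my_crypto_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return ERR_PTR(-ENOMEM);	/* uld_attach() checks IS_ERR() */
	d->pdev = lld->pdev;
	return d;
}

static int my_crypto_rx(void *handle, const __be64 *rsp,
			const struct pkt_gl *gl)
{
	return 0;				/* CPL message consumed */
}

static int my_crypto_state_change(void *handle, enum cxgb4_state new_state)
{
	return 0;	/* must be non-NULL: called on CXGB4_STATE_UP/DETACH */
}

static struct cxgb4_pci_uld_info my_crypto_uld_info = {
	.name		= "my_crypto",
	.nrxq		= 4,
	.rxq_size	= 1024,
	.add		= my_crypto_add,
	.rx_handler	= my_crypto_rx,
	.state_change	= my_crypto_state_change,
};

/* Registration, e.g. from the client's module_init():
 *	cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &my_crypto_uld_info);
 */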
*/ -#ifndef __CXGB4_OFLD_H -#define __CXGB4_OFLD_H +#ifndef __CXGB4_ULD_H +#define __CXGB4_ULD_H #include <linux/cache.h> #include <linux/spinlock.h> @@ -296,8 +296,36 @@ struct cxgb4_uld_info { void (*lro_flush)(struct t4_lro_mgr *); }; +enum cxgb4_pci_uld { + CXGB4_PCI_ULD1, + CXGB4_PCI_ULD_MAX +}; + +struct cxgb4_pci_uld_info { + const char *name; + bool lro; + void *handle; + unsigned int nrxq; + unsigned int nciq; + unsigned int rxq_size; + unsigned int ciq_size; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); + int (*control)(void *handle, enum cxgb4_control control, ...); + int (*lro_rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl, + struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi); + void (*lro_flush)(struct t4_lro_mgr *); +}; + int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_register_pci_uld(enum cxgb4_pci_uld type, + struct cxgb4_pci_uld_info *p); +int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); unsigned int cxgb4_port_chan(const struct net_device *dev); @@ -330,4 +358,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, u64 *pbar2_qoffset, unsigned int *pbar2_qid); -#endif /* !__CXGB4_OFLD_H */ +#endif /* !__CXGB4_ULD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c new file mode 100644 index 000000000000..539de764bbd3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -0,0 +1,556 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/netdevice.h> + +#include "cxgb4.h" +#include "sched.h" + +/* Spinlock must be held by caller */ +static int t4_sched_class_fw_cmd(struct port_info *pi, + struct ch_sched_params *p, + enum sched_fw_ops op) +{ + struct adapter *adap = pi->adapter; + struct sched_table *s = pi->sched_tbl; + struct sched_class *e; + int err = 0; + + e = &s->tab[p->u.params.class]; + switch (op) { + case SCHED_FW_OP_ADD: + err = t4_sched_params(adap, p->type, + p->u.params.level, p->u.params.mode, + p->u.params.rateunit, + p->u.params.ratemode, + p->u.params.channel, e->idx, + p->u.params.minrate, p->u.params.maxrate, + p->u.params.weight, p->u.params.pktsize); + break; + default: + err = -ENOTSUPP; + break; + } + + return err; +} + +/* Spinlock must be held by caller */ +static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, + enum sched_bind_type type, bool bind) +{ + struct adapter *adap = pi->adapter; + u32 fw_mnem, fw_class, fw_param; + unsigned int pf = adap->pf; + unsigned int vf = 0; + int err = 0; + + switch (type) { + case SCHED_QUEUE: { + struct sched_queue_entry *qe; + + qe = (struct sched_queue_entry *)arg; + + /* Create a template for the FW_PARAMS_CMD mnemonic and + * value (TX Scheduling Class in this case). + */ + fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); + fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE; + fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id)); + + pf = adap->pf; + vf = 0; + break; + } + default: + err = -ENOTSUPP; + goto out; + } + + err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class); + +out: + return err; +} + +static struct sched_class *t4_sched_queue_lookup(struct port_info *pi, + const unsigned int qid, + int *index) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_class *e, *end; + struct sched_class *found = NULL; + int i; + + /* Look for a class with matching bound queue parameters */ + end = &s->tab[s->sched_size]; + for (e = &s->tab[0]; e != end; ++e) { + struct sched_queue_entry *qe; + + i = 0; + if (e->state == SCHED_STATE_UNUSED) + continue; + + list_for_each_entry(qe, &e->queue_list, list) { + if (qe->cntxt_id == qid) { + found = e; + if (index) + *index = i; + break; + } + i++; + } + + if (found) + break; + } + + return found; +} + +static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) +{ + struct adapter *adap = pi->adapter; + struct sched_class *e; + struct sched_queue_entry *qe = NULL; + struct sge_eth_txq *txq; + unsigned int qid; + int index = -1; + int err = 0; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return -ERANGE; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qid = txq->q.cntxt_id; + + /* Find the existing class that the queue is bound to */ + e = t4_sched_queue_lookup(pi, qid, &index); + if (e && index >= 0) { + int i = 0; + + spin_lock(&e->lock); + list_for_each_entry(qe, &e->queue_list, list) { + if (i == index) + break; + i++; + } + err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, + false); + if (err) { + spin_unlock(&e->lock); + goto out; + } + + list_del(&qe->list); + t4_free_mem(qe); + if (atomic_dec_and_test(&e->refcnt)) { + e->state = SCHED_STATE_UNUSED; + memset(&e->info, 0, sizeof(e->info)); + } + spin_unlock(&e->lock); + } +out: + return err; +} + +static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) +{ + struct adapter *adap = pi->adapter; + struct sched_table *s = pi->sched_tbl; + 
struct sched_class *e; + struct sched_queue_entry *qe = NULL; + struct sge_eth_txq *txq; + unsigned int qid; + int err = 0; + + if (p->queue < 0 || p->queue >= pi->nqsets) + return -ERANGE; + + qe = t4_alloc_mem(sizeof(struct sched_queue_entry)); + if (!qe) + return -ENOMEM; + + txq = &adap->sge.ethtxq[pi->first_qset + p->queue]; + qid = txq->q.cntxt_id; + + /* Unbind queue from any existing class */ + err = t4_sched_queue_unbind(pi, p); + if (err) + goto out; + + /* Bind queue to specified class */ + memset(qe, 0, sizeof(*qe)); + qe->cntxt_id = qid; + memcpy(&qe->param, p, sizeof(qe->param)); + + e = &s->tab[qe->param.class]; + spin_lock(&e->lock); + err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); + if (err) { + t4_free_mem(qe); + spin_unlock(&e->lock); + goto out; + } + + list_add_tail(&qe->list, &e->queue_list); + atomic_inc(&e->refcnt); + spin_unlock(&e->lock); +out: + return err; +} + +static void t4_sched_class_unbind_all(struct port_info *pi, + struct sched_class *e, + enum sched_bind_type type) +{ + if (!e) + return; + + switch (type) { + case SCHED_QUEUE: { + struct sched_queue_entry *qe; + + list_for_each_entry(qe, &e->queue_list, list) + t4_sched_queue_unbind(pi, &qe->param); + break; + } + default: + break; + } +} + +static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg, + enum sched_bind_type type, bool bind) +{ + int err = 0; + + if (!arg) + return -EINVAL; + + switch (type) { + case SCHED_QUEUE: { + struct ch_sched_queue *qe = (struct ch_sched_queue *)arg; + + if (bind) + err = t4_sched_queue_bind(pi, qe); + else + err = t4_sched_queue_unbind(pi, qe); + break; + } + default: + err = -ENOTSUPP; + break; + } + + return err; +} + +/** + * cxgb4_sched_class_bind - Bind an entity to a scheduling class + * @dev: net_device pointer + * @arg: Entity opaque data + * @type: Entity type (Queue) + * + * Binds an entity (queue) to a scheduling class. If the entity + * is bound to another class, it will be unbound from the other class + * and bound to the class specified in @arg. + */ +int cxgb4_sched_class_bind(struct net_device *dev, void *arg, + enum sched_bind_type type) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_table *s; + int err = 0; + u8 class_id; + + if (!can_sched(dev)) + return -ENOTSUPP; + + if (!arg) + return -EINVAL; + + switch (type) { + case SCHED_QUEUE: { + struct ch_sched_queue *qe = (struct ch_sched_queue *)arg; + + class_id = qe->class; + break; + } + default: + return -ENOTSUPP; + } + + if (!valid_class_id(dev, class_id)) + return -EINVAL; + + if (class_id == SCHED_CLS_NONE) + return -ENOTSUPP; + + s = pi->sched_tbl; + write_lock(&s->rw_lock); + err = t4_sched_class_bind_unbind_op(pi, arg, type, true); + write_unlock(&s->rw_lock); + + return err; +} + +/** + * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class + * @dev: net_device pointer + * @arg: Entity opaque data + * @type: Entity type (Queue) + * + * Unbinds an entity (queue) from a scheduling class. 
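Underneath both bind and unbind sits the FW_PARAMS_CMD word assembled in t4_sched_bind_unbind_op() above. Spelled out for a queue whose egress context id is, say, 100 (an arbitrary illustrative value):

/* Illustrative expansion of the bind/unbind firmware parameter word. */
u32 fw_param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
	       FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
	       FW_PARAMS_PARAM_YZ_V(100);	/* TX queue cntxt_id */
/* The paired value written via t4_set_params() is the class index on
 * bind, or FW_SCHED_CLS_NONE (0xffffffff) on unbind.
 */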
+ */ +int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, + enum sched_bind_type type) +{ + struct port_info *pi = netdev2pinfo(dev); + struct sched_table *s; + int err = 0; + u8 class_id; + + if (!can_sched(dev)) + return -ENOTSUPP; + + if (!arg) + return -EINVAL; + + switch (type) { + case SCHED_QUEUE: { + struct ch_sched_queue *qe = (struct ch_sched_queue *)arg; + + class_id = qe->class; + break; + } + default: + return -ENOTSUPP; + } + + if (!valid_class_id(dev, class_id)) + return -EINVAL; + + s = pi->sched_tbl; + write_lock(&s->rw_lock); + err = t4_sched_class_bind_unbind_op(pi, arg, type, false); + write_unlock(&s->rw_lock); + + return err; +} + +/* If @p is NULL, fetch any available unused class */ +static struct sched_class *t4_sched_class_lookup(struct port_info *pi, + const struct ch_sched_params *p) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_class *e, *end; + struct sched_class *found = NULL; + + if (!p) { + /* Get any available unused class */ + end = &s->tab[s->sched_size]; + for (e = &s->tab[0]; e != end; ++e) { + if (e->state == SCHED_STATE_UNUSED) { + found = e; + break; + } + } + } else { + /* Look for a class with matching scheduling parameters */ + struct ch_sched_params info; + struct ch_sched_params tp; + + memset(&info, 0, sizeof(info)); + memset(&tp, 0, sizeof(tp)); + + memcpy(&tp, p, sizeof(tp)); + /* Don't try to match class parameter */ + tp.u.params.class = SCHED_CLS_NONE; + + end = &s->tab[s->sched_size]; + for (e = &s->tab[0]; e != end; ++e) { + if (e->state == SCHED_STATE_UNUSED) + continue; + + memset(&info, 0, sizeof(info)); + memcpy(&info, &e->info, sizeof(info)); + /* Don't try to match class parameter */ + info.u.params.class = SCHED_CLS_NONE; + + if ((info.type == tp.type) && + (!memcmp(&info.u.params, &tp.u.params, + sizeof(info.u.params)))) { + found = e; + break; + } + } + } + + return found; +} + +static struct sched_class *t4_sched_class_alloc(struct port_info *pi, + struct ch_sched_params *p) +{ + struct sched_table *s = pi->sched_tbl; + struct sched_class *e; + u8 class_id; + int err; + + if (!p) + return NULL; + + class_id = p->u.params.class; + + /* Only accept search for existing class with matching params + * or allocation of new class with specified params + */ + if (class_id != SCHED_CLS_NONE) + return NULL; + + write_lock(&s->rw_lock); + /* See if there's an existing class with same + * requested sched params + */ + e = t4_sched_class_lookup(pi, p); + if (!e) { + struct ch_sched_params np; + + /* Fetch any available unused class */ + e = t4_sched_class_lookup(pi, NULL); + if (!e) + goto out; + + memset(&np, 0, sizeof(np)); + memcpy(&np, p, sizeof(np)); + np.u.params.class = e->idx; + + spin_lock(&e->lock); + /* New class */ + err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD); + if (err) { + spin_unlock(&e->lock); + e = NULL; + goto out; + } + memcpy(&e->info, &np, sizeof(e->info)); + atomic_set(&e->refcnt, 0); + e->state = SCHED_STATE_ACTIVE; + spin_unlock(&e->lock); + } + +out: + write_unlock(&s->rw_lock); + return e; +} + +/** + * cxgb4_sched_class_alloc - allocate a scheduling class + * @dev: net_device pointer + * @p: new scheduling class to create. + * + * Returns pointer to the scheduling class created. If @p is NULL, then + * it allocates and returns any available unused scheduling class. If a + * scheduling class with matching @p is found, then the matching class is + * returned.
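A consequence of the lookup-before-allocate logic above, shown as a usage sketch (illustrative; p is a ch_sched_params prepared as in the earlier example, with u.params.class set to SCHED_CLS_NONE):

/* Identical parameter sets share one hardware class. */
struct sched_class *a = cxgb4_sched_class_alloc(dev, &p);
struct sched_class *b = cxgb4_sched_class_alloc(dev, &p);	/* a == b */
/* The class is recycled (state -> SCHED_STATE_UNUSED) only after the
 * last bound queue is unbound and its refcnt drops to zero.
 */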
+ */ +struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, + struct ch_sched_params *p) +{ + struct port_info *pi = netdev2pinfo(dev); + u8 class_id; + + if (!can_sched(dev)) + return NULL; + + class_id = p->u.params.class; + if (!valid_class_id(dev, class_id)) + return NULL; + + return t4_sched_class_alloc(pi, p); +} + +static void t4_sched_class_free(struct port_info *pi, struct sched_class *e) +{ + t4_sched_class_unbind_all(pi, e, SCHED_QUEUE); +} + +struct sched_table *t4_init_sched(unsigned int sched_size) +{ + struct sched_table *s; + unsigned int i; + + s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class)); + if (!s) + return NULL; + + s->sched_size = sched_size; + rwlock_init(&s->rw_lock); + + for (i = 0; i < s->sched_size; i++) { + memset(&s->tab[i], 0, sizeof(struct sched_class)); + s->tab[i].idx = i; + s->tab[i].state = SCHED_STATE_UNUSED; + INIT_LIST_HEAD(&s->tab[i].queue_list); + spin_lock_init(&s->tab[i].lock); + atomic_set(&s->tab[i].refcnt, 0); + } + return s; +} + +void t4_cleanup_sched(struct adapter *adap) +{ + struct sched_table *s; + unsigned int i, j; + + for_each_port(adap, j) { + struct port_info *pi = netdev2pinfo(adap->port[j]); + + s = pi->sched_tbl; + for (i = 0; i < s->sched_size; i++) { + struct sched_class *e; + + write_lock(&s->rw_lock); + e = &s->tab[i]; + if (e->state == SCHED_STATE_ACTIVE) + t4_sched_class_free(pi, e); + write_unlock(&s->rw_lock); + } + t4_free_mem(s); + } +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h new file mode 100644 index 000000000000..77b2b3fd9021 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -0,0 +1,110 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#ifndef __CXGB4_SCHED_H +#define __CXGB4_SCHED_H + +#include <linux/spinlock.h> +#include <linux/atomic.h> + +#define SCHED_CLS_NONE 0xff + +#define FW_SCHED_CLS_NONE 0xffffffff + +/* Max rate that can be set to a scheduling class is 10 Gbps */ +#define SCHED_MAX_RATE_KBPS 10000000U + +enum { + SCHED_STATE_ACTIVE, + SCHED_STATE_UNUSED, +}; + +enum sched_fw_ops { + SCHED_FW_OP_ADD, +}; + +enum sched_bind_type { + SCHED_QUEUE, +}; + +struct sched_queue_entry { + struct list_head list; + unsigned int cntxt_id; + struct ch_sched_queue param; +}; + +struct sched_class { + u8 state; + u8 idx; + struct ch_sched_params info; + struct list_head queue_list; + spinlock_t lock; /* Per class lock */ + atomic_t refcnt; +}; + +struct sched_table { /* per port scheduling table */ + u8 sched_size; + rwlock_t rw_lock; /* Table lock */ + struct sched_class tab[0]; +}; + +static inline bool can_sched(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + + return !pi->sched_tbl ? false : true; +} + +static inline bool valid_class_id(struct net_device *dev, u8 class_id) +{ + struct port_info *pi = netdev2pinfo(dev); + + if ((class_id > pi->sched_tbl->sched_size - 1) && + (class_id != SCHED_CLS_NONE)) + return false; + + return true; +} + +int cxgb4_sched_class_bind(struct net_device *dev, void *arg, + enum sched_bind_type type); +int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, + enum sched_bind_type type); + +struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, + struct ch_sched_params *p); + +struct sched_table *t4_init_sched(unsigned int size); +void t4_cleanup_sched(struct adapter *adap); +#endif /* __CXGB4_SCHED_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ad3552df0545..9a607dbc6ca8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2928,8 +2928,8 @@ static void free_txq(struct adapter *adap, struct sge_txq *q) q->desc = NULL; } -static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, - struct sge_fl *fl) +void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) { struct sge *s = &adap->sge; unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index dc92c80a75f4..de451ee2ba75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU
@@ -8264,3 +8264,73 @@ void t4_idma_monitor(struct adapter *adapter,
 			t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
 	}
 }
+
+/**
+ *	t4_set_vf_mac_acl - Set MAC address for the specified VF
+ *	@adapter: The adapter
+ *	@vf: one of the VFs instantiated by the specified PF
+ *	@naddr: the number of MAC addresses
+ *	@addr: the MAC address(es) to be set to the specified VF
+ */
+int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
+		      unsigned int naddr, u8 *addr)
+{
+	struct fw_acl_mac_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F |
+				    FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
+				    FW_ACL_MAC_CMD_VFN_V(vf));
+
+	/* Note: Do not enable the ACL */
+	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
+	cmd.nmac = naddr;
+
+	switch (adapter->pf) {
+	case 3:
+		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
+		break;
+	case 2:
+		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
+		break;
+	case 1:
+		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
+		break;
+	case 0:
+		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
+		break;
+	}
+
+	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
+}
+
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+		    int rateunit, int ratemode, int channel, int class,
+		    int minrate, int maxrate, int weight, int pktsize)
+{
+	struct fw_sched_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
+				      FW_CMD_REQUEST_F |
+				      FW_CMD_WRITE_F);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+	cmd.u.params.type = type;
+	cmd.u.params.level = level;
+	cmd.u.params.mode = mode;
+	cmd.u.params.ch = channel;
+	cmd.u.params.cl = class;
+	cmd.u.params.unit = rateunit;
+	cmd.u.params.rate = ratemode;
+	cmd.u.params.min = cpu_to_be32(minrate);
+	cmd.u.params.max = cpu_to_be32(maxrate);
+	cmd.u.params.weight = cpu_to_be16(weight);
+	cmd.u.params.pktsize = cpu_to_be16(pktsize);
+
+	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+			       NULL, 1);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index e0ebe1378cb2..fba3b2ad382d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -61,6 +61,7 @@ enum {
 	CPL_ABORT_REQ_RSS     = 0x2B,
 	CPL_ABORT_RPL_RSS     = 0x2D,
 
+	CPL_RX_PHYS_ADDR      = 0x30,
 	CPL_CLOSE_CON_RPL     = 0x32,
 	CPL_ISCSI_HDR         = 0x33,
 	CPL_RDMA_CQE          = 0x35,
@@ -83,6 +84,10 @@ enum {
 	CPL_PASS_OPEN_REQ6    = 0x81,
 	CPL_ACT_OPEN_REQ6     = 0x83,
 
+	CPL_TX_TLS_PDU        = 0x88,
+	CPL_TX_SEC_PDU        = 0x8A,
+	CPL_TX_TLS_ACK        = 0x8B,
+
 	CPL_RDMA_TERMINATE    = 0xA2,
 	CPL_RDMA_WRITE        = 0xA4,
 	CPL_SGE_EGR_UPDATE    = 0xA5,
@@ -94,6 +99,8 @@ enum {
 	CPL_FW4_PLD           = 0xC1,
 	CPL_FW4_ACK           = 0xC3,
 
+	CPL_RX_PHYS_DSGL      = 0xD0,
+
 	CPL_FW6_MSG           = 0xE0,
 	CPL_FW6_PLD           = 0xE1,
 	CPL_TX_PKT_LSO        = 0xED,
@@ -1362,6 +1369,15 @@ struct ulptx_idata {
 	__be32 len;
 };
 
+struct ulp_txpkt {
+	__be32 cmd_dest;
+	__be32 len;
+};
+
+#define ULPTX_CMD_S    24
+#define ULPTX_CMD_M    0xFF
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
 #define ULPTX_NSGE_S    0
 #define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
 
@@ -1369,6 +1385,22 @@ struct ulptx_idata {
 #define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S)
 #define ULPTX_MORE_F    ULPTX_MORE_V(1U)
 
+#define ULP_TXPKT_DEST_S    16
+#define ULP_TXPKT_DEST_M    0x3
+#define ULP_TXPKT_DEST_V(x) ((x) << ULP_TXPKT_DEST_S)
+
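All of the field accessors in these headers follow one convention: *_S is the bit offset of a field, *_M the mask of its width, *_V(x) shifts a value into position, *_G(x) extracts it again, and *_F is the pre-shifted value of a one-bit flag. Below is a minimal illustrative sketch of that pattern, not code from the patch, using only the ulp_txpkt helpers defined above; the cmd and dest argument values a caller would pass are hypothetical.

/* Illustrative sketch: pack and unpack the big-endian cmd_dest word of
 * struct ulp_txpkt with the shift/mask helpers defined above.
 */
static inline __be32 ulp_txpkt_mk_cmd_dest(u32 cmd, u32 dest)
{
	/* *_V() shifts each value into its field position */
	return cpu_to_be32(ULPTX_CMD_V(cmd) |		/* bits 31:24 */
			   ULP_TXPKT_DEST_V(dest));	/* bits 17:16 */
}

static inline u32 ulp_txpkt_get_dest(__be32 cmd_dest)
{
	/* the *_G() pattern: shift down by *_S, then mask with *_M */
	return (be32_to_cpu(cmd_dest) >> ULP_TXPKT_DEST_S) & ULP_TXPKT_DEST_M;
}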
+#define ULP_TXPKT_FID_S     4
+#define ULP_TXPKT_FID_M     0x7ff
+#define ULP_TXPKT_FID_V(x)  ((x) << ULP_TXPKT_FID_S)
+
+#define ULP_TXPKT_RO_S    3
+#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
+#define ULP_TXPKT_RO_F    ULP_TXPKT_RO_V(1U)
+
+#define ULP_TX_SC_MORE_S    23
+#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
+#define ULP_TX_SC_MORE_F    ULP_TX_SC_MORE_V(1U)
+
 struct ulp_mem_io {
 	WR_HDR;
 	__be32 cmd;
@@ -1406,4 +1438,409 @@ struct ulp_mem_io {
 #define ULP_MEMIO_DATA_LEN_S    0
 #define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
 
+#define ULPTX_NSGE_S    0
+#define ULPTX_NSGE_M    0xFFFF
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+#define ULPTX_NSGE_G(x) (((x) >> ULPTX_NSGE_S) & ULPTX_NSGE_M)
+
+struct ulptx_sc_memrd {
+	__be32 cmd_to_len;
+	__be32 addr;
+};
+
+#define ULP_TXPKT_DATAMODIFY_S    23
+#define ULP_TXPKT_DATAMODIFY_M    0x1
+#define ULP_TXPKT_DATAMODIFY_V(x) ((x) << ULP_TXPKT_DATAMODIFY_S)
+#define ULP_TXPKT_DATAMODIFY_G(x) \
+	(((x) >> ULP_TXPKT_DATAMODIFY_S) & ULP_TXPKT_DATAMODIFY_M)
+#define ULP_TXPKT_DATAMODIFY_F    ULP_TXPKT_DATAMODIFY_V(1U)
+
+#define ULP_TXPKT_CHANNELID_S    22
+#define ULP_TXPKT_CHANNELID_M    0x1
+#define ULP_TXPKT_CHANNELID_V(x) ((x) << ULP_TXPKT_CHANNELID_S)
+#define ULP_TXPKT_CHANNELID_G(x) \
+	(((x) >> ULP_TXPKT_CHANNELID_S) & ULP_TXPKT_CHANNELID_M)
+#define ULP_TXPKT_CHANNELID_F    ULP_TXPKT_CHANNELID_V(1U)
+
+#define SCMD_SEQ_NO_CTRL_S    29
+#define SCMD_SEQ_NO_CTRL_M    0x3
+#define SCMD_SEQ_NO_CTRL_V(x) ((x) << SCMD_SEQ_NO_CTRL_S)
+#define SCMD_SEQ_NO_CTRL_G(x) \
+	(((x) >> SCMD_SEQ_NO_CTRL_S) & SCMD_SEQ_NO_CTRL_M)
+
+/* StsFieldPrsnt - Status field at the end of the TLS PDU */
+#define SCMD_STATUS_PRESENT_S    28
+#define SCMD_STATUS_PRESENT_M    0x1
+#define SCMD_STATUS_PRESENT_V(x) ((x) << SCMD_STATUS_PRESENT_S)
+#define SCMD_STATUS_PRESENT_G(x) \
+	(((x) >> SCMD_STATUS_PRESENT_S) & SCMD_STATUS_PRESENT_M)
+#define SCMD_STATUS_PRESENT_F    SCMD_STATUS_PRESENT_V(1U)
+
+/* ProtoVersion - Protocol Version 0: 1.2, 1:1.1, 2:DTLS, 3:Generic,
+ * 4-15: Reserved.
+ */
+#define SCMD_PROTO_VERSION_S    24
+#define SCMD_PROTO_VERSION_M    0xf
+#define SCMD_PROTO_VERSION_V(x) ((x) << SCMD_PROTO_VERSION_S)
+#define SCMD_PROTO_VERSION_G(x) \
+	(((x) >> SCMD_PROTO_VERSION_S) & SCMD_PROTO_VERSION_M)
+
+/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */
+#define SCMD_ENC_DEC_CTRL_S    23
+#define SCMD_ENC_DEC_CTRL_M    0x1
+#define SCMD_ENC_DEC_CTRL_V(x) ((x) << SCMD_ENC_DEC_CTRL_S)
+#define SCMD_ENC_DEC_CTRL_G(x) \
+	(((x) >> SCMD_ENC_DEC_CTRL_S) & SCMD_ENC_DEC_CTRL_M)
+#define SCMD_ENC_DEC_CTRL_F    SCMD_ENC_DEC_CTRL_V(1U)
+
+/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */
+#define SCMD_CIPH_AUTH_SEQ_CTRL_S    22
+#define SCMD_CIPH_AUTH_SEQ_CTRL_M    0x1
+#define SCMD_CIPH_AUTH_SEQ_CTRL_V(x) \
+	((x) << SCMD_CIPH_AUTH_SEQ_CTRL_S)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_G(x) \
+	(((x) >> SCMD_CIPH_AUTH_SEQ_CTRL_S) & SCMD_CIPH_AUTH_SEQ_CTRL_M)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_F    SCMD_CIPH_AUTH_SEQ_CTRL_V(1U)
+
+/* CiphMode - Cipher Mode. 0: NOP, 1:AES-CBC, 2:AES-GCM, 3:AES-CTR,
+ * 4:Generic-AES, 5-15: Reserved.
+ */
+#define SCMD_CIPH_MODE_S    18
+#define SCMD_CIPH_MODE_M    0xf
+#define SCMD_CIPH_MODE_V(x) ((x) << SCMD_CIPH_MODE_S)
+#define SCMD_CIPH_MODE_G(x) \
+	(((x) >> SCMD_CIPH_MODE_S) & SCMD_CIPH_MODE_M)
+
+/* AuthMode - Auth Mode. 0: NOP, 1:SHA1, 2:SHA2-224, 3:SHA2-256,
+ * 4-15: Reserved
+ */
+#define SCMD_AUTH_MODE_S    14
+#define SCMD_AUTH_MODE_M    0xf
+#define SCMD_AUTH_MODE_V(x) ((x) << SCMD_AUTH_MODE_S)
+#define SCMD_AUTH_MODE_G(x) \
+	(((x) >> SCMD_AUTH_MODE_S) & SCMD_AUTH_MODE_M)
+
+/* HmacCtrl - HMAC Control. 0:NOP, 1:No truncation, 2:Support HMAC Truncation
+ * per RFC 4366, 3:IPSec 96 bits, 4-7:Reserved
+ */
+#define SCMD_HMAC_CTRL_S    11
+#define SCMD_HMAC_CTRL_M    0x7
+#define SCMD_HMAC_CTRL_V(x) ((x) << SCMD_HMAC_CTRL_S)
+#define SCMD_HMAC_CTRL_G(x) \
+	(((x) >> SCMD_HMAC_CTRL_S) & SCMD_HMAC_CTRL_M)
+
+/* IvSize - IV size in units of 2 bytes */
+#define SCMD_IV_SIZE_S    7
+#define SCMD_IV_SIZE_M    0xf
+#define SCMD_IV_SIZE_V(x) ((x) << SCMD_IV_SIZE_S)
+#define SCMD_IV_SIZE_G(x) \
+	(((x) >> SCMD_IV_SIZE_S) & SCMD_IV_SIZE_M)
+
+/* NumIVs - Number of IVs */
+#define SCMD_NUM_IVS_S    0
+#define SCMD_NUM_IVS_M    0x7f
+#define SCMD_NUM_IVS_V(x) ((x) << SCMD_NUM_IVS_S)
+#define SCMD_NUM_IVS_G(x) \
+	(((x) >> SCMD_NUM_IVS_S) & SCMD_NUM_IVS_M)
+
+/* EnbDbgId - If this is enabled, the upper 20 (63:44) bits of SeqNumber
+ * (below) are used as Cid (connection id for debug status); these
+ * bits are padded to zero for forming the 64 bit
+ * sequence number for TLS
+ */
+#define SCMD_ENB_DBGID_S    31
+#define SCMD_ENB_DBGID_M    0x1
+#define SCMD_ENB_DBGID_V(x) ((x) << SCMD_ENB_DBGID_S)
+#define SCMD_ENB_DBGID_G(x) \
+	(((x) >> SCMD_ENB_DBGID_S) & SCMD_ENB_DBGID_M)
+
+/* IV generation in SW. */
+#define SCMD_IV_GEN_CTRL_S    30
+#define SCMD_IV_GEN_CTRL_M    0x1
+#define SCMD_IV_GEN_CTRL_V(x) ((x) << SCMD_IV_GEN_CTRL_S)
+#define SCMD_IV_GEN_CTRL_G(x) \
+	(((x) >> SCMD_IV_GEN_CTRL_S) & SCMD_IV_GEN_CTRL_M)
+#define SCMD_IV_GEN_CTRL_F    SCMD_IV_GEN_CTRL_V(1U)
+
+/* More frags */
+#define SCMD_MORE_FRAGS_S    20
+#define SCMD_MORE_FRAGS_M    0x1
+#define SCMD_MORE_FRAGS_V(x) ((x) << SCMD_MORE_FRAGS_S)
+#define SCMD_MORE_FRAGS_G(x) (((x) >> SCMD_MORE_FRAGS_S) & SCMD_MORE_FRAGS_M)
+
+/* Last frag */
+#define SCMD_LAST_FRAG_S    19
+#define SCMD_LAST_FRAG_M    0x1
+#define SCMD_LAST_FRAG_V(x) ((x) << SCMD_LAST_FRAG_S)
+#define SCMD_LAST_FRAG_G(x) (((x) >> SCMD_LAST_FRAG_S) & SCMD_LAST_FRAG_M)
+
+/* TlsCompPdu */
+#define SCMD_TLS_COMPPDU_S    18
+#define SCMD_TLS_COMPPDU_M    0x1
+#define SCMD_TLS_COMPPDU_V(x) ((x) << SCMD_TLS_COMPPDU_S)
+#define SCMD_TLS_COMPPDU_G(x) (((x) >> SCMD_TLS_COMPPDU_S) & SCMD_TLS_COMPPDU_M)
+
+/* KeyCntxtInline - Key context inline after the scmd OR PayloadOnly */
+#define SCMD_KEY_CTX_INLINE_S    17
+#define SCMD_KEY_CTX_INLINE_M    0x1
+#define SCMD_KEY_CTX_INLINE_V(x) ((x) << SCMD_KEY_CTX_INLINE_S)
+#define SCMD_KEY_CTX_INLINE_G(x) \
+	(((x) >> SCMD_KEY_CTX_INLINE_S) & SCMD_KEY_CTX_INLINE_M)
+#define SCMD_KEY_CTX_INLINE_F    SCMD_KEY_CTX_INLINE_V(1U)
+
+/* TLSFragEnable - 0: Host created TLS PDUs, 1: TLS Fragmentation in ASIC */
+#define SCMD_TLS_FRAG_ENABLE_S    16
+#define SCMD_TLS_FRAG_ENABLE_M    0x1
+#define SCMD_TLS_FRAG_ENABLE_V(x) ((x) << SCMD_TLS_FRAG_ENABLE_S)
+#define SCMD_TLS_FRAG_ENABLE_G(x) \
+	(((x) >> SCMD_TLS_FRAG_ENABLE_S) & SCMD_TLS_FRAG_ENABLE_M)
+#define SCMD_TLS_FRAG_ENABLE_F    SCMD_TLS_FRAG_ENABLE_V(1U)
+
+/* MacOnly - Only send the MAC and discard PDU. This is valid for hash only
+ * modes; in this case TLS_TX will drop the PDU and only
+ * send back the MAC bytes.
+ */ +#define SCMD_MAC_ONLY_S 15 +#define SCMD_MAC_ONLY_M 0x1 +#define SCMD_MAC_ONLY_V(x) ((x) << SCMD_MAC_ONLY_S) +#define SCMD_MAC_ONLY_G(x) \ + (((x) >> SCMD_MAC_ONLY_S) & SCMD_MAC_ONLY_M) +#define SCMD_MAC_ONLY_F SCMD_MAC_ONLY_V(1U) + +/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols + * which have complex AAD and IV formations Eg:AES-CCM + */ +#define SCMD_AADIVDROP_S 14 +#define SCMD_AADIVDROP_M 0x1 +#define SCMD_AADIVDROP_V(x) ((x) << SCMD_AADIVDROP_S) +#define SCMD_AADIVDROP_G(x) \ + (((x) >> SCMD_AADIVDROP_S) & SCMD_AADIVDROP_M) +#define SCMD_AADIVDROP_F SCMD_AADIVDROP_V(1U) + +/* HdrLength - Length of all headers excluding TLS header + * present before start of crypto PDU/payload. + */ +#define SCMD_HDR_LEN_S 0 +#define SCMD_HDR_LEN_M 0x3fff +#define SCMD_HDR_LEN_V(x) ((x) << SCMD_HDR_LEN_S) +#define SCMD_HDR_LEN_G(x) \ + (((x) >> SCMD_HDR_LEN_S) & SCMD_HDR_LEN_M) + +struct cpl_tx_sec_pdu { + __be32 op_ivinsrtofst; + __be32 pldlen; + __be32 aadstart_cipherstop_hi; + __be32 cipherstop_lo_authinsert; + __be32 seqno_numivs; + __be32 ivgen_hdrlen; + __be64 scmd1; +}; + +#define CPL_TX_SEC_PDU_OPCODE_S 24 +#define CPL_TX_SEC_PDU_OPCODE_M 0xff +#define CPL_TX_SEC_PDU_OPCODE_V(x) ((x) << CPL_TX_SEC_PDU_OPCODE_S) +#define CPL_TX_SEC_PDU_OPCODE_G(x) \ + (((x) >> CPL_TX_SEC_PDU_OPCODE_S) & CPL_TX_SEC_PDU_OPCODE_M) + +/* RX Channel Id */ +#define CPL_TX_SEC_PDU_RXCHID_S 22 +#define CPL_TX_SEC_PDU_RXCHID_M 0x1 +#define CPL_TX_SEC_PDU_RXCHID_V(x) ((x) << CPL_TX_SEC_PDU_RXCHID_S) +#define CPL_TX_SEC_PDU_RXCHID_G(x) \ + (((x) >> CPL_TX_SEC_PDU_RXCHID_S) & CPL_TX_SEC_PDU_RXCHID_M) +#define CPL_TX_SEC_PDU_RXCHID_F CPL_TX_SEC_PDU_RXCHID_V(1U) + +/* Ack Follows */ +#define CPL_TX_SEC_PDU_ACKFOLLOWS_S 21 +#define CPL_TX_SEC_PDU_ACKFOLLOWS_M 0x1 +#define CPL_TX_SEC_PDU_ACKFOLLOWS_V(x) ((x) << CPL_TX_SEC_PDU_ACKFOLLOWS_S) +#define CPL_TX_SEC_PDU_ACKFOLLOWS_G(x) \ + (((x) >> CPL_TX_SEC_PDU_ACKFOLLOWS_S) & CPL_TX_SEC_PDU_ACKFOLLOWS_M) +#define CPL_TX_SEC_PDU_ACKFOLLOWS_F CPL_TX_SEC_PDU_ACKFOLLOWS_V(1U) + +/* Loopback bit in cpl_tx_sec_pdu */ +#define CPL_TX_SEC_PDU_ULPTXLPBK_S 20 +#define CPL_TX_SEC_PDU_ULPTXLPBK_M 0x1 +#define CPL_TX_SEC_PDU_ULPTXLPBK_V(x) ((x) << CPL_TX_SEC_PDU_ULPTXLPBK_S) +#define CPL_TX_SEC_PDU_ULPTXLPBK_G(x) \ + (((x) >> CPL_TX_SEC_PDU_ULPTXLPBK_S) & CPL_TX_SEC_PDU_ULPTXLPBK_M) +#define CPL_TX_SEC_PDU_ULPTXLPBK_F CPL_TX_SEC_PDU_ULPTXLPBK_V(1U) + +/* Length of cpl header encapsulated */ +#define CPL_TX_SEC_PDU_CPLLEN_S 16 +#define CPL_TX_SEC_PDU_CPLLEN_M 0xf +#define CPL_TX_SEC_PDU_CPLLEN_V(x) ((x) << CPL_TX_SEC_PDU_CPLLEN_S) +#define CPL_TX_SEC_PDU_CPLLEN_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CPLLEN_S) & CPL_TX_SEC_PDU_CPLLEN_M) + +/* PlaceHolder */ +#define CPL_TX_SEC_PDU_PLACEHOLDER_S 10 +#define CPL_TX_SEC_PDU_PLACEHOLDER_M 0x1 +#define CPL_TX_SEC_PDU_PLACEHOLDER_V(x) ((x) << CPL_TX_SEC_PDU_PLACEHOLDER_S) +#define CPL_TX_SEC_PDU_PLACEHOLDER_G(x) \ + (((x) >> CPL_TX_SEC_PDU_PLACEHOLDER_S) & \ + CPL_TX_SEC_PDU_PLACEHOLDER_M) + +/* IvInsrtOffset: Insertion location for IV */ +#define CPL_TX_SEC_PDU_IVINSRTOFST_S 0 +#define CPL_TX_SEC_PDU_IVINSRTOFST_M 0x3ff +#define CPL_TX_SEC_PDU_IVINSRTOFST_V(x) ((x) << CPL_TX_SEC_PDU_IVINSRTOFST_S) +#define CPL_TX_SEC_PDU_IVINSRTOFST_G(x) \ + (((x) >> CPL_TX_SEC_PDU_IVINSRTOFST_S) & \ + CPL_TX_SEC_PDU_IVINSRTOFST_M) + +/* AadStartOffset: Offset in bytes for AAD start from + * the first byte following the pkt headers (0-255 bytes) + */ +#define CPL_TX_SEC_PDU_AADSTART_S 24 +#define CPL_TX_SEC_PDU_AADSTART_M 0xff +#define 
CPL_TX_SEC_PDU_AADSTART_V(x) ((x) << CPL_TX_SEC_PDU_AADSTART_S) +#define CPL_TX_SEC_PDU_AADSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AADSTART_S) & \ + CPL_TX_SEC_PDU_AADSTART_M) + +/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following + * the pkt headers (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_AADSTOP_S 15 +#define CPL_TX_SEC_PDU_AADSTOP_M 0x1ff +#define CPL_TX_SEC_PDU_AADSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AADSTOP_S) +#define CPL_TX_SEC_PDU_AADSTOP_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AADSTOP_S) & CPL_TX_SEC_PDU_AADSTOP_M) + +/* CipherStartOffset: offset in bytes for encryption/decryption start from the + * first byte following the pkt headers (0-1023 bytes) + */ +#define CPL_TX_SEC_PDU_CIPHERSTART_S 5 +#define CPL_TX_SEC_PDU_CIPHERSTART_M 0x3ff +#define CPL_TX_SEC_PDU_CIPHERSTART_V(x) ((x) << CPL_TX_SEC_PDU_CIPHERSTART_S) +#define CPL_TX_SEC_PDU_CIPHERSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTART_S) & \ + CPL_TX_SEC_PDU_CIPHERSTART_M) + +/* CipherStopOffset: offset in bytes for encryption/decryption end + * from end of the payload of this command (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_S 0 +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_M 0x1f +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(x) \ + ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) & \ + CPL_TX_SEC_PDU_CIPHERSTOP_HI_M) + +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_S 28 +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_M 0xf +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(x) \ + ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) & \ + CPL_TX_SEC_PDU_CIPHERSTOP_LO_M) + +/* AuthStartOffset: offset in bytes for authentication start from + * the first byte following the pkt headers (0-1023) + */ +#define CPL_TX_SEC_PDU_AUTHSTART_S 18 +#define CPL_TX_SEC_PDU_AUTHSTART_M 0x3ff +#define CPL_TX_SEC_PDU_AUTHSTART_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTART_S) +#define CPL_TX_SEC_PDU_AUTHSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHSTART_S) & \ + CPL_TX_SEC_PDU_AUTHSTART_M) + +/* AuthStopOffset: offset in bytes for authentication + * end from end of the payload of this command (0-511 Bytes) + */ +#define CPL_TX_SEC_PDU_AUTHSTOP_S 9 +#define CPL_TX_SEC_PDU_AUTHSTOP_M 0x1ff +#define CPL_TX_SEC_PDU_AUTHSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTOP_S) +#define CPL_TX_SEC_PDU_AUTHSTOP_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHSTOP_S) & \ + CPL_TX_SEC_PDU_AUTHSTOP_M) + +/* AuthInsrtOffset: offset in bytes for authentication insertion + * from end of the payload of this command (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_AUTHINSERT_S 0 +#define CPL_TX_SEC_PDU_AUTHINSERT_M 0x1ff +#define CPL_TX_SEC_PDU_AUTHINSERT_V(x) ((x) << CPL_TX_SEC_PDU_AUTHINSERT_S) +#define CPL_TX_SEC_PDU_AUTHINSERT_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHINSERT_S) & \ + CPL_TX_SEC_PDU_AUTHINSERT_M) + +struct cpl_rx_phys_dsgl { + __be32 op_to_tid; + __be32 pcirlxorder_to_noofsgentr; + struct rss_header rss_hdr_int; +}; + +#define CPL_RX_PHYS_DSGL_OPCODE_S 24 +#define CPL_RX_PHYS_DSGL_OPCODE_M 0xff +#define CPL_RX_PHYS_DSGL_OPCODE_V(x) ((x) << CPL_RX_PHYS_DSGL_OPCODE_S) +#define CPL_RX_PHYS_DSGL_OPCODE_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_OPCODE_S) & CPL_RX_PHYS_DSGL_OPCODE_M) + +#define CPL_RX_PHYS_DSGL_ISRDMA_S 23 +#define CPL_RX_PHYS_DSGL_ISRDMA_M 0x1 +#define CPL_RX_PHYS_DSGL_ISRDMA_V(x) ((x) << CPL_RX_PHYS_DSGL_ISRDMA_S) +#define CPL_RX_PHYS_DSGL_ISRDMA_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_ISRDMA_S) & CPL_RX_PHYS_DSGL_ISRDMA_M) +#define 
CPL_RX_PHYS_DSGL_ISRDMA_F CPL_RX_PHYS_DSGL_ISRDMA_V(1U) + +#define CPL_RX_PHYS_DSGL_RSVD1_S 20 +#define CPL_RX_PHYS_DSGL_RSVD1_M 0x7 +#define CPL_RX_PHYS_DSGL_RSVD1_V(x) ((x) << CPL_RX_PHYS_DSGL_RSVD1_S) +#define CPL_RX_PHYS_DSGL_RSVD1_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_RSVD1_S) & \ + CPL_RX_PHYS_DSGL_RSVD1_M) + +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_S 31 +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_M 0x1 +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCIRLXORDER_S) +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCIRLXORDER_S) & \ + CPL_RX_PHYS_DSGL_PCIRLXORDER_M) +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_F CPL_RX_PHYS_DSGL_PCIRLXORDER_V(1U) + +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_S 30 +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_M 0x1 +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCINOSNOOP_S) +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCINOSNOOP_S) & \ + CPL_RX_PHYS_DSGL_PCINOSNOOP_M) + +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_F CPL_RX_PHYS_DSGL_PCINOSNOOP_V(1U) + +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_S 29 +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_M 0x1 +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCITPHNTENB_S) +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCITPHNTENB_S) & \ + CPL_RX_PHYS_DSGL_PCITPHNTENB_M) +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_F CPL_RX_PHYS_DSGL_PCITPHNTENB_V(1U) + +#define CPL_RX_PHYS_DSGL_PCITPHNT_S 27 +#define CPL_RX_PHYS_DSGL_PCITPHNT_M 0x3 +#define CPL_RX_PHYS_DSGL_PCITPHNT_V(x) ((x) << CPL_RX_PHYS_DSGL_PCITPHNT_S) +#define CPL_RX_PHYS_DSGL_PCITPHNT_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCITPHNT_S) & \ + CPL_RX_PHYS_DSGL_PCITPHNT_M) + +#define CPL_RX_PHYS_DSGL_DCAID_S 16 +#define CPL_RX_PHYS_DSGL_DCAID_M 0x7ff +#define CPL_RX_PHYS_DSGL_DCAID_V(x) ((x) << CPL_RX_PHYS_DSGL_DCAID_S) +#define CPL_RX_PHYS_DSGL_DCAID_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_DCAID_S) & \ + CPL_RX_PHYS_DSGL_DCAID_M) + +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_S 0 +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_M 0xffff +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_NOOFSGENTR_S) +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \ + CPL_RX_PHYS_DSGL_NOOFSGENTR_M) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a89b30720e38..ffe4bf4b96da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1,7 +1,7 @@ /* * This file is part of the Chelsio T4 Ethernet driver for Linux. * - * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved. + * Copyright (c) 2009-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -102,6 +102,7 @@ enum fw_wr_opcodes { FW_RI_FR_NSMR_WR = 0x19, FW_RI_INV_LSTAG_WR = 0x1a, FW_ISCSI_TX_DATA_WR = 0x45, + FW_CRYPTO_LOOKASIDE_WR = 0X6d, FW_LASTC2E_WR = 0x70 }; @@ -680,6 +681,7 @@ enum fw_cmd_opcodes { FW_RSS_IND_TBL_CMD = 0x20, FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_VI_CONFIG_CMD = 0x23, + FW_SCHED_CMD = 0x24, FW_DEVLOG_CMD = 0x25, FW_CLIP_CMD = 0x28, FW_LASTC2E_CMD = 0x40, @@ -1060,7 +1062,7 @@ struct fw_caps_config_cmd { __be16 niccaps; __be16 ofldcaps; __be16 rdmacaps; - __be16 r4; + __be16 cryptocaps; __be16 iscsicaps; __be16 fcoecaps; __be32 cfcsum; @@ -2961,6 +2963,41 @@ struct fw_rss_vi_config_cmd { #define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S) #define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U) +enum fw_sched_sc { + FW_SCHED_SC_PARAMS = 1, +}; + +struct fw_sched_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_sched { + struct fw_sched_config { + __u8 sc; + __u8 type; + __u8 minmaxen; + __u8 r3[5]; + __u8 nclasses[4]; + __be32 r4; + } config; + struct fw_sched_params { + __u8 sc; + __u8 type; + __u8 level; + __u8 mode; + __u8 unit; + __u8 rate; + __u8 ch; + __u8 cl; + __be32 min; + __be32 max; + __be16 weight; + __be16 pktsize; + __be16 burstsize; + __be16 r4; + } params; + } u; +}; + struct fw_clip_cmd { __be32 op_to_write; __be32 alloc_to_len16; @@ -3249,4 +3286,127 @@ struct fw_devlog_cmd { #define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) +#define MAX_IMM_OFLD_TX_DATA_WR_LEN (0xff + sizeof(struct fw_ofld_tx_data_wr)) + +struct fw_crypto_lookaside_wr { + __be32 op_to_cctx_size; + __be32 len16_pkd; + __be32 session_id; + __be32 rx_chid_to_rx_q_id; + __be32 key_addr; + __be32 pld_size_hash_size; + __be64 cookie; +}; + +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_S 24 +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_OPCODE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_S 23 +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_M 0x1 +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_COMPL_S) +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_COMPL_S) & \ + FW_CRYPTO_LOOKASIDE_WR_COMPL_M) +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_F FW_CRYPTO_LOOKASIDE_WR_COMPL_V(1U) + +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S 15 +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) & \ + FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M) + +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S 5 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) & \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M) + +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M 0x1f +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_S 0 
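The FW_CRYPTO_LOOKASIDE_WR accessors above pack several fields into the leading op_to_cctx_size word of struct fw_crypto_lookaside_wr. Below is a hedged sketch of how a crypto-offload caller might fill that word; it is not code from this patch, and the imm_len, cctx_loc and cctx_size argument values are hypothetical placeholders.

/* Illustrative sketch only: pack the opcode, completion request, immediate
 * data length and key-context location/size into the first word of a
 * crypto lookaside work request, using the helpers defined above.
 */
static inline void fw_crypto_wr_fill_op(struct fw_crypto_lookaside_wr *wr,
					u32 imm_len, u32 cctx_loc,
					u32 cctx_size)
{
	wr->op_to_cctx_size = cpu_to_be32(
		FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(FW_CRYPTO_LOOKASIDE_WR) |
		FW_CRYPTO_LOOKASIDE_WR_COMPL_F |	/* ask FW for a completion */
		FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(imm_len) |
		FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(cctx_loc) |
		FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(cctx_size));
}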
+#define FW_CRYPTO_LOOKASIDE_WR_LEN16_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_LEN16_S) +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_LEN16_S) & \ + FW_CRYPTO_LOOKASIDE_WR_LEN16_M) + +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S 29 +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) & \ + FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M) + +#define FW_CRYPTO_LOOKASIDE_WR_LCB_S 27 +#define FW_CRYPTO_LOOKASIDE_WR_LCB_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_LCB_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_LCB_S) +#define FW_CRYPTO_LOOKASIDE_WR_LCB_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_LCB_S) & FW_CRYPTO_LOOKASIDE_WR_LCB_M) + +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_S 25 +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_PHASH_S) +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_PHASH_S) & \ + FW_CRYPTO_LOOKASIDE_WR_PHASH_M) + +#define FW_CRYPTO_LOOKASIDE_WR_IV_S 23 +#define FW_CRYPTO_LOOKASIDE_WR_IV_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_IV_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_IV_S) +#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M) + +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10 +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) & \ + FW_CRYPTO_LOOKASIDE_WR_TX_CH_M) + +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M 0x3ff +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) & \ + FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M) + +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S 24 +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S 17 +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M 0x7f +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M) + #endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index e116bb8d1729..100b2cc064a3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2378,7 +2378,7 @@ static void size_nports_qsets(struct adapter *adapter) */ pmask_nports = hweight32(adapter->params.vfres.pmask); if (pmask_nports < adapter->params.nports) { - dev_warn(adapter->pdev_dev, "only using %d of %d provissioned" + dev_warn(adapter->pdev_dev, "only using %d of %d provisioned" " virtual interfaces; limited by Port Access Rights" " mask %#x\n", pmask_nports, adapter->params.nports, adapter->params.vfres.pmask); @@ -2777,6 +2777,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, struct adapter 
*adapter; struct port_info *pi; struct net_device *netdev; + unsigned int pf; /* * Print our driver banner the first time we're called to initialize a @@ -2903,8 +2904,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Allocate our "adapter ports" and stitch everything together. */ pmask = adapter->params.vfres.pmask; + pf = t4vf_get_pf_from_vf(adapter); for_each_port(adapter, pidx) { int port_id, viid; + u8 mac[ETH_ALEN]; + unsigned int naddr = 1; /* * We simplistically allocate our virtual interfaces @@ -2975,6 +2979,26 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, pidx); goto err_free_dev; } + + err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac); + if (err) { + dev_err(&pdev->dev, + "unable to determine MAC ACL address, " + "continuing anyway.. (status %d)\n", err); + } else if (naddr && adapter->params.vfres.nvi == 1) { + struct sockaddr addr; + + ether_addr_copy(addr.sa_data, mac); + err = cxgb4vf_set_mac_addr(netdev, &addr); + if (err) { + dev_err(&pdev->dev, + "unable to set MAC address %pM\n", + mac); + goto err_free_dev; + } + dev_info(&pdev->dev, + "Using assigned MAC ACL: %pM\n", mac); + } } /* See what interrupts we'll be using. If we've been configured to diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 8ee541431e8b..8067424ad4a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -347,6 +347,7 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter, u64 *pbar2_qoffset, unsigned int *pbar2_qid); +unsigned int t4vf_get_pf_from_vf(struct adapter *); int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); int t4vf_get_dev_params(struct adapter *); @@ -381,5 +382,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); int t4vf_prep_adapter(struct adapter *); +int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, + unsigned int *naddr, u8 *addr); #endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 427bfa71388b..879f4c52b3d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -639,6 +639,15 @@ int t4vf_bar2_sge_qregs(struct adapter *adapter, return 0; } +unsigned int t4vf_get_pf_from_vf(struct adapter *adapter) +{ + u32 whoami; + + whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A); + return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? + SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami)); +} + /** * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters * @adapter: the adapter @@ -716,7 +725,6 @@ int t4vf_get_sge_params(struct adapter *adapter) * read. */ if (!is_t4(adapter->params.chip)) { - u32 whoami; unsigned int pf, s_hps, s_qpp; params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | @@ -740,11 +748,7 @@ int t4vf_get_sge_params(struct adapter *adapter) * register we just read. Do it once here so other code in * the driver can just use it. */ - whoami = t4_read_reg(adapter, - T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A); - pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 
- SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); - + pf = t4vf_get_pf_from_vf(adapter); s_hps = (HOSTPAGESIZEPF0_S + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); sge_params->sge_vf_hps = @@ -1807,3 +1811,50 @@ int t4vf_prep_adapter(struct adapter *adapter) return 0; } + +/** + * t4vf_get_vf_mac_acl - Get the MAC address to be set to + * the VI of this VF. + * @adapter: The adapter + * @pf: The pf associated with vf + * @naddr: the number of ACL MAC addresses returned in addr + * @addr: Placeholder for MAC addresses + * + * Find the MAC address to be set to the VF's VI. The requested MAC address + * is from the host OS via callback in the PF driver. + */ +int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, + unsigned int *naddr, u8 *addr) +{ + struct fw_acl_mac_cmd cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd); + if (ret) + return ret; + + if (cmd.nmac < *naddr) + *naddr = cmd.nmac; + + switch (pf) { + case 3: + memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3)); + break; + case 2: + memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2)); + break; + case 1: + memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1)); + break; + case 0: + memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0)); + break; + } + + return ret; +} diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index f0e9e2ef62a0..6620fc861c47 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1966,7 +1966,7 @@ SetMulticastFilter(struct net_device *dev) } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(ETH_ALEN, ha->addr); - hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ + hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ @@ -5043,7 +5043,7 @@ build_setup_frame(struct net_device *dev, int mode) *(pa + i) = dev->dev_addr[i]; /* Host address */ if (i & 0x01) pa += 2; } - *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80; + *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80; } else { for (i=0; i<ETH_ALEN; i++) { /* Host address */ *(pa + (i&1)) = dev->dev_addr[i]; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h index ec756eba397b..1bfdc9b117f6 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.h +++ b/drivers/net/ethernet/dec/tulip/de4x5.h @@ -860,8 +860,8 @@ #define PCI 0 #define EISA 1 -#define HASH_TABLE_LEN 512 /* Bits */ -#define HASH_BITS 0x01ff /* 9 LS bits */ +#define DE4X5_HASH_TABLE_LEN 512 /* Bits */ +#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */ #define SETUP_FRAME_LEN 192 /* Bytes */ #define IMPERF_PA_OFFSET 156 /* Bytes */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 4555e041ef69..86780b5c40ef 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -508,6 +508,10 @@ struct be_wrb_params { u16 lso_mss; /* MSS for LSO */ }; +struct be_eth_addr { + unsigned char mac[ETH_ALEN]; +}; + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -523,7 +527,7 @@ struct be_adapter { struct be_dma_mem mbox_mem_alloced; struct be_mcc_obj mcc_obj; - spinlock_t 
mcc_lock; /* For serializing mcc cmds to BE card */
+	struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
 	spinlock_t mcc_cq_lock;
 
 	u16 cfg_num_rx_irqs;		/* configured via set-channels */
@@ -570,9 +574,15 @@ struct be_adapter {
 	int if_handle;		/* Used to configure filtering */
 	u32 if_flags;		/* Interface filtering flags */
 	u32 *pmac_id;		/* MAC addr handle used by BE card */
+	struct be_eth_addr *uc_list;/* list of uc-addrs programmed (not perm) */
 	u32 uc_macs;		/* Count of secondary UC MAC programmed */
+	struct be_eth_addr *mc_list;/* list of mcast addrs programmed */
+	u32 mc_count;
 	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
 	u16 vlans_added;
+	bool update_uc_list;
+	bool update_mc_list;
+	struct mutex rx_filter_lock;/* For protecting vids[] & mc/uc_list[] */
 
 	u32 beacon_state;	/* for set_phys_id */
 
@@ -626,6 +636,15 @@ struct be_adapter {
 	u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
 };
 
+/* Used for deferred FW config cmds. Add fields to this struct as required */
+struct be_cmd_work {
+	struct work_struct work;
+	struct be_adapter *adapter;
+	union {
+		__be16 vxlan_port;
+	} info;
+};
+
 #define be_physfn(adapter)		(!adapter->virtfn)
 #define be_virtfn(adapter)		(adapter->virtfn)
 #define sriov_enabled(adapter)		(adapter->flags &	\
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2cc11756859f..fa11a5a8c354 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -571,7 +571,7 @@ int be_process_mcc(struct be_adapter *adapter)
 /* Wait till no more pending mcc requests are present */
 static int be_mcc_wait_compl(struct be_adapter *adapter)
 {
-#define mcc_timeout	120000 /* 12s timeout */
+#define mcc_timeout	12000 /* 12s timeout */
 	int i, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
@@ -585,7 +585,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
 		if (atomic_read(&mcc_obj->q.used) == 0)
 			break;
 
-		udelay(100);
+		usleep_range(500, 1000);
 	}
 	if (i == mcc_timeout) {
 		dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -863,7 +863,7 @@ static bool use_mcc(struct be_adapter *adapter)
 static int be_cmd_lock(struct be_adapter *adapter)
 {
 	if (use_mcc(adapter)) {
-		spin_lock_bh(&adapter->mcc_lock);
+		mutex_lock(&adapter->mcc_lock);
 		return 0;
 	} else {
 		return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -874,7 +874,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
 static void be_cmd_unlock(struct be_adapter *adapter)
 {
 	if (use_mcc(adapter))
-		spin_unlock_bh(&adapter->mcc_lock);
+		return mutex_unlock(&adapter->mcc_lock);
 	else
 		return mutex_unlock(&adapter->mbox_lock);
 }
@@ -1044,7 +1044,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	struct be_cmd_req_mac_query *req;
 	int status;
 
-	spin_lock_bh(&adapter->mcc_lock);
+	mutex_lock(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
@@ -1073,7 +1073,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 	}
 
 err:
-	spin_unlock_bh(&adapter->mcc_lock);
+	mutex_unlock(&adapter->mcc_lock);
 	return status;
 }
 
@@ -1085,7 +1085,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	struct be_cmd_req_pmac_add *req;
 	int status;
 
-	spin_lock_bh(&adapter->mcc_lock);
+	mutex_lock(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
@@ -1110,7 +1110,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 	}
 
 err:
-	spin_unlock_bh(&adapter->mcc_lock);
+	mutex_unlock(&adapter->mcc_lock);
 
 	if (status ==
MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; @@ -1128,7 +1128,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) if (pmac_id == -1) return 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1148,7 +1148,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1411,7 +1411,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_dma_mem *q_mem = &rxq->dma_mem; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1441,7 +1441,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1505,7 +1505,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) struct be_cmd_req_q_destroy *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1522,7 +1522,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) q->created = false; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1590,7 +1590,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) struct be_cmd_req_hdr *hdr; int status = 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1618,7 +1618,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) adapter->stats_cmd_sent = true; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1634,7 +1634,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, CMD_SUBSYSTEM_ETH)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1657,7 +1657,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, adapter->stats_cmd_sent = true; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1694,7 +1694,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, struct be_cmd_req_link_status *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; @@ -1733,7 +1733,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1744,7 +1744,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) struct be_cmd_req_get_cntl_addnl_attribs *req; int status = 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1759,7 +1759,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) status = be_mcc_notify(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1808,7 +1808,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) if (!get_fat_cmd.va) return -ENOMEM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); while (total_size) { buf_size = min(total_size, (u32)60*1024); @@ -1848,7 +1848,7 @@ int 
be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) err: dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1859,7 +1859,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) struct be_cmd_req_get_fw_version *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1882,7 +1882,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) sizeof(adapter->fw_on_flash)); } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1896,7 +1896,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, struct be_cmd_req_modify_eq_delay *req; int status = 0, i; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1919,7 +1919,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, status = be_mcc_notify(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1946,7 +1946,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, struct be_cmd_req_vlan_config *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1968,7 +1968,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -1979,7 +1979,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct be_cmd_req_rx_filter *req = mem->va; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1996,8 +1996,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) req->if_flags = (value == ON) ? 
req->if_flags_mask : 0; if (flags & BE_IF_FLAGS_MULTICAST) { - struct netdev_hw_addr *ha; - int i = 0; + int i; /* Reset mcast promisc mode if already set by setting mask * and not setting flags field @@ -2005,14 +2004,15 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) req->if_flags_mask |= cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS & be_if_cap_flags(adapter)); - req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); - netdev_for_each_mc_addr(ha, adapter->netdev) - memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); + req->mcast_num = cpu_to_le32(adapter->mc_count); + for (i = 0; i < adapter->mc_count; i++) + ether_addr_copy(req->mcast_mac[i].byte, + adapter->mc_list[i].mac); } status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2043,7 +2043,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2063,7 +2063,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) return -EOPNOTSUPP; @@ -2082,7 +2082,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2105,7 +2105,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2186,7 +2186,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) return 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2211,7 +2211,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2223,7 +2223,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_enable_disable_beacon *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2244,7 +2244,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2255,7 +2255,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) struct be_cmd_req_get_beacon_state *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2279,7 +2279,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2303,7 +2303,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -ENOMEM; } - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2325,7 +2325,7 @@ int 
be_cmd_read_port_transceiver_data(struct be_adapter *adapter, memcpy(data, resp->page_data, PAGE_DATA_LEN); } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2342,7 +2342,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, void *ctxt = NULL; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2384,7 +2384,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, if (status) goto err_unlock; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) @@ -2403,7 +2403,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, return status; err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2457,7 +2457,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, struct be_mcc_wrb *wrb; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2475,7 +2475,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2488,7 +2488,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, struct lancer_cmd_resp_read_object *resp; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2522,7 +2522,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, } err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2534,7 +2534,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_cmd_write_flashrom *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2559,7 +2559,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, if (status) goto err_unlock; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(40000))) @@ -2570,7 +2570,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, return status; err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -2581,7 +2581,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, struct be_mcc_wrb *wrb; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2608,7 +2608,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, memcpy(flashed_crc, req->crc, 4); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3192,7 +3192,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_cmd_req_acpi_wol_magic_config *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3209,7 +3209,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + 
mutex_unlock(&adapter->mcc_lock); return status; } @@ -3224,7 +3224,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3247,7 +3247,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, if (status) goto err_unlock; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) @@ -3256,7 +3256,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, return status; err_unlock: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3273,7 +3273,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3299,7 +3299,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, if (status) goto err; - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); wait_for_completion(&adapter->et_cmd_compl); resp = embedded_payload(wrb); @@ -3307,7 +3307,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, return status; err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3323,7 +3323,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3357,7 +3357,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3368,7 +3368,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_cmd_req_seeprom_read *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3384,7 +3384,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3399,7 +3399,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3444,7 +3444,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) } dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3454,7 +3454,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) struct be_cmd_req_set_qos *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3474,7 +3474,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3581,7 +3581,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, struct be_cmd_req_get_fn_privileges *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); 
if (!wrb) { @@ -3613,7 +3613,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3625,7 +3625,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, struct be_cmd_req_set_fn_privileges *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3645,7 +3645,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3677,7 +3677,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, return -ENOMEM; } - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3741,7 +3741,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, } out: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; @@ -3801,7 +3801,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, if (!cmd.va) return -ENOMEM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3823,7 +3823,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3859,7 +3859,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, CMD_SUBSYSTEM_COMMON)) return -EPERM; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3900,7 +3900,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -3914,7 +3914,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, int status; u16 vid; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3961,7 +3961,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4156,7 +4156,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_cmd_req_set_ext_fat_caps *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4172,7 +4172,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4650,7 +4650,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) if (iface == 0xFFFFFFFF) return -1; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4667,7 +4667,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4701,7 +4701,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, 
struct be_vf_cfg *vf_cfg, struct be_cmd_resp_get_iface_list *resp; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4722,7 +4722,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, } err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4816,7 +4816,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) if (BEx_chip(adapter)) return 0; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4834,7 +4834,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) req->enable = 1; status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4905,7 +4905,7 @@ int __be_cmd_set_logical_link_config(struct be_adapter *adapter, struct be_cmd_req_set_ll_link *req; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4931,7 +4931,7 @@ int __be_cmd_set_logical_link_config(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } @@ -4964,7 +4964,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, struct be_cmd_resp_hdr *resp; int status; - spin_lock_bh(&adapter->mcc_lock); + mutex_lock(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4987,7 +4987,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); err: - spin_unlock_bh(&adapter->mcc_lock); + mutex_unlock(&adapter->mcc_lock); return status; } EXPORT_SYMBOL(be_roce_mcc_cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 874c7539a79d..f7584d4139ff 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -53,6 +53,10 @@ static const struct pci_device_id be_dev_ids[] = { { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); + +/* Workqueue used by all functions for deferring cmd calls to the adapter */ +struct workqueue_struct *be_wq; + /* UE Status Low CSR */ static const char * const ue_status_low_desc[] = { "CEV", @@ -1420,13 +1424,18 @@ static int be_vid_config(struct be_adapter *adapter) u16 num = 0, i = 0; int status = 0; - /* No need to further configure vids if in promiscuous mode */ - if (be_in_all_promisc(adapter)) + /* No need to change the VLAN state if the I/F is in promiscuous mode */ + if (adapter->netdev->flags & IFF_PROMISC) return 0; if (adapter->vlans_added > be_max_vlans(adapter)) return be_set_vlan_promisc(adapter); + if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + status = be_clear_vlan_promisc(adapter); + if (status) + return status; + } /* Construct VLAN Table to give to HW */ for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); @@ -1439,8 +1448,6 @@ static int be_vid_config(struct be_adapter *adapter) addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) return be_set_vlan_promisc(adapter); - } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { - status = be_clear_vlan_promisc(adapter); } return status; } @@ -1450,46 +1457,45 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) struct be_adapter
*adapter = netdev_priv(netdev); int status = 0; + mutex_lock(&adapter->rx_filter_lock); + /* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) - return status; + goto done; if (test_bit(vid, adapter->vids)) - return status; + goto done; set_bit(vid, adapter->vids); adapter->vlans_added++; status = be_vid_config(adapter); - if (status) { - adapter->vlans_added--; - clear_bit(vid, adapter->vids); - } - +done: + mutex_unlock(&adapter->rx_filter_lock); return status; } static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; + + mutex_lock(&adapter->rx_filter_lock); /* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) - return 0; + goto done; if (!test_bit(vid, adapter->vids)) - return 0; + goto done; clear_bit(vid, adapter->vids); adapter->vlans_added--; - return be_vid_config(adapter); -} - -static void be_clear_all_promisc(struct be_adapter *adapter) -{ - be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF); - adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS; + status = be_vid_config(adapter); +done: + mutex_unlock(&adapter->rx_filter_lock); + return status; } static void be_set_all_promisc(struct be_adapter *adapter) @@ -1510,75 +1516,207 @@ static void be_set_mc_promisc(struct be_adapter *adapter) adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS; } -static void be_set_mc_list(struct be_adapter *adapter) +static void be_set_uc_promisc(struct be_adapter *adapter) { int status; - status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON); + if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) + return; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON); if (!status) - adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS; - else + adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS; +} + +static void be_clear_uc_promisc(struct be_adapter *adapter) +{ + int status; + + if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)) + return; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF); + if (!status) + adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS; +} + +/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync(). + * We use a single callback function for both sync and unsync. We really don't + * add/remove addresses through this callback. But, we use it to detect changes + * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode(). 
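Both VID paths above funnel through adapter->rx_filter_lock, a mutex (initialized in be_drv_init() later in this patch) that serializes every mutator of RX-filter state: vids, the cached uc/mc lists and the promisc flags. A condensed sketch of the add-vid path under that lock, illustrative only; the real code is be_vlan_add_vid() above:

	mutex_lock(&adapter->rx_filter_lock);	/* sleeps; callers run in process context */
	if (!test_bit(vid, adapter->vids)) {
		set_bit(vid, adapter->vids);
		adapter->vlans_added++;
		status = be_vid_config(adapter);	/* issues MCC cmds, may sleep */
	}
	mutex_unlock(&adapter->rx_filter_lock);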
+ */ +static int be_uc_list_update(struct net_device *netdev, + const unsigned char *addr) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + adapter->update_uc_list = true; + return 0; +} + +static int be_mc_list_update(struct net_device *netdev, + const unsigned char *addr) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + adapter->update_mc_list = true; + return 0; +} + +static void be_set_mc_list(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct netdev_hw_addr *ha; + bool mc_promisc = false; + int status; + + netif_addr_lock_bh(netdev); + __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update); + + if (netdev->flags & IFF_PROMISC) { + adapter->update_mc_list = false; + } else if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > be_max_mc(adapter)) { + /* Enable multicast promisc if num configured exceeds + * what we support + */ + mc_promisc = true; + adapter->update_mc_list = false; + } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) { + /* Update mc-list unconditionally if the iface was previously + * in mc-promisc mode and now is out of that mode. + */ + adapter->update_mc_list = true; + } + + if (adapter->update_mc_list) { + int i = 0; + + /* cache the mc-list in adapter */ + netdev_for_each_mc_addr(ha, netdev) { + ether_addr_copy(adapter->mc_list[i].mac, ha->addr); + i++; + } + adapter->mc_count = netdev_mc_count(netdev); + } + netif_addr_unlock_bh(netdev); + + if (mc_promisc) { be_set_mc_promisc(adapter); + } else if (adapter->update_mc_list) { + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON); + if (!status) + adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS; + else + be_set_mc_promisc(adapter); + + adapter->update_mc_list = false; + } +} + +static void be_clear_mc_list(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + __dev_mc_unsync(netdev, NULL); + be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF); + adapter->mc_count = 0; } static void be_set_uc_list(struct be_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct netdev_hw_addr *ha; - int i = 1; /* First slot is claimed by the Primary MAC */ + bool uc_promisc = false; + int curr_uc_macs = 0, i; - for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) - be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); + netif_addr_lock_bh(netdev); + __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update); - if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) { - be_set_all_promisc(adapter); - return; + if (netdev->flags & IFF_PROMISC) { + adapter->update_uc_list = false; + } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) { + uc_promisc = true; + adapter->update_uc_list = false; + } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) { + /* Update uc-list unconditionally if the iface was previously + * in uc-promisc mode and now is out of that mode. 
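__dev_mc_sync()/__dev_uc_sync() call the driver back once per added or removed address, but as the comment above says, be_uc_list_update()/be_mc_list_update() ignore the address and only mark the cached list dirty; the whole list is then re-programmed in one shot. A minimal sketch of this dirty-flag idiom; be_mc_sync_example() is illustrative, not a driver function:

static void be_mc_sync_example(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_addr_lock_bh(netdev);
	/* same hook for sync and unsync: any change just marks the list dirty */
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
	if (adapter->update_mc_list) {
		/* ... snapshot netdev's mc list into adapter->mc_list ... */
	}
	netif_addr_unlock_bh(netdev);

	/* outside the addr lock, push the snapshot in one shot, e.g.
	 * be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	 * see be_set_mc_list() above for the real logic
	 */
}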
+ */ + adapter->update_uc_list = true; } - netdev_for_each_uc_addr(ha, adapter->netdev) { - adapter->uc_macs++; /* First slot is for Primary MAC */ - be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle, - &adapter->pmac_id[adapter->uc_macs], 0); + if (adapter->update_uc_list) { + i = 1; /* First slot is claimed by the Primary MAC */ + + /* cache the uc-list in adapter array */ + netdev_for_each_uc_addr(ha, netdev) { + ether_addr_copy(adapter->uc_list[i].mac, ha->addr); + i++; + } + curr_uc_macs = netdev_uc_count(netdev); + } + netif_addr_unlock_bh(netdev); + + if (uc_promisc) { + be_set_uc_promisc(adapter); + } else if (adapter->update_uc_list) { + be_clear_uc_promisc(adapter); + + for (i = 0; i < adapter->uc_macs; i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i + 1], 0); + + for (i = 0; i < curr_uc_macs; i++) + be_cmd_pmac_add(adapter, adapter->uc_list[i].mac, + adapter->if_handle, + &adapter->pmac_id[i + 1], 0); + adapter->uc_macs = curr_uc_macs; + adapter->update_uc_list = false; } } static void be_clear_uc_list(struct be_adapter *adapter) { + struct net_device *netdev = adapter->netdev; int i; - for (i = 1; i < (adapter->uc_macs + 1); i++) + __dev_uc_unsync(netdev, NULL); + for (i = 0; i < adapter->uc_macs; i++) be_cmd_pmac_del(adapter, adapter->if_handle, - adapter->pmac_id[i], 0); + adapter->pmac_id[i + 1], 0); adapter->uc_macs = 0; } -static void be_set_rx_mode(struct net_device *netdev) +static void __be_set_rx_mode(struct be_adapter *adapter) { - struct be_adapter *adapter = netdev_priv(netdev); + struct net_device *netdev = adapter->netdev; + + mutex_lock(&adapter->rx_filter_lock); if (netdev->flags & IFF_PROMISC) { - be_set_all_promisc(adapter); - return; + if (!be_in_all_promisc(adapter)) + be_set_all_promisc(adapter); + } else if (be_in_all_promisc(adapter)) { + /* We need to re-program the vlan-list or clear + * vlan-promisc mode (if needed) when the interface + * comes out of promisc mode. + */ + be_vid_config(adapter); } - /* Interface was previously in promiscuous mode; disable it */ - if (be_in_all_promisc(adapter)) { - be_clear_all_promisc(adapter); - if (adapter->vlans_added) - be_vid_config(adapter); - } + be_set_uc_list(adapter); + be_set_mc_list(adapter); - /* Enable multicast promisc if num configured exceeds what we support */ - if (netdev->flags & IFF_ALLMULTI || - netdev_mc_count(netdev) > be_max_mc(adapter)) { - be_set_mc_promisc(adapter); - return; - } + mutex_unlock(&adapter->rx_filter_lock); +} - if (netdev_uc_count(netdev) != adapter->uc_macs) - be_set_uc_list(adapter); +static void be_work_set_rx_mode(struct work_struct *work) +{ + struct be_cmd_work *cmd_work = + container_of(work, struct be_cmd_work, work); - be_set_mc_list(adapter); + __be_set_rx_mode(cmd_work->adapter); + kfree(cmd_work); } static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) @@ -3429,6 +3567,7 @@ static void be_disable_if_filters(struct be_adapter *adapter) adapter->pmac_id[0], 0); be_clear_uc_list(adapter); + be_clear_mc_list(adapter); /* The IFACE flags are enabled in the open path and cleared * in the close path. 
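.ndo_set_rx_mode() can be invoked in atomic context, so be_set_rx_mode() (further down) no longer touches the hardware directly: it allocates a small be_cmd_work with GFP_ATOMIC, queues it on be_wq, and be_work_set_rx_mode() above runs __be_set_rx_mode() in process context. The same deferral is applied to the VxLAN add/del-port callbacks later in this patch, and be_close()/be_clear() flush be_wq so no deferred command races with teardown. A condensed sketch, assuming the be_cmd_work layout this patch introduces:

struct be_cmd_work {
	struct work_struct work;
	struct be_adapter *adapter;
	/* plus a union of per-command parameters, e.g. the vxlan port */
};

static void be_defer_cmd_example(struct be_adapter *adapter,
				 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);	/* caller may be atomic */
	if (!work)
		return;					/* best effort */

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	queue_work(be_wq, &work->work);	/* e.g. func == be_work_set_rx_mode */
}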
When a VF gets detached from the host and @@ -3462,6 +3601,11 @@ static int be_close(struct net_device *netdev) if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) return 0; + /* Before attempting cleanup ensure all the pending cmds in the + * config_wq have finished execution + */ + flush_workqueue(be_wq); + be_disable_if_filters(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { @@ -3586,7 +3730,7 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (adapter->vlans_added) be_vid_config(adapter); - be_set_rx_mode(adapter->netdev); + __be_set_rx_mode(adapter); return 0; } @@ -3860,6 +4004,20 @@ static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1); } +static void be_if_destroy(struct be_adapter *adapter) +{ + be_cmd_if_destroy(adapter, adapter->if_handle, 0); + + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; + + kfree(adapter->mc_list); + adapter->mc_list = NULL; + + kfree(adapter->uc_list); + adapter->uc_list = NULL; +} + static int be_clear(struct be_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; @@ -3867,6 +4025,8 @@ static int be_clear(struct be_adapter *adapter) be_cancel_worker(adapter); + flush_workqueue(be_wq); + if (sriov_enabled(adapter)) be_vf_clear(adapter); @@ -3884,10 +4044,8 @@ static int be_clear(struct be_adapter *adapter) } be_disable_vxlan_offloads(adapter); - kfree(adapter->pmac_id); - adapter->pmac_id = NULL; - be_cmd_if_destroy(adapter, adapter->if_handle, 0); + be_if_destroy(adapter); be_clear_queues(adapter); @@ -4341,7 +4499,7 @@ static int be_mac_setup(struct be_adapter *adapter) static void be_schedule_worker(struct be_adapter *adapter) { - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000)); adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } @@ -4393,6 +4551,22 @@ static int be_if_create(struct be_adapter *adapter) u32 cap_flags = be_if_cap_flags(adapter); int status; + /* alloc required memory for other filtering fields */ + adapter->pmac_id = kcalloc(be_max_uc(adapter), + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + + adapter->mc_list = kcalloc(be_max_mc(adapter), + sizeof(*adapter->mc_list), GFP_KERNEL); + if (!adapter->mc_list) + return -ENOMEM; + + adapter->uc_list = kcalloc(be_max_uc(adapter), + sizeof(*adapter->uc_list), GFP_KERNEL); + if (!adapter->uc_list) + return -ENOMEM; + if (adapter->cfg_num_rx_irqs == 1) cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); @@ -4401,7 +4575,10 @@ static int be_if_create(struct be_adapter *adapter) status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, &adapter->if_handle, 0); - return status; + if (status) + return status; + + return 0; } int be_update_queues(struct be_adapter *adapter) @@ -4530,11 +4707,6 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - adapter->pmac_id = kcalloc(be_max_uc(adapter), - sizeof(*adapter->pmac_id), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - status = be_msix_enable(adapter); if (status) goto err; @@ -4728,6 +4900,23 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 0, 0, nlflags, filter_mask, NULL); } +static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter, + void (*func)(struct work_struct *)) +{ + struct be_cmd_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + dev_err(&adapter->pdev->dev, + "be_work memory allocation failed\n"); + return NULL; + 
} + + INIT_WORK(&work->work, func); + work->adapter = adapter; + return work; +} + /* VxLAN offload Notes: * * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't @@ -4742,23 +4931,19 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, * adds more than one port, disable offloads and don't re-enable them again * until after all the tunnels are removed. */ -static void be_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void be_work_add_vxlan_port(struct work_struct *work) { - struct be_adapter *adapter = netdev_priv(netdev); + struct be_cmd_work *cmd_work = + container_of(work, struct be_cmd_work, work); + struct be_adapter *adapter = cmd_work->adapter; + struct net_device *netdev = adapter->netdev; struct device *dev = &adapter->pdev->dev; - __be16 port = ti->port; + __be16 port = cmd_work->info.vxlan_port; int status; - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) - return; - if (adapter->vxlan_port == port && adapter->vxlan_port_count) { adapter->vxlan_port_aliases++; - return; + goto done; } if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { @@ -4770,7 +4955,7 @@ static void be_add_vxlan_port(struct net_device *netdev, } if (adapter->vxlan_port_count++ >= 1) - return; + goto done; status = be_cmd_manage_iface(adapter, adapter->if_handle, OP_CONVERT_NORMAL_TO_TUNNEL); @@ -4795,29 +4980,26 @@ static void be_add_vxlan_port(struct net_device *netdev, dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); - return; + goto done; err: be_disable_vxlan_offloads(adapter); +done: + kfree(cmd_work); } -static void be_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static void be_work_del_vxlan_port(struct work_struct *work) { - struct be_adapter *adapter = netdev_priv(netdev); - __be16 port = ti->port; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) - return; + struct be_cmd_work *cmd_work = + container_of(work, struct be_cmd_work, work); + struct be_adapter *adapter = cmd_work->adapter; + __be16 port = cmd_work->info.vxlan_port; if (adapter->vxlan_port != port) goto done; if (adapter->vxlan_port_aliases) { adapter->vxlan_port_aliases--; - return; + goto out; } be_disable_vxlan_offloads(adapter); @@ -4827,6 +5009,40 @@ static void be_del_vxlan_port(struct net_device *netdev, be16_to_cpu(port)); done: adapter->vxlan_port_count--; +out: + kfree(cmd_work); +} + +static void be_cfg_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti, + void (*func)(struct work_struct *)) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_cmd_work *cmd_work; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) + return; + + cmd_work = be_alloc_work(adapter, func); + if (cmd_work) { + cmd_work->info.vxlan_port = ti->port; + queue_work(be_wq, &cmd_work->work); + } +} + +static void be_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port); +} + +static void be_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port); } static netdev_features_t be_features_check(struct sk_buff *skb, @@ -4891,6 +5107,16 @@ static int be_get_phys_port_id(struct net_device *dev, return 0; } +static void 
be_set_rx_mode(struct net_device *dev) +{ + struct be_adapter *adapter = netdev_priv(dev); + struct be_cmd_work *work; + + work = be_alloc_work(adapter, be_work_set_rx_mode); + if (work) + queue_work(be_wq, &work->work); +} + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@ -5116,7 +5342,7 @@ static void be_worker(struct work_struct *work) reschedule: adapter->work_counter++; - schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000)); } static void be_unmap_pci_bars(struct be_adapter *adapter) @@ -5256,7 +5482,8 @@ static int be_drv_init(struct be_adapter *adapter) } mutex_init(&adapter->mbox_lock); - spin_lock_init(&adapter->mcc_lock); + mutex_init(&adapter->mcc_lock); + mutex_init(&adapter->rx_filter_lock); spin_lock_init(&adapter->mcc_cq_lock); init_completion(&adapter->et_cmd_compl); @@ -5712,6 +5939,12 @@ static int __init be_init_module(void) pr_info(DRV_NAME " : Use sysfs method to enable VFs\n"); } + be_wq = create_singlethread_workqueue("be_wq"); + if (!be_wq) { + pr_warn(DRV_NAME " : workqueue creation failed\n"); + return -ENOMEM; + } + return pci_register_driver(&be_driver); } module_init(be_init_module); @@ -5719,5 +5952,8 @@ module_init(be_init_module); static void __exit be_exit_module(void) { pci_unregister_driver(&be_driver); + + if (be_wq) + destroy_workqueue(be_wq); } module_exit(be_exit_module); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 01f7e811739b..fb5c63881340 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2887,7 +2887,7 @@ fec_enet_close(struct net_device *ndev) * this kind of feature?). */ -#define HASH_BITS 6 /* #bits in hash */ +#define FEC_HASH_BITS 6 /* #bits in hash */ #define CRC32_POLY 0xEDB88320 static void set_multicast_list(struct net_device *ndev) @@ -2935,10 +2935,10 @@ static void set_multicast_list(struct net_device *ndev) } } - /* only upper 6 bits (HASH_BITS) are used + /* only upper 6 bits (FEC_HASH_BITS) are used * which point to specific bit in the hash registers */ - hash = (crc >> (32 - HASH_BITS)) & 0x3f; + hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; if (hash > 31) { tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 5bf1ade28315..186ef8f16c80 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3756,7 +3756,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) return -EINVAL; } if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { - pr_err("invalid rx-clock propperty\n"); + pr_err("invalid rx-clock property\n"); return -EINVAL; } ug_info->uf_info.rx_clock = *prop; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 7b8fe866f603..e03b30c60dcf 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -271,11 +271,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev) goto err_ioremap; } - if (of_get_property(pdev->dev.of_node, - "little-endian", NULL)) - priv->is_little_endian = true; - else - priv->is_little_endian = false; + priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, + "little-endian"); ret = of_mdiobus_register(bus, np); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 5c8afe1a5ccb..a834774fdb02 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -684,8 +684,7 @@ hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, if (!phy || IS_ERR(phy)) return -EIO; - if (mdio->irq) - phy->irq = mdio->irq[addr]; + phy->irq = mdio->irq[addr]; /* All data is now stored in the phy struct; * register it diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index afb5daa3721d..eb448dff7564 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/vmalloc.h> @@ -115,10 +116,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->sc_base)) { - dev_err(dsaf_dev->dev, "subctrl can not map!\n"); + if (IS_ERR(dsaf_dev->sc_base)) return PTR_ERR(dsaf_dev->sc_base); - } res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); @@ -129,10 +128,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->sds_base)) { - dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); + if (IS_ERR(dsaf_dev->sds_base)) return PTR_ERR(dsaf_dev->sds_base); - } } else { dsaf_dev->sub_ctrl = syscon; } @@ -147,10 +144,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->ppe_base)) { - dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); + if (IS_ERR(dsaf_dev->ppe_base)) return PTR_ERR(dsaf_dev->ppe_base); - } dsaf_dev->ppe_paddr = res->start; if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { @@ -166,10 +161,8 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dsaf_dev->io_base)) { - dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); + if (IS_ERR(dsaf_dev->io_base)) return PTR_ERR(dsaf_dev->io_base); - } } ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num); @@ -2781,6 +2774,89 @@ static struct platform_driver g_dsaf_driver = { module_platform_driver(g_dsaf_driver); +/** + * hns_dsaf_roce_reset - reset dsaf and roce + * @dsaf_fwnode: Pointer to framework node for the dsaf + * @enable: false - request reset, true - drop reset + * return 0 - success, negative - fail + */ +int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable) +{ + struct dsaf_device *dsaf_dev; + struct platform_device *pdev; + u32 mp; + u32 sl; + u32 credit; + int i; + const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { + {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0}, + {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, + {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1}, + {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, + {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1}, + }; + const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = { + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0}, + {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1}, + {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2}, + {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3}, + }; + + if (!is_of_node(dsaf_fwnode)) { + pr_err("hisi_dsaf: Only support DT node!\n"); + return -EINVAL; + } + pdev = of_find_device_by_node(to_of_node(dsaf_fwnode)); + dsaf_dev = dev_get_drvdata(&pdev->dev); + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", + dsaf_dev->ae_dev.name); + return -ENODEV; + } + + if (!enable) { + /* Reset rocee-channels in dsaf and rocee */ + hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false); + hns_dsaf_roce_srst(dsaf_dev, false); + } else { + /* Configure dsaf tx roce according to the port map and sl map */ + mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG); + for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) + dsaf_set_field(mp, 7 << i * 3, i * 3, + port_map[i][DSAF_ROCE_6PORT_MODE]); + dsaf_set_field(mp, 3 << i * 3, i * 3, 0); + dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp); + + sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG); + for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++) + dsaf_set_field(sl, 3 << i * 2, i * 2, + sl_map[i][DSAF_ROCE_6PORT_MODE]); + dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl); + + /* De-reset rocee-channels in dsaf and rocee */ + hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true); + msleep(SRST_TIME_INTERVAL); + hns_dsaf_roce_srst(dsaf_dev, true); + + /* Enable dsaf channel rocee credit */ + credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG); + dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0); + dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); + + dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); + dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); + } + return 0; +} +EXPORT_SYMBOL(hns_dsaf_roce_reset); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Tech.
Co., Ltd."); MODULE_DESCRIPTION("HNS DSAF driver"); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 1daf018d9071..f3681d566ae6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -43,6 +43,32 @@ struct hns_mac_cb; #define DSAF_PRIO_NR 8 #define DSAF_REG_PER_ZONE 3 +#define DSAF_ROCE_CREDIT_CHN 8 +#define DSAF_ROCE_CHAN_MODE 3 + +enum dsaf_roce_port_mode { + DSAF_ROCE_6PORT_MODE, + DSAF_ROCE_4PORT_MODE, + DSAF_ROCE_2PORT_MODE, + DSAF_ROCE_CHAN_MODE_NUM, +}; + +enum dsaf_roce_port_num { + DSAF_ROCE_PORT_0, + DSAF_ROCE_PORT_1, + DSAF_ROCE_PORT_2, + DSAF_ROCE_PORT_3, + DSAF_ROCE_PORT_4, + DSAF_ROCE_PORT_5, +}; + +enum dsaf_roce_qos_sl { + DSAF_ROCE_SL_0, + DSAF_ROCE_SL_1, + DSAF_ROCE_SL_2, + DSAF_ROCE_SL_3, +}; + #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) @@ -419,6 +445,10 @@ int hns_dsaf_get_mac_entry_by_index( void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb); +void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable); + +void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable); + int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev); void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 611b67b6f450..36b9f791cf2f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -231,6 +231,42 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } +/** + * hns_dsaf_srst_chns - reset dsaf channels + * @dsaf_dev: dsaf device struct pointer + * @msk: xbar channels mask value: + * bit0-5 for xge0-5 + * bit6-11 for ppe0-5 + * bit12-17 for roce0-5 + * bit18-19 for com/dfx + * @enable: false - request reset , true - drop reset + */ +void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable) +{ + u32 reg_addr; + + if (!enable) + reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG; + else + reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG; + + dsaf_write_sub(dsaf_dev, reg_addr, msk); +} + +void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable) +{ + if (!enable) { + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1); + } else { + dsaf_write_sub(dsaf_dev, + DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1); + dsaf_write_sub(dsaf_dev, + DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1); + msleep(20); + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1); + } +} + static void hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 235f74444b1d..13c16ab7be48 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -77,6 +77,12 @@ #define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C #define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 #define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C +#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8 +#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50 +#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC +#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C +#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54 +#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328 #define 
DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 #define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 #define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 @@ -133,6 +139,8 @@ #define DSAF_ROCEE_INT_STS_0_REG 0x200 #define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PAUSE_CFG_REG 0x240 +#define DSAF_ROCE_PORT_MAP_REG 0x2A0 +#define DSAF_ROCE_SL_MAP_REG 0x2A4 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -178,6 +186,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C +#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380 #define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 @@ -796,6 +805,9 @@ #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) +#define DSAF_CHNS_MASK 0x3f000 +#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2 +#define SRST_TIME_INTERVAL 20 #define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0 #define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0) #define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 88f3c85fb04a..62454d7a062a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -203,7 +203,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct device *dev = &adapter->vdev->dev; dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - send_request_unmap(adapter, ltb->map_id); + if (!adapter->failover) + send_request_unmap(adapter, ltb->map_id); } static int alloc_rx_pool(struct ibmvnic_adapter *adapter, @@ -522,7 +523,8 @@ static int ibmvnic_close(struct net_device *netdev) for (i = 0; i < adapter->req_rx_queues; i++) napi_disable(&adapter->napi[i]); - netif_tx_stop_all_queues(netdev); + if (!adapter->failover) + netif_tx_stop_all_queues(netdev); if (adapter->bounce_buffer) { if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { @@ -2777,12 +2779,6 @@ static void handle_control_ras_rsp(union ibmvnic_crq *crq, } } -static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, loff_t *ppos) { @@ -2834,7 +2830,7 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, static const struct file_operations trace_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = trace_read, }; @@ -2884,7 +2880,7 @@ static ssize_t paused_write(struct file *file, const char __user *user_buf, static const struct file_operations paused_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = paused_read, .write = paused_write, }; @@ -2932,7 +2928,7 @@ static ssize_t tracing_write(struct file *file, const char __user *user_buf, static const struct file_operations tracing_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = tracing_read, .write = tracing_write, }; @@ -2985,7 +2981,7 @@ static ssize_t error_level_write(struct file *file, const char __user *user_buf, static const struct file_operations error_level_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = error_level_read, .write = error_level_write, }; @@ -3036,7 +3032,7 @@ static ssize_t 
trace_level_write(struct file *file, const char __user *user_buf, static const struct file_operations trace_level_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = trace_level_read, .write = trace_level_write, }; @@ -3089,7 +3085,7 @@ static ssize_t trace_buff_size_write(struct file *file, static const struct file_operations trace_size_ops = { .owner = THIS_MODULE, - .open = ibmvnic_fw_comp_open, + .open = simple_open, .read = trace_buff_size_read, .write = trace_buff_size_write, }; @@ -3280,6 +3276,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, rc = ibmvnic_send_crq_init(adapter); if (rc) dev_err(dev, "Error sending init rc=%ld\n", rc); + } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { + dev_info(dev, "Backing device failover detected\n"); + netif_carrier_off(netdev); + adapter->failover = true; } else { /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", @@ -3615,8 +3615,18 @@ static void handle_crq_init_rsp(struct work_struct *work) struct device *dev = &adapter->vdev->dev; struct net_device *netdev = adapter->netdev; unsigned long timeout = msecs_to_jiffies(30000); + bool restart = false; int rc; + if (adapter->failover) { + release_sub_crqs(adapter); + if (netif_running(netdev)) { + netif_tx_disable(netdev); + ibmvnic_close(netdev); + restart = true; + } + } + send_version_xchg(adapter); reinit_completion(&adapter->init_done); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -3645,6 +3655,17 @@ static void handle_crq_init_rsp(struct work_struct *work) netdev->real_num_tx_queues = adapter->req_tx_queues; + if (adapter->failover) { + adapter->failover = false; + if (restart) { + rc = ibmvnic_open(netdev); + if (rc) + goto restart_failed; + } + netif_carrier_on(netdev); + return; + } + rc = register_netdev(netdev); if (rc) { dev_err(dev, @@ -3655,6 +3676,8 @@ static void handle_crq_init_rsp(struct work_struct *work) return; +restart_failed: + dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc); register_failed: release_sub_crqs(adapter); task_failed: @@ -3692,6 +3715,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) dev_set_drvdata(&dev->dev, netdev); adapter->vdev = dev; adapter->netdev = netdev; + adapter->failover = false; ether_addr_copy(adapter->mac_addr, mac_addr_p); ether_addr_copy(netdev->dev_addr, adapter->mac_addr); @@ -3721,6 +3745,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (dma_mapping_error(&dev->dev, adapter->stats_token)) { if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(&dev->dev, "Couldn't map stats buffer\n"); + rc = -ENOMEM; goto free_crq; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index e82898fd518e..bfc84c7d0e11 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -830,6 +830,7 @@ enum ibmvfc_crq_format { IBMVNIC_CRQ_INIT = 0x01, IBMVNIC_CRQ_INIT_COMPLETE = 0x02, IBMVNIC_PARTITION_MIGRATED = 0x06, + IBMVNIC_DEVICE_FAILOVER = 0x08, }; struct ibmvnic_crq_queue { @@ -1047,4 +1048,5 @@ struct ibmvnic_adapter { u8 map_id; struct work_struct vnic_crq_init; + bool failover; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2a882916b4f6..19103a6a7dcc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -65,76 +65,72 @@ #include "i40e_dcb.h" /* Useful i40e defaults */ -#define I40E_MAX_VEB 16 - 
-#define I40E_MAX_NUM_DESCRIPTORS 4096 -#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) -#define I40E_DEFAULT_NUM_DESCRIPTORS 512 -#define I40E_REQ_DESCRIPTOR_MULTIPLE 32 -#define I40E_MIN_NUM_DESCRIPTORS 64 -#define I40E_MIN_MSIX 2 -#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ -#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ +#define I40E_MAX_VEB 16 + +#define I40E_MAX_NUM_DESCRIPTORS 4096 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) +#define I40E_DEFAULT_NUM_DESCRIPTORS 512 +#define I40E_REQ_DESCRIPTOR_MULTIPLE 32 +#define I40E_MIN_NUM_DESCRIPTORS 64 +#define I40E_MIN_MSIX 2 +#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ +#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ /* max 16 qps */ #define i40e_default_queues_per_vmdq(pf) \ (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) -#define I40E_DEFAULT_QUEUES_PER_VF 4 -#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ +#define I40E_DEFAULT_QUEUES_PER_VF 4 +#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64) -#define I40E_FDIR_RING 0 -#define I40E_FDIR_RING_COUNT 32 +#define I40E_FDIR_RING 0 +#define I40E_FDIR_RING_COUNT 32 #ifdef I40E_FCOE -#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ -#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ +#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ +#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ #endif /* I40E_FCOE */ -#define I40E_MAX_AQ_BUF_SIZE 4096 -#define I40E_AQ_LEN 256 -#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ -#define I40E_MAX_USER_PRIORITY 8 -#define I40E_DEFAULT_MSG_ENABLE 4 -#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 -#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) +#define I40E_MAX_AQ_BUF_SIZE 4096 +#define I40E_AQ_LEN 256 +#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DEFAULT_MSG_ENABLE 4 +#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 +#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) -#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) +#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4) #define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5) -#define I40E_NVM_VERSION_LO_SHIFT 0 -#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_HI_SHIFT 12 -#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) -#define I40E_OEM_VER_BUILD_MASK 0xffff -#define I40E_OEM_VER_PATCH_MASK 0xff -#define I40E_OEM_VER_BUILD_SHIFT 8 -#define I40E_OEM_VER_SHIFT 24 +#define I40E_NVM_VERSION_LO_SHIFT 0 +#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) +#define I40E_NVM_VERSION_HI_SHIFT 12 +#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +#define I40E_OEM_VER_BUILD_MASK 0xffff +#define I40E_OEM_VER_PATCH_MASK 0xff +#define I40E_OEM_VER_BUILD_SHIFT 8 +#define I40E_OEM_VER_SHIFT 24 #define I40E_PHY_DEBUG_ALL \ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW) /* The values in here are decimal coded as hex as is the case in the NVM map*/ -#define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x40 +#define 
I40E_CURRENT_NVM_VERSION_HI 0x2 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 -/* magic for getting defines into strings */ -#define STRINGIFY(foo) #foo -#define XSTRINGIFY(bar) STRINGIFY(bar) - -#define I40E_RX_DESC(R, i) \ +#define I40E_RX_DESC(R, i) \ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) -#define I40E_TX_DESC(R, i) \ +#define I40E_TX_DESC(R, i) \ (&(((struct i40e_tx_desc *)((R)->desc))[i])) -#define I40E_TX_CTXTDESC(R, i) \ +#define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define I40E_TX_FDIRDESC(R, i) \ +#define I40E_TX_FDIRDESC(R, i) \ (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) /* default to trying for four seconds */ -#define I40E_TRY_LINK_TIMEOUT (4 * HZ) +#define I40E_TRY_LINK_TIMEOUT (4 * HZ) /** * i40e_is_mac_710 - Return true if MAC is X710/XL710 @@ -199,9 +195,9 @@ struct i40e_lump_tracking { #define I40E_FDIR_BUFFER_HEAD_ROOM 32 #define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) -#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) -#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) -#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4) +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -387,8 +383,8 @@ struct i40e_pf { struct mutex switch_mutex; u16 lan_vsi; /* our default LAN VSI */ u16 lan_veb; /* initial relay, if exists */ -#define I40E_NO_VEB 0xffff -#define I40E_NO_VSI 0xffff +#define I40E_NO_VEB 0xffff +#define I40E_NO_VSI 0xffff u16 next_vsi; /* Next unallocated VSI - 0-based! */ struct i40e_vsi **vsi; struct i40e_veb *veb[I40E_MAX_VEB]; @@ -423,8 +419,8 @@ struct i40e_pf { */ u16 dcbx_cap; - u32 fcoe_hmc_filt_num; - u32 fcoe_hmc_cntx_num; + u32 fcoe_hmc_filt_num; + u32 fcoe_hmc_cntx_num; struct i40e_filter_control_settings filter_settings; struct ptp_clock *ptp_clock; @@ -470,10 +466,10 @@ struct i40e_mac_filter { struct i40e_veb { struct i40e_pf *pf; u16 idx; - u16 veb_idx; /* index of VEB parent */ + u16 veb_idx; /* index of VEB parent */ u16 seid; u16 uplink_seid; - u16 stats_idx; /* index of VEB parent */ + u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ u16 flags; @@ -534,12 +530,13 @@ struct i40e_vsi { u32 promisc_threshold; u16 work_limit; - u16 int_rate_limit; /* value in usecs */ + u16 int_rate_limit; /* value in usecs */ + + u16 rss_table_size; /* HW RSS table size */ + u16 rss_size; /* Allocated RSS queues */ + u8 *rss_hkey_user; /* User configured hash keys */ + u8 *rss_lut_user; /* User configured lookup table entries */ - u16 rss_table_size; /* HW RSS table size */ - u16 rss_size; /* Allocated RSS queues */ - u8 *rss_hkey_user; /* User configured hash keys */ - u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; u16 rx_buf_len; @@ -550,14 +547,14 @@ struct i40e_vsi { int base_vector; bool irqs_ready; - u16 seid; /* HW index of this VSI (absolute index) */ - u16 id; /* VSI number */ + u16 seid; /* HW index of this VSI (absolute index) */ + u16 id; /* VSI number */ u16 uplink_seid; - u16 base_queue; /* vsi's first queue in hw array */ - u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ - u16 req_queue_pairs; /* User requested queue pairs */ - u16 num_queue_pairs; /* Used tx and rx pairs */ + u16 base_queue; /* vsi's first queue in hw array */ + u16 
alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ + u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ s16 vf_id; /* Virtual function ID for SRIOV VSIs */ @@ -576,19 +573,16 @@ struct i40e_vsi { /* TC BW limit max quanta within VSI */ u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS]; - struct i40e_pf *back; /* Backreference to associated PF */ - u16 idx; /* index in pf->vsi[] */ - u16 veb_idx; /* index of VEB parent */ - struct kobject *kobj; /* sysfs object */ - bool current_isup; /* Sync 'link up' logging */ + struct i40e_pf *back; /* Backreference to associated PF */ + u16 idx; /* index in pf->vsi[] */ + u16 veb_idx; /* index of VEB parent */ + struct kobject *kobj; /* sysfs object */ + bool current_isup; /* Sync 'link up' logging */ void *priv; /* client driver data reference. */ /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); - - /* current rxnfc data */ - struct ethtool_rxnfc rxnfc; /* current rss hash opts */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 11cf1a5ebccf..67e396b2b347 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -204,6 +204,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_suspend_port_tx = 0x041B, i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, @@ -450,13 +453,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; - __le32 pfpm_proxyfc; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -471,17 +476,19 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define 
I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -1582,6 +1589,24 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index 618f18436618..250db0b244b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -148,6 +148,11 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) "Cannot locate client instance virtual channel receive routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort virtchnl_receive\n"); + continue; + } cdev->client->ops->virtchnl_receive(&cdev->lan_info, cdev->client, vf_id, msg, len); @@ -181,6 +186,11 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); + continue; + } cdev->lan_info.params = params; cdev->client->ops->l2_param_change(&cdev->lan_info, cdev->client, @@ -306,6 +316,11 @@ void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id) "Cannot locate client instance VF reset routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-reset\n"); + continue; + } cdev->client->ops->vf_reset(&cdev->lan_info, cdev->client, vf_id); } @@ -336,6 +351,11 @@ void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs) "Cannot locate client instance VF enable routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-enable\n"); + continue; + } cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs); } @@ -370,6 +390,11 @@ int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id, "Cannot locate client instance VF capability routine\n"); continue; } + if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state)) { + dev_dbg(&pf->pdev->dev, "Client is not open, abort vf-capable\n"); + continue; + } capable = cdev->client->ops->vf_capable(&cdev->lan_info, cdev->client, vf_id); @@ -559,6 +584,7 @@ void i40e_client_subtask(struct i40e_pf *pf) pf->hw.bus.device, pf->hw.bus.func); } + mutex_lock(&i40e_client_instance_mutex); /* Send an Open request to the client */ atomic_inc(&cdev->ref_cnt); if (client->ops && client->ops->open) @@ -568,10 +594,12 @@ void i40e_client_subtask(struct i40e_pf *pf) set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state); } else { /* 
remove client instance */ + mutex_unlock(&i40e_client_instance_mutex); i40e_client_del_instance(pf, client); atomic_dec(&client->ref_cnt); continue; } + mutex_unlock(&i40e_client_instance_mutex); } mutex_unlock(&i40e_client_mutex); } @@ -654,7 +682,7 @@ int i40e_lan_del_device(struct i40e_pf *pf) static int i40e_client_release(struct i40e_client *client) { struct i40e_client_instance *cdev, *tmp; - struct i40e_pf *pf = NULL; + struct i40e_pf *pf; int ret = 0; LIST_HEAD(cdevs_tmp); @@ -664,12 +692,12 @@ static int i40e_client_release(struct i40e_client *client) if (strncmp(cdev->client->name, client->name, I40E_CLIENT_STR_LENGTH)) continue; + pf = (struct i40e_pf *)cdev->lan_info.pf; if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { if (atomic_read(&cdev->ref_cnt) > 0) { ret = I40E_ERR_NOT_READY; goto out; } - pf = (struct i40e_pf *)cdev->lan_info.pf; if (client->ops && client->ops->close) client->ops->close(&cdev->lan_info, client, false); @@ -681,8 +709,7 @@ static int i40e_client_release(struct i40e_client *client) client->name, pf->hw.pf_id); } /* delete the client instance from the list */ - list_del(&cdev->list); - list_add(&cdev->list, &cdevs_tmp); + list_move(&cdev->list, &cdevs_tmp); atomic_dec(&client->ref_cnt); dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n", client->name); @@ -811,7 +838,8 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev, wr32(hw, I40E_PFINT_AEQCTL, reg); } } - + /* Mitigate sync problems with iwarp VF driver */ + i40e_flush(hw); return 0; err: kfree(ldev->qvlist_info); @@ -1009,7 +1037,6 @@ int i40e_unregister_client(struct i40e_client *client) if (!i40e_client_is_registered(client)) { pr_info("i40e: Client %s has not been registered\n", client->name); - mutex_unlock(&i40e_client_mutex); ret = -ENODEV; goto out; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index a4601d97fb24..38a6c36a6a0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -36,9 +36,9 @@ #define I40E_CLIENT_VERSION_MINOR 01 #define I40E_CLIENT_VERSION_BUILD 00 #define I40E_CLIENT_VERSION_STR \ - XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \ - XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \ - XSTRINGIFY(I40E_CLIENT_VERSION_BUILD) + __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ + __stringify(I40E_CLIENT_VERSION_MINOR) "." 
\ + __stringify(I40E_CLIENT_VERSION_BUILD) struct i40e_client_version { u8 major; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c912e041d102..1835186b62c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1560,13 +1560,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } #endif for (i = 0; i < vsi->num_queue_pairs; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i); p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i); p += ETH_GSTRING_LEN; } if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) @@ -1581,16 +1581,16 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_tx_packets", i); + "veb.tc_%d_tx_packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_tx_bytes", i); + "veb.tc_%d_tx_bytes", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_rx_packets", i); + "veb.tc_%d_rx_packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%u_rx_bytes", i); + "veb.tc_%d_rx_bytes", i); p += ETH_GSTRING_LEN; } } @@ -1601,23 +1601,23 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%u_xon", i); + "port.tx_priority_%d_xon", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%u_xoff", i); + "port.tx_priority_%d_xoff", i); p += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%u_xon", i); + "port.rx_priority_%d_xon", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%u_xoff", i); + "port.rx_priority_%d_xoff", i); p += ETH_GSTRING_LEN; } for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%u_xon_2_xoff", i); + "port.rx_priority_%d_xon_2_xoff", i); p += ETH_GSTRING_LEN; } /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ @@ -2141,41 +2141,72 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, **/ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { + struct i40e_hw *hw = &pf->hw; + u8 flow_pctype = 0; + u64 i_set = 0; + cmd->data = 0; - if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { - cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; - cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; - return 0; - } - /* Report default options for RSS on i40e */ switch (cmd->flow_type) { case TCP_V4_FLOW: + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + break; case UDP_V4_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through to add IP fields */ + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + break; + case TCP_V6_FLOW: + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + break; + case UDP_V6_FLOW: + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case 
IPV4_FLOW: - cmd->data |= RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V6_FLOW: - case UDP_V6_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through to add IP fields */ case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; default: return -EINVAL; } + /* Read flow based hash input set register */ + if (flow_pctype) { + i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, + flow_pctype)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, + flow_pctype)) << 32); + } + + /* Process bits of hash input set */ + if (i_set) { + if (i_set & I40E_L4_SRC_MASK) + cmd->data |= RXH_L4_B_0_1; + if (i_set & I40E_L4_DST_MASK) + cmd->data |= RXH_L4_B_2_3; + + if (cmd->flow_type == TCP_V4_FLOW || + cmd->flow_type == UDP_V4_FLOW) { + if (i_set & I40E_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } else if (cmd->flow_type == TCP_V6_FLOW || + cmd->flow_type == UDP_V6_FLOW) { + if (i_set & I40E_L3_V6_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_V6_DST_MASK) + cmd->data |= RXH_IP_DST; + } + } + return 0; } @@ -2318,6 +2349,51 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, } /** + * i40e_get_rss_hash_bits - Read RSS Hash bits from register + * @nfc: pointer to user request + * @i_setc: bits currently set + * + * Returns value of bits to be set per user request + **/ +static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) +{ + u64 i_set = i_setc; + u64 src_l3 = 0, dst_l3 = 0; + + if (nfc->data & RXH_L4_B_0_1) + i_set |= I40E_L4_SRC_MASK; + else + i_set &= ~I40E_L4_SRC_MASK; + if (nfc->data & RXH_L4_B_2_3) + i_set |= I40E_L4_DST_MASK; + else + i_set &= ~I40E_L4_DST_MASK; + + if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) { + src_l3 = I40E_L3_V6_SRC_MASK; + dst_l3 = I40E_L3_V6_DST_MASK; + } else if (nfc->flow_type == TCP_V4_FLOW || + nfc->flow_type == UDP_V4_FLOW) { + src_l3 = I40E_L3_SRC_MASK; + dst_l3 = I40E_L3_DST_MASK; + } else { + /* Any other flow types are not supported here */ + return i_set; + } + + if (nfc->data & RXH_IP_SRC) + i_set |= src_l3; + else + i_set &= ~src_l3; + if (nfc->data & RXH_IP_DST) + i_set |= dst_l3; + else + i_set &= ~dst_l3; + + return i_set; +} + +/** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct * @cmd: ethtool rxnfc command @@ -2329,6 +2405,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); + u8 flow_pctype = 0; + u64 i_set, i_setc; /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports */ @@ -2337,75 +2415,39 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) RXH_L4_B_0_1 | RXH_L4_B_2_3)) return -EINVAL; - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - switch (nfc->flow_type) { case TCP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - - hena |= 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); break; case TCP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); break; case UDP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + + hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - return -EINVAL; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: - return -EINVAL; - } + flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + + hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2437,13 +2479,23 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } + if (flow_pctype) { + i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, + flow_pctype)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, + flow_pctype)) << 32); + i_set = i40e_get_rss_hash_bits(nfc, i_setc); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), + (u32)i_set); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), + (u32)(i_set >> 32)); + hena |= BIT_ULL(flow_pctype); + } + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); - /* Save setting for future output/update */ - pf->vsi[pf->lan_vsi]->rxnfc = *nfc; - return 0; } @@ -2744,11 +2796,15 @@ static void i40e_get_channels(struct net_device *dev, static int i40e_set_channels(struct net_device *dev, struct ethtool_channels *ch) { + const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int count = ch->combined_count; struct i40e_vsi *vsi = 
np->vsi; struct i40e_pf *pf = vsi->back; + struct i40e_fdir_filter *rule; + struct hlist_node *node2; int new_count; + int err = 0; /* We do not support setting channels for any other VSI at present */ if (vsi->type != I40E_VSI_MAIN) @@ -2766,6 +2822,26 @@ static int i40e_set_channels(struct net_device *dev, if (count > i40e_max_channels(vsi)) return -EINVAL; + /* verify that the number of channels does not invalidate any current + * flow director rules + */ + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (rule->dest_ctl != drop && count <= rule->q_index) { + dev_warn(&pf->pdev->dev, + "Existing user defined filter %d assigns flow to queue %d\n", + rule->fd_id, rule->q_index); + err = -EINVAL; + } + } + + if (err) { + dev_err(&pf->pdev->dev, + "Existing filter rules must be deleted to reduce combined channel count to %d\n", + count); + return err; + } + /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 828ed28c3c14..fcdea29be4ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -41,7 +41,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 12 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -527,6 +527,7 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) pf->veb[i]->stat_offsets_loaded = false; } } + pf->hw_csum_rx_error = 0; } /** @@ -4616,7 +4617,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - u8 i, enabled_tc; + u8 i, enabled_tc = 1; u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; @@ -4634,8 +4635,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) else return 1; /* Only TC0 */ - /* At least have TC0 */ - enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) num_tc++; @@ -7985,72 +7984,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) { - struct i40e_aqc_get_set_rss_key_data rss_key; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - bool pf_lut = false; - u8 *rss_lut; - int ret, i; - - memcpy(&rss_key, seed, sizeof(rss_key)); - - rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); - if (!rss_lut) - return -ENOMEM; - - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0; i < vsi->rss_table_size; i++) - rss_lut[i] = i % vsi->rss_size; + int ret = 0; - ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key); - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - goto config_rss_aq_out; + if (seed) { + struct i40e_aqc_get_set_rss_key_data *seed_dw = + (struct i40e_aqc_get_set_rss_key_data *)seed; + ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } } + if (lut) { + bool pf_lut = vsi->type == I40E_VSI_MAIN ? 
true : false; - if (vsi->type == I40E_VSI_MAIN) - pf_lut = true; - - ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut, - vsi->rss_table_size); - if (ret) - dev_info(&pf->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - -config_rss_aq_out: - kfree(rss_lut); - return ret; -} - -/** - * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used - * @vsi: VSI structure - **/ -static int i40e_vsi_config_rss(struct i40e_vsi *vsi) -{ - u8 seed[I40E_HKEY_ARRAY_SIZE]; - struct i40e_pf *pf = vsi->back; - u8 *lut; - int ret; - - if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) - return 0; - - lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); - vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs); - ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); - kfree(lut); - + ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } return ret; } @@ -8101,6 +8062,46 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, } /** + * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used + * @vsi: VSI structure + **/ +static int i40e_vsi_config_rss(struct i40e_vsi *vsi) +{ + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_pf *pf = vsi->back; + u8 *lut; + int ret; + + if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + return 0; + + if (!vsi->rss_size) + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + if (!vsi->rss_size) + return -EINVAL; + + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + /* Use the user configured hash keys and lookup table if there is one, + * otherwise use default + */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + + return ret; +} + +/** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure * @seed: RSS hash seed @@ -8691,6 +8692,28 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) } /** + * i40e_clear_rss_lut - clear the rx hash lookup table + * @vsi: the VSI being configured + **/ +static void i40e_clear_rss_lut(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 vf_id = vsi->vf_id; + u8 i; + + if (vsi->type == I40E_VSI_MAIN) { + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), 0); + } else if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0); + } else { + dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); + } +} + +/** * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting @@ -8703,6 +8726,12 @@ static int i40e_set_features(struct net_device *netdev, struct i40e_pf *pf = vsi->back; bool need_reset; + if 
(features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) + i40e_pf_config_rss(pf); + else if (!(features & NETIF_F_RXHASH) && + netdev->features & NETIF_F_RXHASH) + i40e_clear_rss_lut(vsi); + if (features & NETIF_F_HW_VLAN_CTAG_RX) i40e_vlan_stripping_enable(vsi); else @@ -11575,7 +11604,8 @@ static int __init i40e_init_module(void) * it can't be any worse than using the system workqueue which * was already single threaded */ - i40e_wq = create_singlethread_workqueue(i40e_driver_name); + i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index df7ecc9578c9..f8d66236fcbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2840,10 +2840,9 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_QW1_CMD_SHIFT); /* notify HW of packet */ - if (!tail_bump) + if (!tail_bump) { prefetchw(tx_desc + 1); - - if (tail_bump) { + } else { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -2852,7 +2851,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, wmb(); writel(i, tx_ring->tail); } - return; dma_error: diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6fcbf764f32b..da3423561b3a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -991,7 +991,10 @@ complete_reset: i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); - i40e_notify_client_of_vf_reset(pf, abs_vf_id); + /* Do not notify the client during VF init */ + if (vf->pf->num_alloc_vfs) + i40e_notify_client_of_vf_reset(pf, abs_vf_id); + vf->num_vlan = 0; } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -1089,7 +1092,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) goto err_iov; } } - i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); /* allocate memory */ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); if (!vfs) { @@ -1113,6 +1115,8 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) } pf->num_alloc_vfs = num_alloc_vfs; + i40e_notify_client_of_vf_enable(pf, num_alloc_vfs); + err_alloc: if (ret) i40e_free_vfs(pf); @@ -2314,6 +2318,7 @@ err: /* send the response back to the VF */ aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, aq_ret, (u8 *)vrh, len); + kfree(vrh); return aq_ret; } @@ -2995,6 +3000,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, else ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; ivi->spoofchk = vf->spoofchk; + ivi->trusted = vf->trusted; ret = 0; error_param: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 3114dcfa1724..40b0eafd0c71 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -204,6 +204,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_suspend_port_tx = 0x041B, i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + 
i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, @@ -447,13 +450,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; - __le32 pfpm_proxyfc; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -468,17 +473,19 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -1579,6 +1586,24 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index a579193b2c21..0130458264e5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -2068,10 +2068,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_QW1_CMD_SHIFT); /* notify HW of packet */ - if (!tail_bump) + if (!tail_bump) { prefetchw(tx_desc + 1); - - if (tail_bump) { + } else { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only * applicable for weak-ordered memory model archs, @@ -2080,7 +2079,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, wmb(); writel(i, tx_ring->tail); } - return; dma_error: diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 76ed97db28e2..dc00aaf94687 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -71,20 +71,20 @@ struct i40e_vsi { /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define I40EVF_DEFAULT_TXD 512 -#define I40EVF_DEFAULT_RXD 512 -#define I40EVF_MAX_TXD 4096 -#define I40EVF_MIN_TXD 64 -#define I40EVF_MAX_RXD 4096 -#define I40EVF_MIN_RXD 64 -#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 +#define I40EVF_DEFAULT_TXD 512 +#define I40EVF_DEFAULT_RXD 512 +#define I40EVF_MAX_TXD 4096 +#define I40EVF_MIN_TXD 64 +#define I40EVF_MAX_RXD 4096 +#define I40EVF_MIN_RXD 64 +#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 /* Supported Rx Buffer Sizes */ -#define I40EVF_RXBUFFER_2048 2048 -#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ -#define I40EVF_MAX_AQ_BUF_SIZE 4096 -#define I40EVF_AQ_LEN 32 -#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ +#define I40EVF_RXBUFFER_2048 2048 +#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ +#define I40EVF_MAX_AQ_BUF_SIZE 4096 +#define I40EVF_AQ_LEN 32 +#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -111,7 +111,7 @@ struct i40e_q_vector { u8 num_ringpairs; /* total number of ring pairs in vector */ #define ITR_COUNTDOWN_START 100 u8 itr_countdown; /* when 0 or 1 update ITR */ - int v_idx; /* vector index in list */ + int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; bool arm_wb_state; cpumask_var_t affinity_mask; @@ -129,11 +129,11 @@ struct i40e_q_vector { ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) -#define I40EVF_RX_DESC_ADV(R, i) \ +#define I40EVF_RX_DESC_ADV(R, i) \ (&(((union i40e_adv_rx_desc *)((R).desc))[i])) -#define I40EVF_TX_DESC_ADV(R, i) \ +#define I40EVF_TX_DESC_ADV(R, i) \ (&(((union i40e_adv_tx_desc *)((R).desc))[i])) -#define I40EVF_TX_CTXTDESC_ADV(R, i) \ +#define I40EVF_TX_CTXTDESC_ADV(R, i) \ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) #define OTHER_VECTOR 1 @@ -204,22 +204,25 @@ struct i40evf_adapter { struct msix_entry *msix_entries; u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_IMIR_ENABLED BIT(5) -#define I40EVF_FLAG_MQ_CAPABLE BIT(6) -#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) -#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) -#define I40EVF_FLAG_RESET_PENDING BIT(9) -#define I40EVF_FLAG_RESET_NEEDED BIT(10) +#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40EVF_FLAG_IN_NETPOLL BIT(4) +#define I40EVF_FLAG_IMIR_ENABLED BIT(5) +#define I40EVF_FLAG_MQ_CAPABLE BIT(6) +#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) +#define I40EVF_FLAG_RESET_PENDING BIT(9) +#define I40EVF_FLAG_RESET_NEEDED BIT(10) #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) +#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14) #define I40EVF_FLAG_PROMISC_ON BIT(15) #define I40EVF_FLAG_ALLMULTI_ON BIT(16) /* duplicates for common code */ -#define I40E_FLAG_FDIR_ATR_ENABLED 0 -#define I40E_FLAG_DCB_ENABLED 0 -#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED +#define I40E_FLAG_FDIR_ATR_ENABLED 0 +#define I40E_FLAG_DCB_ENABLED 0 +#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL +#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED #define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE /* flags for admin queue service task */ @@ -233,7 +236,7 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) #define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) #define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ #define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) /* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ #define I40EVF_FLAG_AQ_GET_HENA BIT(11) @@ -258,6 +261,7 @@ struct i40evf_adapter { struct work_struct watchdog_task; bool netdev_registered; bool link_up; + enum i40e_aq_link_speed link_speed; enum i40e_virtchnl_ops current_op; #define CLIENT_ENABLED(_a) ((_a)->vf_res ? 
\ (_a)->vf_res->vf_offload_flags & \ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index c9c202f6c521..e17a15456266 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -74,13 +74,33 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { static int i40evf_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { - /* In the future the VF will be able to query the PF for - * some information - for now use a dummy value - */ + struct i40evf_adapter *adapter = netdev_priv(netdev); + ecmd->supported = 0; ecmd->autoneg = AUTONEG_DISABLE; ecmd->transceiver = XCVR_DUMMY1; ecmd->port = PORT_NONE; + /* Set speed and duplex */ + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + ethtool_cmd_speed_set(ecmd, SPEED_40000); + break; + case I40E_LINK_SPEED_20GB: + ethtool_cmd_speed_set(ecmd, SPEED_20000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 600fb9c4a7f0..f751f7bc0d81 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 6 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 12 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) \ @@ -1420,7 +1420,9 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) { int err; + rtnl_lock(); err = i40evf_set_interrupt_capability(adapter); + rtnl_unlock(); if (err) { dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); @@ -1802,6 +1804,8 @@ continue_reset: } adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + /* Open RDMA Client again */ + adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); @@ -2831,7 +2835,8 @@ static int __init i40evf_init_module(void) pr_info("%s\n", i40evf_copyright); - i40evf_wq = create_singlethread_workqueue(i40evf_driver_name); + i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, + i40evf_driver_name); if (!i40evf_wq) { pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index d76c221d4c8a..cc6cb30c1667 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -817,6 +817,45 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter) } /** + * i40evf_print_link_message - print link up or down + * @adapter: adapter structure + * + * Log a message telling the world of our wondrous link status + */ +static void i40evf_print_link_message(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + char *speed = "Unknown "; + + if (!adapter->link_up) { + netdev_info(netdev, "NIC Link is Down\n"); + return; + } + + switch (adapter->link_speed) { + case I40E_LINK_SPEED_40GB: + speed = "40 G"; + break; + case I40E_LINK_SPEED_20GB: + speed = "20 G"; + break; + case I40E_LINK_SPEED_10GB: + speed = "10 G"; + break; + case I40E_LINK_SPEED_1GB: + speed = "1000 M"; + break; + case I40E_LINK_SPEED_100MB: + speed = "100 M"; + break; + default: + break; + } + + netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed); +} + +/** * i40evf_request_reset * @adapter: adapter structure * @@ -853,15 +892,13 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, (struct i40e_virtchnl_pf_event *)msg; switch (vpe->event) { case I40E_VIRTCHNL_EVENT_LINK_CHANGE: - adapter->link_up = - vpe->event_data.link_event.link_status; - if (adapter->link_up && !netif_carrier_ok(netdev)) { - dev_info(&adapter->pdev->dev, "NIC Link is Up\n"); - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - } else if (!adapter->link_up) { - dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); - netif_carrier_off(netdev); + adapter->link_speed = + vpe->event_data.link_event.link_speed; + if (adapter->link_up != + vpe->event_data.link_event.link_status) { + adapter->link_up = + vpe->event_data.link_event.link_status; + i40evf_print_link_message(adapter); netif_tx_stop_all_queues(netdev); } break; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 199ff98209cf..acf06051e111 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -188,6 +188,11 @@ struct e1000_adv_tx_context_desc { /* ETQF register bit definitions */ #define E1000_ETQF_FILTER_ENABLE BIT(26) #define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define 
E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF /* FTQF register bit definitions */ #define E1000_FTQF_VF_BP 0x00008000 diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 2997c443c5dc..2688180a7acd 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1024,4 +1024,8 @@ #define E1000_RTTBCNRC_RF_INT_MASK \ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 21d9d02885cb..d84afdd83e53 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -309,6 +309,7 @@ (0x054E0 + ((_i - 16) * 8))) #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 5387b3a96489..03fbe4b7663b 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -350,11 +350,49 @@ struct hwmon_buff { }; #endif +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! 
+ * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + #define IGB_N_EXTTS 2 #define IGB_N_PEROUT 2 #define IGB_N_SDP 4 #define IGB_RETA_SIZE 128 +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -473,6 +511,13 @@ struct igb_adapter { int copper_tries; struct e1000_info ei; u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; }; /* flags controlling PTP/1588 function */ @@ -599,4 +644,9 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); } +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 64e91c575a39..0c33eca7c832 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2431,6 +2431,63 @@ static int igb_get_ts_info(struct net_device *dev, } } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + static int igb_get_rss_hash_opts(struct igb_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2484,6 +2541,16 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_rx_queues; 
ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; case ETHTOOL_GRXFH: ret = igb_get_rss_hash_opts(adapter, cmd); break; @@ -2598,6 +2665,279 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter, return 0; } +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + int err = -EINVAL; + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + 
igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&parent->nfc_node, &input->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. + */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} 
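Editorial aside, not part of the patch: igb_add_ethtool_nfc_entry() above services the ETHTOOL_SRXCLSRLINS request, i.e. what "ethtool -N <dev> flow-type ether proto <etype> action <queue> loc <idx>" issues. The sketch below is a minimal, hedged user-space illustration of one request shape that passes the checks the hunk adds; the helper name add_etype_rule, the example EtherType 0x88B5 and the interface name "eth0" are illustrative assumptions, not taken from the patch.

/* Hypothetical user-space sketch: install one EtherType steering rule
 * through the SIOCETHTOOL ioctl. It satisfies the driver checks above:
 * a full EtherType mask, a location below IGB_MAX_RXNFC_FILTERS (16),
 * and a ring_cookie below the number of online Rx queues.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <arpa/inet.h>
#include <unistd.h>

static int add_etype_rule(const char *ifname, unsigned short etype,
			  unsigned long long queue, unsigned int loc)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, err;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(etype);	/* EtherType to match */
	nfc.fs.m_u.ether_spec.h_proto = 0xffff;	/* full mask, as the driver requires */
	nfc.fs.ring_cookie = queue;	/* target Rx queue; must be < num_rx_queues */
	nfc.fs.location = loc;		/* rule index, 0 .. IGB_MAX_RXNFC_FILTERS - 1 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	err = ioctl(fd, SIOCETHTOOL, &ifr);	/* e.g. add_etype_rule("eth0", 0x88B5, 1, 0) */
	close(fd);
	return err;
}
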
+ +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct igb_adapter *adapter = netdev_priv(dev); @@ -2607,6 +2947,11 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXFH: ret = igb_set_rss_hash_opt(adapter, cmd); break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 942a89fb0090..af75eac5fa16 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -176,6 +176,8 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); +static void igb_nfc_filter_exit(struct igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); @@ -1611,6 +1613,7 @@ static void igb_configure(struct igb_adapter *adapter) igb_setup_mrqc(adapter); igb_setup_rctl(adapter); + igb_nfc_filter_restore(adapter); igb_configure_tx(adapter); igb_configure_rx(adapter); @@ -2059,6 +2062,21 @@ static int igb_set_features(struct net_device *netdev, if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + netdev->features = features; if (netif_running(netdev)) @@ -3053,6 +3071,7 @@ static int igb_sw_init(struct igb_adapter *adapter) VLAN_HLEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + spin_lock_init(&adapter->nfc_lock); spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { @@ -3240,6 +3259,8 @@ static int __igb_close(struct net_device *netdev, bool suspending) igb_down(adapter); igb_free_irq(adapter); + igb_nfc_filter_exit(adapter); + igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); @@ -8306,4 +8327,28 @@ int igb_reinit_queues(struct igb_adapter *adapter) return err; } + +static void igb_nfc_filter_exit(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igb_nfc_filter_restore(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} /* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c 
b/drivers/net/ethernet/intel/igb/igb_ptp.c index 336c103ae374..66dfa2085cc7 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -998,12 +998,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, /* define ethertype filter for timestamped packets */ if (is_l2) - wr32(E1000_ETQF(3), + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), (E1000_ETQF_FILTER_ENABLE | /* enable filter */ E1000_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else - wr32(E1000_ETQF(3), 0); + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 9475ff9055aa..33c025055011 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -645,6 +645,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) #define IXGBE_FLAG_DCB_CAPABLE BIT(27) +#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE BIT(0) @@ -653,13 +654,12 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) #define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) #define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) -#define IXGBE_FLAG2_RESET_REQUESTED BIT(6) #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) #define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) -#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) +#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) /* Tx fast path data */ @@ -673,6 +673,7 @@ struct ixgbe_adapter { /* Port number used to identify VXLAN traffic */ __be16 vxlan_port; + __be16 geneve_port; /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; @@ -840,6 +841,7 @@ enum ixgbe_state_t { __IXGBE_IN_SFP_INIT, __IXGBE_PTP_RUNNING, __IXGBE_PTP_TX_IN_PROGRESS, + __IXGBE_RESET_REQUESTED, }; struct ixgbe_cb { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index c47b605e8651..77d3039283f6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -99,6 +99,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550T: case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: supported = true; break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0d7209eb5abf..9547191e26c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -193,7 +193,9 @@ static int ixgbe_get_settings(struct net_device *netdev, if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ecmd->supported |= ixgbe_get_supported_10gtypes(hw); if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? + SUPPORTED_1000baseKX_Full : + SUPPORTED_1000baseT_Full; if (supported_link & IXGBE_LINK_SPEED_100_FULL) ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ? 
SUPPORTED_1000baseKX_Full : diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b4f03748adc0..d76bc1a313ea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -137,6 +137,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, /* required last entry */ {0, } @@ -1103,7 +1104,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) { - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); e_warn(drv, "initiating reset due to tx timeout\n"); ixgbe_service_event_schedule(adapter); } @@ -1495,7 +1496,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, struct sk_buff *skb) { __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; - __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; bool encap_pkt = false; skb_checksum_none_assert(skb); @@ -1504,8 +1504,8 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; - if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) && - (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) { + /* check for VXLAN and Geneve packets */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { encap_pkt = true; skb->encapsulation = 1; } @@ -2777,7 +2777,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) } if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } @@ -3007,7 +3007,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ixgbe_service_event_schedule(adapter); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } @@ -3224,7 +3224,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) - e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); + hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); } static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) @@ -3922,6 +3922,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) rfctl &= ~IXGBE_RFCTL_RSC_DIS; if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) rfctl |= IXGBE_RFCTL_RSC_DIS; + + /* disable NFS filtering */ + rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); /* Program registers for the distribution of queues */ @@ -4586,18 +4589,23 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) } } -static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) +static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) { - 
switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); + struct ixgbe_hw *hw = &adapter->hw; + u32 vxlanctrl; + + if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | + IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) + return; + + vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); + + if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) adapter->vxlan_port = 0; - break; - default: - break; - } + + if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) + adapter->geneve_port = 0; } #ifdef CONFIG_IXGBE_DCB @@ -5500,8 +5508,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); - adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | - IXGBE_FLAG2_RESET_REQUESTED); + clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; del_timer_sync(&adapter->service_timer); @@ -5711,8 +5719,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; - case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; + /* fall through */ + case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; #endif @@ -6144,7 +6154,7 @@ int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); - ixgbe_clear_vxlan_port(adapter); + ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); udp_tunnel_get_rx_info(netdev); return 0; @@ -6921,7 +6931,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) * (Do the reset outside of interrupt context). 
*/ e_warn(drv, "initiating reset to clear Tx work after link loss\n"); - adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); } } } @@ -7187,11 +7197,9 @@ static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) { - if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) + if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; - adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; - /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || @@ -7225,9 +7233,9 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } - if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); - adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; + adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; udp_tunnel_get_rx_info(adapter->netdev); rtnl_unlock(); } @@ -7667,6 +7675,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring, if (adapter->vxlan_port && udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); + + if (adapter->geneve_port && + udp_hdr(skb)->dest == adapter->geneve_port) + hdr.network = skb_inner_network_header(skb); } /* Currently only IPv4/IPv6 with TCP is supported */ @@ -8802,10 +8814,23 @@ static int ixgbe_set_features(struct net_device *netdev, netdev->features = features; if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) - adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; - else - ixgbe_clear_vxlan_port(adapter); + if (features & NETIF_F_RXCSUM) { + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } + } + + if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { + if (features & NETIF_F_RXCSUM) { + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; + } else { + u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + } } if (need_reset) @@ -8818,67 +8843,115 @@ static int ixgbe_set_features(struct net_device *netdev, } /** - * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up + * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports * @dev: The port's netdev * @ti: Tunnel endpoint information **/ -static void ixgbe_add_vxlan_port(struct net_device *dev, - struct udp_tunnel_info *ti) +static void ixgbe_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; __be16 port = ti->port; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; + u32 port_shift = 0; + u32 reg; if (ti->sa_family != AF_INET) return; - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; - if (adapter->vxlan_port == port) - return; + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), + ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & 
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port == port) + return; + + if (adapter->geneve_port) { + netdev_info(dev, + "GENEVE port %d set, not adding port %d\n", + ntohs(adapter->geneve_port), + ntohs(port)); + return; + } - if (adapter->vxlan_port) { - netdev_info(dev, - "Hit Max num of VXLAN ports, not adding port %d\n", - ntohs(port)); + port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; + adapter->geneve_port = port; + break; + default: return; } - adapter->vxlan_port = port; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); + reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); } /** - * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away + * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports * @dev: The port's netdev * @ti: Tunnel endpoint information **/ -static void ixgbe_del_vxlan_port(struct net_device *dev, - struct udp_tunnel_info *ti) +static void ixgbe_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); + u32 port_mask; - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN && + ti->type != UDP_TUNNEL_TYPE_GENEVE) return; if (ti->sa_family != AF_INET) return; - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; - if (adapter->vxlan_port != ti->port) { - netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(ti->port)); + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + + if (adapter->geneve_port != ti->port) { + netdev_info(dev, "GENEVE port %d not found\n", + ntohs(ti->port)); + return; + } + + port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + break; + default: return; } - ixgbe_clear_vxlan_port(adapter); - adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; + ixgbe_clear_udp_tunnel_port(adapter, port_mask); + adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; } static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -9192,8 +9265,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, - .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, + .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, + .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 1248a9936f7a..31d82e3abac8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -90,6 +90,7 @@ #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 #define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 #define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ @@ -487,6 +488,13 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host * Filter Table */ +/* masks for accessing VXLAN and GENEVE UDP 
ports */ +#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ +#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ + +#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 + #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 @@ -1823,6 +1831,9 @@ enum { #define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) #define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) #define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) +#define IXGBE_X557_LED_MANUAL_SET_MASK BIT(8) +#define IXGBE_X557_MAX_LED_INDEX 3 +#define IXGBE_X557_LED_PROVISIONING 0xC430 /* LED modes */ #define IXGBE_LED_LINK_UP 0x0 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 4716ca499e67..fb1b819d8311 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -295,6 +295,12 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; + case IXGBE_DEV_ID_X550EM_A_10G_T: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); @@ -2114,6 +2120,50 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) return ixgbe_enable_lasi_ext_t_x550em(hw); } +/** + * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn on + **/ +s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn on the LED, set mode to ON. */ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return 0; +} + +/** + * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @led_idx: led number to turn off + **/ +s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +{ + u16 phy_data; + + if (led_idx >= IXGBE_X557_MAX_LED_INDEX) + return IXGBE_ERR_PARAM; + + /* To turn off the LED, set mode to OFF.
*/ + hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); + phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; + hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); + + return 0; +} + /** ixgbe_get_lcd_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed @@ -2456,6 +2506,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: media_type = ixgbe_media_type_copper; break; default: @@ -2514,6 +2565,9 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: /* Config MDIO clock speed before the first MDIO PHY access */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); @@ -2853,8 +2907,6 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .write_analog_reg8 = NULL, \ .set_rxpba = &ixgbe_set_rxpba_generic, \ .check_link = &ixgbe_check_mac_link_generic, \ - .led_on = &ixgbe_led_on_generic, \ - .led_off = &ixgbe_led_off_generic, \ .blink_led_start = &ixgbe_blink_led_start_X540, \ .blink_led_stop = &ixgbe_blink_led_stop_X540, \ .set_rar = &ixgbe_set_rar_generic, \ @@ -2886,6 +2938,8 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, static const struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC + .led_on = ixgbe_led_on_generic, + .led_off = ixgbe_led_off_generic, .reset_hw = &ixgbe_reset_hw_X540, .get_media_type = &ixgbe_get_media_type_X540, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, @@ -2904,6 +2958,8 @@ static const struct ixgbe_mac_operations mac_ops_X550 = { static const struct ixgbe_mac_operations mac_ops_X550EM_x = { X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, .reset_hw = &ixgbe_reset_hw_X550em, .get_media_type = &ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, @@ -2922,6 +2978,8 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x = { static struct ixgbe_mac_operations mac_ops_x550em_a = { X550_COMMON_MAC + .led_on = ixgbe_led_on_t_x550em, + .led_off = ixgbe_led_off_t_x550em, .reset_hw = ixgbe_reset_hw_X550em, .get_media_type = ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index be52f597688b..5639fbe294d0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -502,12 +502,9 @@ extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); -#ifdef DEBUG -char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); -#define hw_dbg(hw, format, arg...) \ - printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg) -#else -#define hw_dbg(hw, format, arg...) do {} while (0) -#endif +#define ixgbevf_hw_to_netdev(hw) \ + (((struct ixgbevf_adapter *)(hw)->back)->netdev) +#define hw_dbg(hw, format, arg...) 
\ + netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg) #endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d9d6616f02a4..4044608083cd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1612,7 +1612,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); if (!wait_loop) - pr_err("Could not enable Tx Queue %d\n", reg_idx); + hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); } /** @@ -2993,6 +2993,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) **/ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) { + struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index a52f70ec42b6..d46ba1dabcb7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -284,7 +284,8 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) if (addr) ether_addr_copy(msg_addr, addr); - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (!ret_val) { msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -441,7 +442,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; ether_addr_copy(msg_addr, addr); - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -551,7 +553,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; msgbuf[1] = xcast_mode; - err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (err) return err; @@ -588,7 +591,8 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (err) goto mbx_err; @@ -791,7 +795,8 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) msgbuf[0] = IXGBE_VF_SET_LPE; msgbuf[1] = max_size; - ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, + sizeof(msgbuf) / sizeof(u32)); if (ret_val) return ret_val; if ((msgbuf[0] & IXGBE_VF_SET_LPE) && @@ -837,7 +842,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) msg[1] = api; msg[2] = 0; - err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, + sizeof(msg) / sizeof(u32)); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -887,7 +893,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, msg[0] = IXGBE_VF_GET_QUEUE; msg[1] = msg[2] = msg[3] = msg[4] = 0; - err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, + sizeof(msg) / sizeof(u32)); if 
(!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d41c28d00b57..8e4252dd9a9d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -399,7 +399,6 @@ struct mvneta_port { u16 rx_ring_size; struct mii_bus *mii_bus; - struct phy_device *phy_dev; phy_interface_t phy_interface; struct device_node *phy_node; unsigned int link; @@ -2651,6 +2650,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget) u32 cause_rx_tx; int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); + struct net_device *ndev = pp->dev; struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); if (!netif_running(pp->dev)) { @@ -2668,7 +2668,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget) (MVNETA_CAUSE_PHY_STATUS_CHANGE | MVNETA_CAUSE_LINK_CHANGE | MVNETA_CAUSE_PSC_SYNC_CHANGE))) { - mvneta_fixed_link_update(pp, pp->phy_dev); + mvneta_fixed_link_update(pp, ndev->phydev); } } @@ -2963,6 +2963,7 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; + struct net_device *ndev = pp->dev; mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -2985,15 +2986,16 @@ static void mvneta_start_dev(struct mvneta_port *pp) MVNETA_CAUSE_LINK_CHANGE | MVNETA_CAUSE_PSC_SYNC_CHANGE); - phy_start(pp->phy_dev); + phy_start(ndev->phydev); netif_tx_start_all_queues(pp->dev); } static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; + struct net_device *ndev = pp->dev; - phy_stop(pp->phy_dev); + phy_stop(ndev->phydev); for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); @@ -3166,7 +3168,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) static void mvneta_adjust_link(struct net_device *ndev) { struct mvneta_port *pp = netdev_priv(ndev); - struct phy_device *phydev = pp->phy_dev; + struct phy_device *phydev = ndev->phydev; int status_change = 0; if (phydev->link) { @@ -3244,7 +3246,6 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->advertising = phy_dev->supported; - pp->phy_dev = phy_dev; pp->link = 0; pp->duplex = 0; pp->speed = 0; @@ -3254,8 +3255,9 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) static void mvneta_mdio_remove(struct mvneta_port *pp) { - phy_disconnect(pp->phy_dev); - pp->phy_dev = NULL; + struct net_device *ndev = pp->dev; + + phy_disconnect(ndev->phydev); } /* Electing a CPU must be done in an atomic way: it should be done @@ -3495,42 +3497,30 @@ static int mvneta_stop(struct net_device *dev) static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct mvneta_port *pp = netdev_priv(dev); - - if (!pp->phy_dev) + if (!dev->phydev) return -ENOTSUPP; - return phy_mii_ioctl(pp->phy_dev, ifr, cmd); + return phy_mii_ioctl(dev->phydev, ifr, cmd); } /* Ethtool methods */ -/* Get settings (phy address, speed) for ethtools */ -int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +/* Set link ksettings (phy address, speed) for ethtools */ +int mvneta_ethtool_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { - struct mvneta_port *pp = netdev_priv(dev); - - if (!pp->phy_dev) - return -ENODEV; - - return phy_ethtool_gset(pp->phy_dev, cmd); -} - -/* Set settings (phy address, speed) for ethtools */ -int mvneta_ethtool_set_settings(struct net_device *dev, 
struct ethtool_cmd *cmd) -{ - struct mvneta_port *pp = netdev_priv(dev); - struct phy_device *phydev = pp->phy_dev; + struct mvneta_port *pp = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; if (!phydev) return -ENODEV; - if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { + if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { u32 val; - mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); + mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE); - if (cmd->autoneg == AUTONEG_DISABLE) { + if (cmd->base.autoneg == AUTONEG_DISABLE) { val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | @@ -3547,17 +3537,17 @@ int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } - pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); + pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE); netdev_info(pp->dev, "autoneg status set to %i\n", pp->use_inband_status); - if (netif_running(dev)) { + if (netif_running(ndev)) { mvneta_port_down(pp); mvneta_port_up(pp); } } - return phy_ethtool_sset(pp->phy_dev, cmd); + return phy_ethtool_ksettings_set(ndev->phydev, cmd); } /* Set interrupt coalescing for ethtools */ @@ -3821,8 +3811,6 @@ static const struct net_device_ops mvneta_netdev_ops = { const struct ethtool_ops mvneta_eth_tool_ops = { .get_link = ethtool_op_get_link, - .get_settings = mvneta_ethtool_get_settings, - .set_settings = mvneta_ethtool_set_settings, .set_coalesce = mvneta_ethtool_set_coalesce, .get_coalesce = mvneta_ethtool_get_coalesce, .get_drvinfo = mvneta_ethtool_get_drvinfo, @@ -3835,6 +3823,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = { .get_rxnfc = mvneta_ethtool_get_rxnfc, .get_rxfh = mvneta_ethtool_get_rxfh, .set_rxfh = mvneta_ethtool_set_rxfh, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = mvneta_ethtool_set_link_ksettings, }; /* Initialize hw */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f1609542adf1..0fd9fc8d2a79 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -342,25 +342,27 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) mdiobus_free(eth->mii_bus); } -static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) +static inline void mtk_irq_disable(struct mtk_eth *eth, + unsigned reg, u32 mask) { unsigned long flags; u32 val; spin_lock_irqsave(&eth->irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, reg); + mtk_w32(eth, val & ~mask, reg); spin_unlock_irqrestore(&eth->irq_lock, flags); } -static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) +static inline void mtk_irq_enable(struct mtk_eth *eth, + unsigned reg, u32 mask) { unsigned long flags; u32 val; spin_lock_irqsave(&eth->irq_lock, flags); - val = mtk_r32(eth, MTK_QDMA_INT_MASK); - mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); + val = mtk_r32(eth, reg); + mtk_w32(eth, val | mask, reg); spin_unlock_irqrestore(&eth->irq_lock, flags); }
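The mtk_irq_disable()/mtk_irq_enable() rework above parameterizes the mask register so one read-modify-write helper can serve both the QDMA and the new PDMA interrupt blocks. A minimal stand-alone sketch of that pattern, with hypothetical mmio_read32()/mmio_write32() accessors standing in for the driver's mtk_r32()/mtk_w32():

#include <stdint.h>

/* Hypothetical MMIO accessors; placeholders for mtk_r32()/mtk_w32(). */
static uint32_t mmio_read32(volatile uint32_t *reg)
{
	return *reg;
}

static void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

/* Clear @mask in whichever interrupt-mask register @reg points at; the
 * driver serializes callers with eth->irq_lock so the read-modify-write
 * cannot race between the two blocks. */
static void irq_mask_disable(volatile uint32_t *reg, uint32_t mask)
{
	mmio_write32(reg, mmio_read32(reg) & ~mask);
}

static void irq_mask_enable(volatile uint32_t *reg, uint32_t mask)
{
	mmio_write32(reg, mmio_read32(reg) | mask);
}

Passing ~0 as the mask, as the later hunks do, masks or unmasks every source in that block at once.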
@@ -369,18 +371,17 @@ static int mtk_set_mac_address(struct net_device *dev, void *p) int ret = eth_mac_addr(dev, p); struct mtk_mac *mac = netdev_priv(dev); const char *macaddr = dev->dev_addr; - unsigned long flags; if (ret) return ret; - spin_lock_irqsave(&mac->hw->page_lock, flags); + spin_lock_bh(&mac->hw->page_lock); mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA_MAC_ADRH(mac->id)); mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | (macaddr[4] << 8) | macaddr[5], MTK_GDMA_MAC_ADRL(mac->id)); - spin_unlock_irqrestore(&mac->hw->page_lock, flags); + spin_unlock_bh(&mac->hw->page_lock); return 0; } @@ -764,7 +765,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mtk_eth *eth = mac->hw; struct mtk_tx_ring *ring = &eth->tx_ring; struct net_device_stats *stats = &dev->stats; - unsigned long flags; bool gso = false; int tx_num; @@ -772,14 +772,14 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) * however we have 2 queues running on the same ring so we need to lock * the ring access */ - spin_lock_irqsave(&eth->page_lock, flags); + spin_lock(&eth->page_lock); tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { mtk_stop_queue(eth); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); - spin_unlock_irqrestore(&eth->page_lock, flags); + spin_unlock(&eth->page_lock); return NETDEV_TX_BUSY; } @@ -804,12 +804,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) mtk_stop_queue(eth); - spin_unlock_irqrestore(&eth->page_lock, flags); + spin_unlock(&eth->page_lock); return NETDEV_TX_OK; drop: - spin_unlock_irqrestore(&eth->page_lock, flags); + spin_unlock(&eth->page_lock); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -899,12 +899,12 @@ release_desc: * we continue */ wmb(); - mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0); + mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0); done++; } if (done < budget) - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); return done; } @@ -1014,7 +1014,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget) return budget; napi_complete(napi); - mtk_irq_enable(eth, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); return tx_done; } @@ -1026,12 +1026,12 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) int rx_done = 0; mtk_handle_status_irq(eth); - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); rx_done = mtk_poll_rx(napi, budget, eth); if (unlikely(netif_msg_intr(eth))) { - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + status = mtk_r32(eth, MTK_PDMA_INT_STATUS); + mask = mtk_r32(eth, MTK_PDMA_INT_MASK); dev_info(eth->dev, "done rx %d, intr 0x%08x/0x%x\n", rx_done, status, mask); @@ -1040,12 +1040,12 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) if (rx_done == budget) return budget; - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + status = mtk_r32(eth, MTK_PDMA_INT_STATUS); if (status & MTK_RX_DONE_INT) return budget; napi_complete(napi); - mtk_irq_enable(eth, MTK_RX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); return rx_done; } @@ -1094,6 +1094,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) mtk_w32(eth, ring->phys + ((MTK_DMA_SIZE - 1) * sz), MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); return 0;
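mtk_start_xmit() above can drop the irqsave lock variant because ndo_start_xmit already runs with bottom halves disabled, and it polices ring occupancy in two places: a frame needing more descriptor slots than remain is rejected with NETDEV_TX_BUSY, and the queue is stopped early once free slots fall to the ring threshold. A toy model of that accounting, with all names invented for the sketch:

#include <stdatomic.h>
#include <stdbool.h>

struct tx_ring_model {
	atomic_int free_count;	/* descriptor slots still available */
	int thresh;		/* stop the queue at or below this level */
};

/* Returns false when the frame must be rejected (ring already full);
 * *stop_queue tells the caller to stop the netdev queue, mirroring
 * mtk_stop_queue() in the hunks above. */
static bool tx_ring_reserve(struct tx_ring_model *ring, int tx_num,
			    bool *stop_queue)
{
	if (atomic_load(&ring->free_count) <= tx_num) {
		*stop_queue = true;	/* full: the NETDEV_TX_BUSY case */
		return false;
	}

	atomic_fetch_sub(&ring->free_count, tx_num);
	*stop_queue = atomic_load(&ring->free_count) <= ring->thresh;
	return true;
}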
@@ -1164,11 +1165,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth) */ wmb(); - mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0); - mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0); - mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0); - mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0); + mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0); + mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX); return 0; } @@ -1287,7 +1287,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) if (likely(napi_schedule_prep(&eth->rx_napi))) { __napi_schedule(&eth->rx_napi); - mtk_irq_disable(eth, MTK_RX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } return IRQ_HANDLED; @@ -1299,7 +1299,7 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) if (likely(napi_schedule_prep(&eth->tx_napi))) { __napi_schedule(&eth->tx_napi); - mtk_irq_disable(eth, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); } return IRQ_HANDLED; @@ -1310,11 +1310,12 @@ static void mtk_poll_controller(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; - mtk_irq_disable(eth, int_mask); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); mtk_handle_irq_rx(eth->irq[2], dev); - mtk_irq_enable(eth, int_mask); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } #endif @@ -1329,11 +1330,15 @@ static int mtk_start_dma(struct mtk_eth *eth) } mtk_w32(eth, - MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN | - MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS | - MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO, + MTK_TX_WB_DDONE | MTK_TX_DMA_EN | + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, MTK_QDMA_GLO_CFG); + mtk_w32(eth, + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + MTK_PDMA_GLO_CFG); + return 0; } @@ -1351,7 +1356,8 @@ static int mtk_open(struct net_device *dev) napi_enable(&eth->tx_napi); napi_enable(&eth->rx_napi); - mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } atomic_inc(&eth->dma_refcnt); @@ -1363,16 +1369,15 @@ static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) { - unsigned long flags; u32 val; int i; /* stop the dma engine */ - spin_lock_irqsave(&eth->page_lock, flags); + spin_lock_bh(&eth->page_lock); val = mtk_r32(eth, glo_cfg); mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), glo_cfg); - spin_unlock_irqrestore(&eth->page_lock, flags); + spin_unlock_bh(&eth->page_lock); /* wait for dma stop */ for (i = 0; i < 10; i++) { @@ -1397,7 +1402,8 @@ static int mtk_stop(struct net_device *dev) if (!atomic_dec_and_test(&eth->dma_refcnt)) return 0; - mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); napi_disable(&eth->tx_napi); napi_disable(&eth->rx_napi); @@ -1451,7 +1457,9 @@ static int __init mtk_hw_init(struct mtk_eth *eth) /* disable delay and normal interrupt */ mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); - mtk_irq_disable(eth, ~0); + mtk_w32(eth, 0, MTK_PDMA_DELAY_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); mtk_w32(eth, 0, MTK_RST_GL);
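With RX moved to PDMA, initialization has to quiesce two independent interrupt blocks: mtk_hw_init() above zeroes both delay-interrupt registers and then masks every source in the QDMA and PDMA mask registers. A compact sketch of that quiesce step over a hypothetical per-block register table (struct irq_block and quiesce_irq_blocks() are invented names):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical descriptor for one interrupt block: its delay-interrupt
 * register and its mask register as raw MMIO pointers. */
struct irq_block {
	volatile uint32_t *delay_int;
	volatile uint32_t *int_mask;
};

/* Zero the delay timers and mask all sources, in the same order the
 * hunk above handles QDMA then PDMA. Writing 0 to the mask register is
 * what mtk_irq_disable(eth, reg, ~0) reduces to. */
static void quiesce_irq_blocks(const struct irq_block *blk, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		*blk[i].delay_int = 0;
		*blk[i].int_mask = 0;
	}
}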
@@ -1465,9 +1473,8 @@ static int __init mtk_hw_init(struct mtk_eth *eth) for (i = 0; i < 2; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); - /* setup the forward port to send frame to QDMA */ + /* setup the forward port to send frame to PDMA */ val &= ~0xffff; - val |= 0x5555; /* Enable RX checksum */ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; @@ -1507,7 +1514,8 @@ static void mtk_uninit(struct net_device *dev) phy_disconnect(mac->phy_dev); mtk_mdio_cleanup(eth); - mtk_irq_disable(eth, ~0); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); free_irq(eth->irq[1], dev); free_irq(eth->irq[2], dev); } @@ -1686,7 +1694,7 @@ static void mtk_get_ethtool_stats(struct net_device *dev, } do { - data_src = (u64*)hwstats; + data_src = (u64 *)hwstats; data_dst = data; start = u64_stats_fetch_begin_irq(&hwstats->syncp); @@ -1912,7 +1920,6 @@ static int mtk_remove(struct platform_device *pdev) netif_napi_del(&eth->tx_napi); netif_napi_del(&eth->rx_napi); mtk_cleanup(eth); - platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index f82e3acb947b..7c1f3f2e11d4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -68,6 +68,32 @@ /* Unicast Filter MAC Address Register - High */ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +/* PDMA RX Base Pointer Register */ +#define MTK_PRX_BASE_PTR0 0x900 + +/* PDMA RX Maximum Count Register */ +#define MTK_PRX_MAX_CNT0 0x904 + +/* PDMA RX CPU Pointer Register */ +#define MTK_PRX_CRX_IDX0 0x908 + +/* PDMA Global Configuration Register */ +#define MTK_PDMA_GLO_CFG 0xa04 +#define MTK_MULTI_EN BIT(10) + +/* PDMA Reset Index Register */ +#define MTK_PDMA_RST_IDX 0xa08 +#define MTK_PST_DRX_IDX0 BIT(16) + +/* PDMA Delay Interrupt Register */ +#define MTK_PDMA_DELAY_INT 0xa0c + +/* PDMA Interrupt Status Register */ +#define MTK_PDMA_INT_STATUS 0xa20 + +/* PDMA Interrupt Mask Register */ +#define MTK_PDMA_INT_MASK 0xa28 + /* PDMA Interrupt grouping registers */ #define MTK_PDMA_INT_GRP1 0xa50 #define MTK_PDMA_INT_GRP2 0xa54 @@ -119,13 +145,16 @@ /* QDMA Interrupt Status Register */ #define MTK_QMTK_INT_STATUS 0x1A18 +#define MTK_RX_DONE_INT3 BIT(19) +#define MTK_RX_DONE_INT2 BIT(18) #define MTK_RX_DONE_INT1 BIT(17) #define MTK_RX_DONE_INT0 BIT(16) #define MTK_TX_DONE_INT3 BIT(3) #define MTK_TX_DONE_INT2 BIT(2) #define MTK_TX_DONE_INT1 BIT(1) #define MTK_TX_DONE_INT0 BIT(0) -#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1) +#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1 | \ + MTK_RX_DONE_INT2 | MTK_RX_DONE_INT3) #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 05cc1effc13c..dad326ccd4dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ - fs_counters.o rl.o + fs_counters.o rl.o lag.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c2ec01a22d55..1e639f886021 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
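The cmd.c hunks below retire the hand-rolled mlx5_inbox_hdr/mlx5_outbox_hdr structs in favour of MLX5_GET/MLX5_SET accessors over the mlx5_ifc mailbox layouts, and fold status decoding into a single mlx5_cmd_check() helper. As a rough model of what such accessors compute (not the kernel macros themselves; mbox_get(), mbox_status() and mbox_syndrome() are invented names), fields live at fixed bit offsets inside a stream of big-endian 32-bit words; the offsets below follow the mbox_out layout in the patch, where status is the top byte of dword 0 and syndrome is all of dword 1:

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() */

/* Extract a field of bit_sz bits starting bit_off bits into a buffer of
 * big-endian dwords. Simplified: assumes the field does not straddle a
 * dword boundary, which holds for status and syndrome. */
static uint32_t mbox_get(const void *buf, unsigned int bit_off,
			 unsigned int bit_sz)
{
	const uint32_t *dw = buf;
	uint32_t word = ntohl(dw[bit_off / 32]);
	unsigned int shift = 32 - (bit_off % 32) - bit_sz;
	uint32_t mask = bit_sz == 32 ? 0xffffffff : (1u << bit_sz) - 1;

	return (word >> shift) & mask;
}

static uint8_t mbox_status(const void *out)
{
	return mbox_get(out, 0, 8);	/* status[0x8] */
}

static uint32_t mbox_syndrome(const void *out)
{
	return mbox_get(out, 32, 32);	/* syndrome[0x20] */
}

For the status byte this reduces to the be32_to_cpu()-and-shift-by-24 that the removed mlx5_cmd_status_to_err_v2() did by hand.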
@@ -294,11 +294,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DEALLOC_Q_COUNTER: case MLX5_CMD_OP_DEALLOC_PD: case MLX5_CMD_OP_DEALLOC_UAR: - case MLX5_CMD_OP_DETTACH_FROM_MCG: + case MLX5_CMD_OP_DETACH_FROM_MCG: case MLX5_CMD_OP_DEALLOC_XRCD: case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: + case MLX5_CMD_OP_DESTROY_LAG: + case MLX5_CMD_OP_DESTROY_VPORT_LAG: case MLX5_CMD_OP_DESTROY_TIR: case MLX5_CMD_OP_DESTROY_SQ: case MLX5_CMD_OP_DESTROY_RQ: @@ -315,6 +317,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_MODIFY_FLOW_TABLE: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: + case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -389,6 +392,10 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: + case MLX5_CMD_OP_CREATE_LAG: + case MLX5_CMD_OP_MODIFY_LAG: + case MLX5_CMD_OP_QUERY_LAG: + case MLX5_CMD_OP_CREATE_VPORT_LAG: case MLX5_CMD_OP_CREATE_TIR: case MLX5_CMD_OP_MODIFY_TIR: case MLX5_CMD_OP_QUERY_TIR: @@ -416,6 +423,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: case MLX5_CMD_OP_QUERY_FLOW_COUNTER: + case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -504,7 +512,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); MLX5_COMMAND_STR_CASE(ACCESS_REG); MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); - MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG); + MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); MLX5_COMMAND_STR_CASE(MAD_IFC); MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); @@ -526,6 +534,12 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); MLX5_COMMAND_STR_CASE(SET_WOL_ROL); MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); + MLX5_COMMAND_STR_CASE(CREATE_LAG); + MLX5_COMMAND_STR_CASE(MODIFY_LAG); + MLX5_COMMAND_STR_CASE(QUERY_LAG); + MLX5_COMMAND_STR_CASE(DESTROY_LAG); + MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG); + MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG); MLX5_COMMAND_STR_CASE(CREATE_TIR); MLX5_COMMAND_STR_CASE(MODIFY_TIR); MLX5_COMMAND_STR_CASE(DESTROY_TIR); @@ -564,15 +578,130 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER); + MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER); default: return "unknown command opcode"; } } +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: + return "OK"; + case MLX5_CMD_STAT_INT_ERR: + return "internal error"; + case MLX5_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case MLX5_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case MLX5_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case MLX5_CMD_STAT_RES_BUSY: + return "resource busy"; + case MLX5_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case MLX5_CMD_STAT_IX_ERR: + return "bad index"; + case 
MLX5_CMD_STAT_NO_RES_ERR: + return "no resources"; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case MLX5_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +static int cmd_status_to_err(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: return 0; + case MLX5_CMD_STAT_INT_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_IX_ERR: return -EINVAL; + case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} + +struct mlx5_ifc_mbox_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_mbox_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome) +{ + *status = MLX5_GET(mbox_out, out, status); + *syndrome = MLX5_GET(mbox_out, out, syndrome); +} + +static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) +{ + u32 syndrome; + u8 status; + u16 opcode; + u16 op_mod; + + mlx5_cmd_mbox_status(out, &status, &syndrome); + if (!status) + return 0; + + opcode = MLX5_GET(mbox_in, in, opcode); + op_mod = MLX5_GET(mbox_in, in, op_mod); + + mlx5_core_err(dev, + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", + mlx5_command_str(opcode), + opcode, op_mod, + cmd_status_str(status), + status, + syndrome); + + return cmd_status_to_err(status); +} + static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { - u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; + u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); struct mlx5_cmd_mailbox *next = msg->next; int data_only; u32 offset = 0; @@ -622,9 +751,7 @@ static void dump_command(struct mlx5_core_dev *dev, static u16 msg_to_opcode(struct mlx5_cmd_msg *in) { - struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); - - return be16_to_cpu(hdr->opcode); + return MLX5_GET(mbox_in, in->first.data, opcode); } static void cb_timeout_handler(struct work_struct *work) @@ -762,16 +889,6 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) return err; } -static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out) -{ - return &out->syndrome; -} - -static u8 *get_status_ptr(struct mlx5_outbox_hdr *out) -{ - return &out->status; -} - /* Notes: * 1. Callback functions may not sleep * 2. 
page queue commands do not support asynchrous completion @@ -820,7 +937,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; ds = ent->ts2 - ent->ts1; - op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + op = MLX5_GET(mbox_in, in->first.data, opcode); if (op < ARRAY_SIZE(cmd->stats)) { stats = &cmd->stats[op]; spin_lock_irq(&stats->lock); @@ -1035,7 +1152,6 @@ static ssize_t data_write(struct file *filp, const char __user *buf, struct mlx5_core_dev *dev = filp->private_data; struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; void *ptr; - int err; if (*pos != 0) return -EINVAL; @@ -1043,25 +1159,15 @@ static ssize_t data_write(struct file *filp, const char __user *buf, kfree(dbg->in_msg); dbg->in_msg = NULL; dbg->inlen = 0; - - ptr = kzalloc(count, GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - if (copy_from_user(ptr, buf, count)) { - err = -EFAULT; - goto out; - } + ptr = memdup_user(buf, count); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); dbg->in_msg = ptr; dbg->inlen = count; *pos = count; return count; - -out: - kfree(ptr); - return err; } static ssize_t data_read(struct file *filp, char __user *buf, size_t count, @@ -1321,11 +1427,16 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec) callback = ent->callback; context = ent->context; err = ent->ret; - if (!err) + if (!err) { err = mlx5_copy_from_msg(ent->uout, ent->out, ent->uout_size); + err = err ? err : mlx5_cmd_check(dev, + ent->in->first.data, + ent->uout); + } + mlx5_free_cmd_msg(dev, ent->out); free_msg(dev, ent->in); @@ -1377,14 +1488,9 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, return msg; } -static u16 opcode_from_in(struct mlx5_inbox_hdr *in) -{ - return be16_to_cpu(in->opcode); -} - -static int is_manage_pages(struct mlx5_inbox_hdr *in) +static int is_manage_pages(void *in) { - return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; + return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; } static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, @@ -1401,9 +1507,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, if (pci_channel_offline(dev->pdev) || dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status); - *get_synd_ptr(out) = cpu_to_be32(drv_synd); - *get_status_ptr(out) = status; + u16 opcode = MLX5_GET(mbox_in, in, opcode); + + err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); + MLX5_SET(mbox_out, out, status, status); + MLX5_SET(mbox_out, out, syndrome, drv_synd); return err; } @@ -1457,7 +1565,10 @@ out_in: int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { - return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); + int err; + + err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); + return err ? 
: mlx5_cmd_check(dev, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec); @@ -1694,96 +1805,3 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) pci_pool_destroy(cmd->pool); } EXPORT_SYMBOL(mlx5_cmd_cleanup); - -static const char *cmd_status_str(u8 status) -{ - switch (status) { - case MLX5_CMD_STAT_OK: - return "OK"; - case MLX5_CMD_STAT_INT_ERR: - return "internal error"; - case MLX5_CMD_STAT_BAD_OP_ERR: - return "bad operation"; - case MLX5_CMD_STAT_BAD_PARAM_ERR: - return "bad parameter"; - case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: - return "bad system state"; - case MLX5_CMD_STAT_BAD_RES_ERR: - return "bad resource"; - case MLX5_CMD_STAT_RES_BUSY: - return "resource busy"; - case MLX5_CMD_STAT_LIM_ERR: - return "limits exceeded"; - case MLX5_CMD_STAT_BAD_RES_STATE_ERR: - return "bad resource state"; - case MLX5_CMD_STAT_IX_ERR: - return "bad index"; - case MLX5_CMD_STAT_NO_RES_ERR: - return "no resources"; - case MLX5_CMD_STAT_BAD_INP_LEN_ERR: - return "bad input length"; - case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: - return "bad output length"; - case MLX5_CMD_STAT_BAD_QP_STATE_ERR: - return "bad QP state"; - case MLX5_CMD_STAT_BAD_PKT_ERR: - return "bad packet (discarded)"; - case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: - return "bad size too many outstanding CQEs"; - default: - return "unknown status"; - } -} - -static int cmd_status_to_err(u8 status) -{ - switch (status) { - case MLX5_CMD_STAT_OK: return 0; - case MLX5_CMD_STAT_INT_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; - case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; - case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; - case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; - case MLX5_CMD_STAT_IX_ERR: return -EINVAL; - case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; - case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; - case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; - case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; - default: return -EIO; - } -} - -/* this will be available till all the commands use set/get macros */ -int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) -{ - if (!hdr->status) - return 0; - - pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", - cmd_status_str(hdr->status), hdr->status, - be32_to_cpu(hdr->syndrome)); - - return cmd_status_to_err(hdr->status); -} - -int mlx5_cmd_status_to_err_v2(void *ptr) -{ - u32 syndrome; - u8 status; - - status = be32_to_cpu(*(__be32 *)ptr) >> 24; - if (!status) - return 0; - - syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); - - pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", - cmd_status_str(status), status, syndrome); - - return cmd_status_to_err(status); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 873a631ad155..32d4af9b594d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -134,33 +134,29 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) complete(&cq->free); } - int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_create_cq_mbox_in *in, int inlen) + u32 *in, int inlen) { - int err; struct mlx5_cq_table *table = &dev->priv.cq_table; - struct mlx5_create_cq_mbox_out out; - struct mlx5_destroy_cq_mbox_in 
din; - struct mlx5_destroy_cq_mbox_out dout; + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; + u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); struct mlx5_eq *eq; + int err; eq = mlx5_eqn2eq(dev, eqn); if (IS_ERR(eq)) return PTR_ERR(eq); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); - memset(&out, 0, sizeof(out)); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + memset(out, 0, sizeof(out)); + MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq->cons_index = 0; cq->arm_sn = 0; atomic_set(&cq->refcount, 1); @@ -186,10 +182,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, return 0; err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); - mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + memset(din, 0, sizeof(din)); + memset(dout, 0, sizeof(dout)); + MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ); + MLX5_SET(destroy_cq_in, din, cqn, cq->cqn); + mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } EXPORT_SYMBOL(mlx5_core_create_cq); @@ -197,8 +194,8 @@ EXPORT_SYMBOL(mlx5_core_create_cq); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) { struct mlx5_cq_table *table = &dev->priv.cq_table; - struct mlx5_destroy_cq_mbox_in in; - struct mlx5_destroy_cq_mbox_out out; + u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; struct mlx5_core_cq *tmp; int err; @@ -214,17 +211,12 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) return -EINVAL; } - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); - in.cqn = cpu_to_be32(cq->cqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ); + MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - synchronize_irq(cq->irqn); mlx5_debug_cq_remove(dev, cq); @@ -237,44 +229,23 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) EXPORT_SYMBOL(mlx5_core_destroy_cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_query_cq_mbox_out *out) + u32 *out, int outlen) { - struct mlx5_query_cq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, sizeof(*out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); - in.cqn = cpu_to_be32(cq->cqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); + u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0}; - return err; + MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ); + MLX5_SET(query_cq_in, in, cqn, cq->cqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_cq); - int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - struct mlx5_modify_cq_mbox_in *in, int in_sz) + u32 *in, int inlen) { - struct 
mlx5_modify_cq_mbox_out out; - int err; - - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); - err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); - if (err) - return err; + u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0}; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return 0; + MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_cq); @@ -283,18 +254,20 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, u16 cq_period, u16 cq_max_count) { - struct mlx5_modify_cq_mbox_in in; - - memset(&in, 0, sizeof(in)); - - in.cqn = cpu_to_be32(cq->cqn); - in.ctx.cq_period = cpu_to_be16(cq_period); - in.ctx.cq_max_count = cpu_to_be16(cq_max_count); - in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | - MLX5_CQ_MODIFY_COUNT); - - return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0}; + void *cqc; + + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, cq_period, cq_period); + MLX5_SET(cqc, cqc, cq_max_count, cq_max_count); + MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.modify_field_select.modify_field_select, + MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT); + + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); } +EXPORT_SYMBOL(mlx5_core_modify_cq_moderation); int mlx5_init_cq_table(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 5210d92e6bc7..e94a9532e218 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -277,24 +277,28 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, int index, int *is_str) { - struct mlx5_query_qp_mbox_out *out; + int outlen = MLX5_ST_SZ_BYTES(query_qp_out); struct mlx5_qp_context *ctx; u64 param = 0; + u32 *out; int err; int no_sq; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return param; - err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); + err = mlx5_core_qp_query(dev, qp, out, outlen); if (err) { - mlx5_core_warn(dev, "failed to query qp\n"); + mlx5_core_warn(dev, "failed to query qp err=%d\n", err); goto out; } *is_str = 0; - ctx = &out->ctx; + + /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ + ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc); + switch (index) { case QP_PID: param = qp->pid; @@ -358,32 +362,32 @@ out: static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, int index) { - struct mlx5_query_eq_mbox_out *out; - struct mlx5_eq_context *ctx; + int outlen = MLX5_ST_SZ_BYTES(query_eq_out); u64 param = 0; + void *ctx; + u32 *out; int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return param; - ctx = &out->ctx; - - err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); + err = mlx5_core_eq_query(dev, eq, out, outlen); if (err) { mlx5_core_warn(dev, "failed to query eq\n"); goto out; } + ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry); switch (index) { case EQ_NUM_EQES: - param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + param = 1 << MLX5_GET(eqc, ctx, log_eq_size); break; case EQ_INTR: - param = ctx->intr; + param = MLX5_GET(eqc, ctx, 
intr); break; case EQ_LOG_PG_SZ: - param = (ctx->log_page_size & 0x1f) + 12; + param = MLX5_GET(eqc, ctx, log_page_size) + 12; break; } @@ -395,37 +399,37 @@ out: static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, int index) { - struct mlx5_query_cq_mbox_out *out; - struct mlx5_cq_context *ctx; + int outlen = MLX5_ST_SZ_BYTES(query_cq_out); u64 param = 0; + void *ctx; + u32 *out; int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = mlx5_vzalloc(outlen); if (!out) return param; - ctx = &out->ctx; - - err = mlx5_core_query_cq(dev, cq, out); + err = mlx5_core_query_cq(dev, cq, out, outlen); if (err) { mlx5_core_warn(dev, "failed to query cq\n"); goto out; } + ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context); switch (index) { case CQ_PID: param = cq->pid; break; case CQ_NUM_CQES: - param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + param = 1 << MLX5_GET(cqc, ctx, log_cq_size); break; case CQ_LOG_PG_SZ: - param = (ctx->log_pg_sz & 0x1f) + 12; + param = MLX5_GET(cqc, ctx, log_page_size); break; } out: - kfree(out); + kvfree(out); return param; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bf722aa88cf0..96995609f205 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -654,40 +654,6 @@ struct mlx5e_priv { void *ppriv; }; -enum mlx5e_link_mode { - MLX5E_1000BASE_CX_SGMII = 0, - MLX5E_1000BASE_KX = 1, - MLX5E_10GBASE_CX4 = 2, - MLX5E_10GBASE_KX4 = 3, - MLX5E_10GBASE_KR = 4, - MLX5E_20GBASE_KR2 = 5, - MLX5E_40GBASE_CR4 = 6, - MLX5E_40GBASE_KR4 = 7, - MLX5E_56GBASE_R4 = 8, - MLX5E_10GBASE_CR = 12, - MLX5E_10GBASE_SR = 13, - MLX5E_10GBASE_ER = 14, - MLX5E_40GBASE_SR4 = 15, - MLX5E_40GBASE_LR4 = 16, - MLX5E_50GBASE_SR2 = 18, - MLX5E_100GBASE_CR4 = 20, - MLX5E_100GBASE_SR4 = 21, - MLX5E_100GBASE_KR4 = 22, - MLX5E_100GBASE_LR4 = 23, - MLX5E_100BASE_TX = 24, - MLX5E_1000BASE_T = 25, - MLX5E_10GBASE_T = 26, - MLX5E_25GBASE_CR = 27, - MLX5E_25GBASE_KR = 28, - MLX5E_25GBASE_SR = 29, - MLX5E_50GBASE_CR2 = 30, - MLX5E_50GBASE_KR2 = 31, - MLX5E_LINK_MODES_NUMBER, -}; - -#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) - - void mlx5e_build_ptys2ethtool_map(void); void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 9cce153e1035..029e856f72a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -60,24 +60,27 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) { - struct mlx5_create_mkey_mbox_in *in; + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; int err; - in = mlx5_vzalloc(sizeof(*in)); + in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - in->seg.flags = MLX5_PERM_LOCAL_WRITE | - MLX5_PERM_LOCAL_READ | - MLX5_ACCESS_MODE_PA; - in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); - err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, - NULL); + MLX5_SET(mkc, mkc, pd, pdn); + MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, qpn, 0xffffff); - kvfree(in); + err = 
mlx5_core_create_mkey(mdev, mkey, in, inlen); + kvfree(in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d0cf8fa22659..d1cd1564e9b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -803,7 +803,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; u32 eth_proto_cap; u32 eth_proto_admin; u32 eth_proto_lp; @@ -813,7 +813,6 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); - if (err) { netdev_err(netdev, "%s: query port ptys failed: %d\n", __func__, err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2459c7f3db8d..03586ee68fc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -174,18 +174,15 @@ static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 *out = (u32 *)priv->stats.vport.query_vport_out; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; struct mlx5_core_dev *mdev = priv->mdev; - memset(in, 0, sizeof(in)); - MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); MLX5_SET(query_vport_counter_in, in, other_vport, 0); memset(out, 0, outlen); - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } @@ -488,7 +485,8 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); - MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD); + MLX5_SET64(modify_rq_in, in, modify_bitmask, + MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); MLX5_SET(rqc, rqc, vsd, vsd); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); @@ -1999,14 +1997,15 @@ static void mlx5e_close_drop_rq(struct mlx5e_priv *priv) static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) { struct mlx5_core_dev *mdev = priv->mdev; - u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - memset(in, 0, sizeof(in)); - MLX5_SET(tisc, tisc, prio, tc << 1); MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); + if (mlx5_lag_is_lacp_owner(mdev)) + MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1); + return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } @@ -3211,37 +3210,37 @@ static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *mkc; - int inlen = sizeof(*in); u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + void *mkc; + u32 *in; int err; in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - mkc = &in->seg; - mkc->status = MLX5_MKEY_STATUS_FREE; - mkc->flags = MLX5_PERM_UMR_EN | - MLX5_PERM_LOCAL_READ | - MLX5_PERM_LOCAL_WRITE | - MLX5_ACCESS_MODE_MTT; + mkc = 
MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); - mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); - mkc->len = cpu_to_be64(npages << PAGE_SHIFT); - mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages)); - mkc->log2_page_size = PAGE_SHIFT; + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); - err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, - NULL, NULL); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); + MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT); + MLX5_SET(mkc, mkc, translations_octword_size, + MLX5_MTT_OCTW(npages)); + MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); - kvfree(in); + err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen); + kvfree(in); return err; } @@ -3360,6 +3359,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = mdev->priv.eswitch; struct mlx5_eswitch_rep rep; + mlx5_lag_add(mdev, netdev); + if (mlx5e_vxlan_allowed(mdev)) { rtnl_lock(); udp_tunnel_get_rx_info(netdev); @@ -3383,6 +3384,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) { queue_work(priv->wq, &priv->set_rx_mode_work); mlx5e_disable_async_events(priv); + mlx5_lag_remove(priv->mdev); } static const struct mlx5e_profile mlx5e_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 134de4a11f1d..29db4735182a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -415,8 +415,8 @@ int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, { rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep); if (!rep->priv_data) { - pr_warn("Failed to create representor for vport %d\n", - rep->vport); + mlx5_core_warn(esw->dev, "Failed to create representor for vport %d\n", + rep->vport); return -EINVAL; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 0e30602ef76d..aaca09002ca6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -86,23 +86,12 @@ struct cre_des_eq { static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) { - struct mlx5_destroy_eq_mbox_in in; - struct mlx5_destroy_eq_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); - in.eqn = eqn; - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (!err) - goto ex; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); + u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0}; -ex: - return err; + MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ); + MLX5_SET(destroy_eq_in, in, eq_number, eqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) @@ -351,11 +340,13 @@ static void init_eq_buf(struct mlx5_eq *eq) int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar) { + u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; - struct mlx5_create_eq_mbox_in *in; - struct 
mlx5_create_eq_mbox_out out; - int err; + __be64 *pas; + void *eqc; int inlen; + u32 *in; + int err; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; @@ -365,35 +356,36 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, init_eq_buf(eq); - inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; + inlen = MLX5_ST_SZ_BYTES(create_eq_in) + + MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; + in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; goto err_buf; } - memset(&out, 0, sizeof(out)); - mlx5_fill_page_array(&eq->buf, in->pas); + pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); + mlx5_fill_page_array(&eq->buf, pas); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); - in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); - in->ctx.intr = vecidx; - in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; - in->events_mask = cpu_to_be64(mask); + MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); + MLX5_SET64(create_eq_in, in, event_bitmask, mask); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) - goto err_in; + eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); + MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); + MLX5_SET(eqc, eqc, uar_page, uar->index); + MLX5_SET(eqc, eqc, intr, vecidx); + MLX5_SET(eqc, eqc, log_page_size, + eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); - if (out.hdr.status) { - err = mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + if (err) goto err_in; - } snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); - eq->eqn = out.eq_number; + eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = priv->msix_arr[vecidx].vector; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; @@ -547,22 +539,12 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev) } int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - struct mlx5_query_eq_mbox_out *out, int outlen) + u32 *out, int outlen) { - struct mlx5_query_eq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, outlen); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); - in.eqn = eq->eqn; - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); - if (err) - return err; + u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0}; - if (out->hdr.status) - err = mlx5_cmd_status_to_err(&out->hdr); - - return err; + MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); + MLX5_SET(query_eq_in, in, eq_number, eq->eqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8b78f156214e..101430571d6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -87,13 +87,9 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { - int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]; - int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; + int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; void *nic_vport_ctx; - int err; - - memset(out, 0, sizeof(out)); - memset(in, 0, sizeof(in)); MLX5_SET(modify_nic_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); @@ 
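/* create_eq above is one of the variable-length inboxes: the fixed
 * part of create_eq_in is followed by one 64-bit page address (PAS
 * entry) per EQ buffer page, so inlen is computed, the buffer comes
 * from mlx5_vzalloc(), and the PAS array is filled at its offset.
 * The same sizing scheme with made-up constants:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FIXED_IN_BYTES	0x110	/* stand-in for MLX5_ST_SZ_BYTES(create_eq_in) */
#define PAS_ENTRY_BYTES	8	/* stand-in for MLX5_FLD_SZ_BYTES(..., pas[0]) */

int main(void)
{
	int npages = 8, i;
	size_t inlen = FIXED_IN_BYTES + (size_t)PAS_ENTRY_BYTES * npages;
	unsigned char *in = calloc(1, inlen);	/* mlx5_vzalloc() analogue */
	uint64_t *pas;

	if (!in)
		return 1;

	pas = (uint64_t *)(in + FIXED_IN_BYTES);	/* MLX5_ADDR_OF(..., pas) */
	for (i = 0; i < npages; i++)
		pas[i] = 0x1000ull * (i + 1);	/* fake page addresses */

	printf("inlen=%zu for %d pages\n", inlen, npages);
	free(in);
	return 0;
}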
-116,45 +112,31 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_promisc_change, 1); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - goto ex; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto ex; - return 0; -ex: - return err; + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } /* E-Switch vport context HW commands */ static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0}; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); - MLX5_SET(query_esw_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(query_esw_vport_context_in, in, other_vport, 1); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, u16 *vlan, u8 *qos) { - u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)]; + u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0}; int err; bool cvlan_strip; bool cvlan_insert; - memset(out, 0, sizeof(out)); - *vlan = 0; *qos = 0; @@ -188,27 +170,20 @@ out: static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, void *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)]; - - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0}; + MLX5_SET(modify_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); - - MLX5_SET(modify_esw_vport_context_in, in, opcode, - MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); - - return mlx5_cmd_exec_check_status(dev, in, inlen, - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, u16 vlan, u8 qos, bool set) { - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0}; if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) @@ -216,7 +191,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n", vport, vlan, qos, set); - if (set) { MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.vport_cvlan_strip, 1); @@ -241,13 +215,10 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac, u8 vlan_valid, u16 vlan) { - u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)]; + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; u8 *in_mac_addr; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); MLX5_SET(set_l2_table_entry_in, in, table_index, index); @@ -257,23 +228,18 @@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); ether_addr_copy(&in_mac_addr[2], mac); - return 
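/* arm_vport_context_events_cmd() above shrinks from "exec, then
 * translate the outbox status with mlx5_cmd_status_to_err_v2()" to a
 * bare return of mlx5_cmd_exec(): in this series the command layer
 * itself turns a bad device status into an errno. A toy model of
 * that contract, with the status-to-errno mapping simplified to one
 * value:
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct outbox { unsigned char status; unsigned int syndrome; };

static int transport_exec(struct outbox *out, unsigned char dev_status)
{
	memset(out, 0, sizeof(*out));
	out->status = dev_status;
	return 0;			/* the transport itself worked */
}

static int cmd_exec(struct outbox *out, unsigned char dev_status)
{
	int err = transport_exec(out, dev_status);

	if (err)
		return err;
	/* the real driver maps each status code to a specific errno */
	return out->status ? -EREMOTEIO : 0;
}

int main(void)
{
	struct outbox out;

	printf("ok=%d bad=%d\n", cmd_exec(&out, 0), cmd_exec(&out, 2));
	return 0;
}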
mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) { - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) @@ -340,7 +306,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, spec = mlx5_vzalloc(sizeof(*spec)); if (!spec) { - pr_warn("FDB: Failed to alloc match parameters\n"); + esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); return NULL; } dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -374,8 +340,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest); if (IS_ERR(flow_rule)) { - pr_warn( - "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", + esw_warn(esw->dev, + "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); flow_rule = NULL; } @@ -1352,8 +1318,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->ingress.allow_rule)) { err = PTR_ERR(vport->ingress.allow_rule); - pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure ingress allow rule, err(%d)\n", + vport->vport, err); vport->ingress.allow_rule = NULL; goto out; } @@ -1365,8 +1332,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); - pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure ingress drop rule, err(%d)\n", + vport->vport, err); vport->ingress.drop_rule = NULL; goto out; } @@ -1418,8 +1386,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->egress.allowed_vlan)) { err = PTR_ERR(vport->egress.allowed_vlan); - pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure egress allowed vlan rule failed, err(%d)\n", + vport->vport, err); vport->egress.allowed_vlan = NULL; goto out; } @@ -1432,8 +1401,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, 0, NULL); if (IS_ERR(vport->egress.drop_rule)) { err = PTR_ERR(vport->egress.drop_rule); - pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); vport->egress.drop_rule = NULL; } out: @@ -1905,7 +1875,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, struct ifla_vf_stats *vf_stats) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; int err = 0; u32 
*out; @@ -1918,8 +1888,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, if (!out) return -ENOMEM; - memset(in, 0, sizeof(in)); - MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 9134010e2921..7a0415e6d339 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -41,10 +41,8 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]; - u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); @@ -55,30 +53,23 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, MLX5_SET(set_flow_table_root_in, in, other_vport, 1); } - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, u16 vport, + enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id) { - u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; - u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; int err; - memset(in, 0, sizeof(in)); - MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE); - if (next_ft) { - MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); - MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); - } MLX5_SET(create_flow_table_in, in, table_type, type); MLX5_SET(create_flow_table_in, in, level, level); MLX5_SET(create_flow_table_in, in, log_size, log_size); @@ -87,10 +78,23 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, other_vport, 1); } - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + switch (op_mod) { + case FS_FT_OP_MOD_NORMAL: + if (next_ft) { + MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); + } + break; + + case FS_FT_OP_MOD_LAG_DEMUX: + MLX5_SET(create_flow_table_in, in, op_mod, 0x1); + if (next_ft) + MLX5_SET(create_flow_table_in, in, lag_master_next_table_id, + next_ft->id); + break; + } + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *table_id = MLX5_GET(create_flow_table_out, out, table_id); @@ -100,11 +104,8 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0}; MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); @@ -115,39 +116,49 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, MLX5_SET(destroy_flow_table_in, in, other_vport, 1); } - return 
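/* mlx5_cmd_create_flow_table() now keys its miss handling off an
 * op_mod: NORMAL tables chain misses through table_miss_mode and
 * table_miss_id, while LAG demux tables raise op_mod and name a
 * lag_master_next_table_id instead. The branch in isolation, with
 * simplified types:
 */
#include <stdint.h>
#include <stdio.h>

enum ft_op_mod { FT_OP_MOD_NORMAL, FT_OP_MOD_LAG_DEMUX };

struct ft_in {			/* zero-initialized like the real inbox */
	uint8_t  op_mod;
	uint8_t  miss_mode;
	uint32_t miss_id;
	uint32_t lag_next_id;
};

static void ft_fill_miss(struct ft_in *in, enum ft_op_mod mod,
			 const uint32_t *next_id)
{
	switch (mod) {
	case FT_OP_MOD_NORMAL:
		if (next_id) {
			in->miss_mode = 1;	/* forward misses ...   */
			in->miss_id = *next_id;	/* ... to the next table */
		}
		break;
	case FT_OP_MOD_LAG_DEMUX:
		in->op_mod = 1;
		if (next_id)
			in->lag_next_id = *next_id;
		break;
	}
}

int main(void)
{
	struct ft_in in = {0};
	uint32_t next = 7;

	ft_fill_miss(&in, FT_OP_MOD_LAG_DEMUX, &next);
	printf("op_mod=%u lag_next=%u\n", in.op_mod, in.lag_next_id);
	return 0;
}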
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { - u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0}; MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); MLX5_SET(modify_flow_table_in, in, table_type, ft->type); MLX5_SET(modify_flow_table_in, in, table_id, ft->id); - if (ft->vport) { - MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); - MLX5_SET(modify_flow_table_in, in, other_vport, 1); - } - MLX5_SET(modify_flow_table_in, in, modify_field_select, - MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); - if (next_ft) { - MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); - MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id); + + if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) { + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, + lag_master_next_table_id, next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, + lag_master_next_table_id, 0); + } } else { - MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + if (ft->vport) { + MLX5_SET(modify_flow_table_in, in, vport_number, + ft->vport); + MLX5_SET(modify_flow_table_in, in, other_vport, 1); + } + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(modify_flow_table_in, in, table_miss_id, + next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + } } - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, @@ -155,12 +166,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, u32 *in, unsigned int *group_id) { + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; int err; - memset(out, 0, sizeof(out)); - MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); MLX5_SET(create_flow_group_in, in, table_type, ft->type); @@ -170,13 +179,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, MLX5_SET(create_flow_group_in, in, other_vport, 1); } - err = mlx5_cmd_exec_check_status(dev, in, - inlen, out, - sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *group_id = MLX5_GET(create_flow_group_out, out, group_id); - return err; } @@ -184,11 +190,8 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned int group_id) { - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; - u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0}; MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); @@ -200,8 +203,7 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, MLX5_SET(destroy_flow_group_in, in, other_vport, 1); 
} - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, @@ -212,7 +214,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, { unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); - u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; struct mlx5_flow_rule *dst; void *in_flow_context; void *in_match_value; @@ -290,11 +292,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, list_size); } - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, - sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); kvfree(in); - return err; } @@ -303,7 +302,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, unsigned group_id, struct fs_fte *fte) { - return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); } int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, @@ -327,12 +326,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned int index) { - u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; - u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; - int err; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0}; MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, ft->type); @@ -343,74 +338,55 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, MLX5_SET(delete_fte_in, in, other_vport, 1); } - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); - - return err; + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) { - u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_flow_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_FLOW_COUNTER); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); - if (err) - return err; - - *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); - - return 0; + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); + return err; } int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) { - u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0}; MLX5_SET(dealloc_flow_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + - MLX5_ST_SZ_BYTES(traffic_counter)]; - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + MLX5_ST_SZ_BYTES(traffic_counter)] = {0}; + u32 
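/* mlx5_cmd_fc_alloc() above is reshaped so the new counter id is read
 * from the outbox only when the exec succeeded, and the error is
 * returned as-is otherwise. The shape of that contract, modeled with
 * plain ints (fw_alloc stands in for the exec + MLX5_GET step):
 */
#include <errno.h>
#include <stdio.h>

static int fw_alloc(unsigned short *out_id, int make_it_fail)
{
	if (make_it_fail)
		return -EIO;	/* outbox contents are meaningless now */
	*out_id = 0x2a;		/* MLX5_GET(..., flow_counter_id) analogue */
	return 0;
}

static int fc_alloc(unsigned short *id, int make_it_fail)
{
	unsigned short out_id = 0;
	int err = fw_alloc(&out_id, make_it_fail);

	if (!err)
		*id = out_id;	/* touch caller state only on success */
	return err;
}

int main(void)
{
	unsigned short id = 0;

	printf("err=%d id=%#x\n", fc_alloc(&id, 0), id);
	return 0;
}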
in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; void *stats; int err = 0; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, id); - - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics); *packets = MLX5_GET64(traffic_counter, stats, packets); *bytes = MLX5_GET64(traffic_counter, stats, octets); - return 0; } @@ -448,18 +424,14 @@ void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b) int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) { - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - b->out, b->outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen); } void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, @@ -480,3 +452,51 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, *packets = MLX5_GET64(traffic_counter, stats, packets); *bytes = MLX5_GET64(traffic_counter, stats, octets); } + +#define MAX_ENCAP_SIZE (128) + +int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id) +{ + u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) + + (MAX_ENCAP_SIZE / sizeof(u32))]; + void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, + encap_header); + void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in, + encap_header); + int inlen = header - (void *)in + size; + int err; + + if (size > MAX_ENCAP_SIZE) + return -EINVAL; + + memset(in, 0, inlen); + MLX5_SET(alloc_encap_header_in, in, opcode, + MLX5_CMD_OP_ALLOC_ENCAP_HEADER); + MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size); + MLX5_SET(encap_header_in, encap_header_in, header_type, header_type); + memcpy(header, encap_header, size); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + + *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); + return err; +} + +void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(dealloc_encap_header_in, in, opcode, + MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); + MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 158844cef82b..c5bc4686c832 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -35,6 +35,7 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, u16 vport, + enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id); @@ 
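/* mlx5_cmd_alloc_encap() sizes its inbox as "offset of the raw encap
 * header blob + the caller's header size", capped at MAX_ENCAP_SIZE
 * (128 bytes). The bound-then-size step on its own; the offset value
 * is a placeholder for what MLX5_ADDR_OF() would compute:
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ENCAP_SIZE	128
#define HDR_BLOB_OFFSET	16	/* illustrative, not the real ifc offset */

static int encap_inlen(size_t hdr_size, size_t *inlen)
{
	if (hdr_size > MAX_ENCAP_SIZE)
		return -EINVAL;
	*inlen = HDR_BLOB_OFFSET + hdr_size;
	return 0;
}

int main(void)
{
	size_t inlen;

	if (!encap_inlen(50, &inlen))
		printf("inlen=%zu\n", inlen);
	printf("oversized -> %d\n", encap_inlen(200, &inlen));
	return 0;
}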
-88,4 +89,11 @@ void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b, u16 id, u64 *packets, u64 *bytes); +int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id); +void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 3d6c1f65e586..5da2cc878582 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -96,6 +96,10 @@ #define OFFLOADS_NUM_PRIOS 1 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1) +#define LAG_PRIO_NUM_LEVELS 1 +#define LAG_NUM_PRIOS 1 +#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1) + struct node_caps { size_t arr_sz; long *caps; @@ -111,12 +115,16 @@ static struct init_tree_node { int num_levels; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 6, + .ar_size = 7, .children = (struct init_tree_node[]) { ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, LAG_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS, + LAG_PRIO_NUM_LEVELS))), ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, @@ -345,7 +353,7 @@ static void del_flow_table(struct fs_node *node) err = mlx5_cmd_destroy_flow_table(dev, ft); if (err) - pr_warn("flow steering can't destroy ft\n"); + mlx5_core_warn(dev, "flow steering can't destroy ft\n"); fs_get_obj(prio, ft->node.parent); prio->num_ft--; } @@ -364,7 +372,7 @@ static void del_rule(struct fs_node *node) match_value = mlx5_vzalloc(match_len); if (!match_value) { - pr_warn("failed to allocate inbox\n"); + mlx5_core_warn(dev, "failed to allocate inbox\n"); return; } @@ -387,8 +395,9 @@ static void del_rule(struct fs_node *node) modify_mask, fte); if (err) - pr_warn("%s can't del rule fg id=%d fte_index=%d\n", - __func__, fg->id, fte->index); + mlx5_core_warn(dev, + "%s can't del rule fg id=%d fte_index=%d\n", + __func__, fg->id, fte->index); } kvfree(match_value); } @@ -409,8 +418,9 @@ static void del_fte(struct fs_node *node) err = mlx5_cmd_delete_fte(dev, ft, fte->index); if (err) - pr_warn("flow steering can't delete fte in index %d of flow group id %d\n", - fte->index, fg->id); + mlx5_core_warn(dev, + "flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); fte->status = 0; fg->num_ftes--; @@ -427,8 +437,8 @@ static void del_flow_group(struct fs_node *node) dev = get_dev(&ft->node); if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) - pr_warn("flow steering can't destroy fg %d of ft %d\n", - fg->id, ft->id); + mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", + fg->id, ft->id); } static struct fs_fte *alloc_fte(u8 action, @@ -475,7 +485,8 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) } static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte, - enum fs_flow_table_type table_type) + enum fs_flow_table_type table_type, + enum fs_flow_table_op_mod op_mod) { struct mlx5_flow_table *ft; @@ -485,6 +496,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft ft->level = level; ft->node.type = FS_TYPE_FLOW_TABLE; + ft->op_mod = op_mod; ft->type = table_type; ft->vport = vport; ft->max_fte = max_fte; @@ -722,6 +734,7 @@ static void 
list_add_flow_table(struct mlx5_flow_table *ft, } static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + enum fs_flow_table_op_mod op_mod, u16 vport, int prio, int max_fte, u32 level) { @@ -754,18 +767,19 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa level += fs_prio->start_level; ft = alloc_flow_table(level, vport, - roundup_pow_of_two(max_fte), - root->table_type); + max_fte ? roundup_pow_of_two(max_fte) : 0, + root->table_type, + op_mod); if (!ft) { err = -ENOMEM; goto unlock_root; } tree_init_node(&ft->node, 1, del_flow_table); - log_table_sz = ilog2(ft->max_fte); + log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); - err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level, - log_table_sz, next_ft, &ft->id); + err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, + ft->level, log_table_sz, next_ft, &ft->id); if (err) goto free_ft; @@ -792,15 +806,26 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, u32 level) { - return __mlx5_create_flow_table(ns, 0, prio, max_fte, level); + return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio, + max_fte, level); } struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, int prio, int max_fte, u32 level, u16 vport) { - return __mlx5_create_flow_table(ns, vport, prio, max_fte, level); + return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio, + max_fte, level); +} + +struct mlx5_flow_table *mlx5_create_lag_demux_flow_table( + struct mlx5_flow_namespace *ns, + int prio, u32 level) +{ + return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0, + level); } +EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table); struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, @@ -1379,6 +1404,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, switch (type) { case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_LAG: case MLX5_FLOW_NAMESPACE_OFFLOADS: case MLX5_FLOW_NAMESPACE_ETHTOOL: case MLX5_FLOW_NAMESPACE_KERNEL: @@ -1401,6 +1427,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &steering->esw_ingress_root_ns->ns; else return NULL; + case MLX5_FLOW_NAMESPACE_SNIFFER_RX: + if (steering->sniffer_rx_root_ns) + return &steering->sniffer_rx_root_ns->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_SNIFFER_TX: + if (steering->sniffer_tx_root_ns) + return &steering->sniffer_tx_root_ns->ns; + else + return NULL; default: return NULL; } @@ -1700,10 +1736,46 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_root_ns(steering->esw_egress_root_ns); cleanup_root_ns(steering->esw_ingress_root_ns); cleanup_root_ns(steering->fdb_root_ns); + cleanup_root_ns(steering->sniffer_rx_root_ns); + cleanup_root_ns(steering->sniffer_tx_root_ns); mlx5_cleanup_fc_stats(dev); kfree(steering); } +static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX); + if (!steering->sniffer_tx_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_root_ns(steering->sniffer_tx_root_ns); + return PTR_ERR(prio); + } + return 0; +} + +static int init_sniffer_rx_root_ns(struct 
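/* LAG demux tables are created with max_fte == 0, so the sizing code
 * in __mlx5_create_flow_table() now guards both conversions instead
 * of feeding 0 to roundup_pow_of_two()/ilog2() (neither is meaningful
 * for 0). The guard, extracted with local helper implementations:
 */
#include <stdio.h>

static unsigned int round_pow2(unsigned int v)	/* caller ensures v > 0 */
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int ilog2u(unsigned int v)	/* caller ensures v > 0 */
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int max_fte = 0;	/* e.g. a LAG demux table */
	unsigned int size = max_fte ? round_pow2(max_fte) : 0;
	unsigned int log_size = size ? ilog2u(size) : 0;

	printf("size=%u log_size=%u\n", size, log_size);
	return 0;
}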
mlx5_flow_steering *steering) +{ + struct fs_prio *prio; + + steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX); + if (!steering->sniffer_rx_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1); + if (IS_ERR(prio)) { + cleanup_root_ns(steering->sniffer_rx_root_ns); + return PTR_ERR(prio); + } + return 0; +} + static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; @@ -1800,6 +1872,18 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) } } + if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) { + err = init_sniffer_rx_root_ns(steering); + if (err) + goto err; + } + + if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) { + err = init_sniffer_tx_root_ns(steering); + if (err) + goto err; + } + return 0; err: mlx5_cleanup_fs(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 9cffb6aeb4e9..71ff03bceabb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -49,6 +49,13 @@ enum fs_flow_table_type { FS_FT_ESW_EGRESS_ACL = 0x2, FS_FT_ESW_INGRESS_ACL = 0x3, FS_FT_FDB = 0X4, + FS_FT_SNIFFER_RX = 0X5, + FS_FT_SNIFFER_TX = 0X6, +}; + +enum fs_flow_table_op_mod { + FS_FT_OP_MOD_NORMAL, + FS_FT_OP_MOD_LAG_DEMUX, }; enum fs_fte_status { @@ -61,6 +68,8 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *esw_egress_root_ns; struct mlx5_flow_root_namespace *esw_ingress_root_ns; + struct mlx5_flow_root_namespace *sniffer_tx_root_ns; + struct mlx5_flow_root_namespace *sniffer_rx_root_ns; }; struct fs_node { @@ -93,6 +102,7 @@ struct mlx5_flow_table { unsigned int max_fte; unsigned int level; enum fs_flow_table_type type; + enum fs_flow_table_op_mod op_mod; struct { bool active; unsigned int required_groups; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 77fc1aa26114..5718aada6605 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -38,13 +38,10 @@ static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_adapter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0}; MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } int mlx5_query_board_id(struct mlx5_core_dev *dev) @@ -162,38 +159,18 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) { - struct mlx5_cmd_init_hca_mbox_in in; - struct mlx5_cmd_init_hca_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); + u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0}; - return err; + MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) { - struct mlx5_cmd_teardown_hca_mbox_in in; - struct mlx5_cmd_teardown_hca_mbox_out out; - int err; + u32 
out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c new file mode 100644 index 000000000000..92c3e0dbcbdc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -0,0 +1,602 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/netdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/vport.h> +#include "mlx5_core.h" + +enum { + MLX5_LAG_FLAG_BONDED = 1 << 0, +}; + +struct lag_func { + struct mlx5_core_dev *dev; + struct net_device *netdev; +}; + +/* Used for collection of netdev event info. */ +struct lag_tracker { + enum netdev_lag_tx_type tx_type; + struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS]; + bool is_bonded; +}; + +/* LAG data of a ConnectX card. + * It serves both its phys functions. + */ +struct mlx5_lag { + u8 flags; + u8 v2p_map[MLX5_MAX_PORTS]; + struct lag_func pf[MLX5_MAX_PORTS]; + struct lag_tracker tracker; + struct delayed_work bond_work; + struct notifier_block nb; +}; + +/* General purpose, use for short periods of time. + * Beware of lock dependencies (preferably, no locks should be acquired + * under it). 
+ */ +static DEFINE_MUTEX(lag_mutex); + +static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, + u8 remap_port2) +{ + u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0}; + void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); + + MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); + + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, + u8 remap_port2) +{ + u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0}; + void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); + + MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); + MLX5_SET(modify_lag_in, in, field_select, 0x1); + + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); + MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev) +{ + u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0}; + + MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev) +{ + u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0}; + + MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL(mlx5_cmd_create_vport_lag); + +int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) +{ + u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0}; + + MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} +EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); + +static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev) +{ + return dev->priv.lag; +} + +static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, + struct net_device *ndev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].netdev == ndev) + return i; + + return -1; +} + +static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev) +{ + return !!(ldev->flags & MLX5_LAG_FLAG_BONDED); +} + +static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, + u8 *port1, u8 *port2) +{ + if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + if (tracker->netdev_state[0].tx_enabled) { + *port1 = 1; + *port2 = 1; + } else { + *port1 = 2; + *port2 = 2; + } + } else { + *port1 = 1; + *port2 = 2; + if (!tracker->netdev_state[0].link_up) + *port1 = 2; + else if (!tracker->netdev_state[1].link_up) + *port2 = 1; + } +} + +static void mlx5_activate_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + int err; + + ldev->flags |= MLX5_LAG_FLAG_BONDED; + + mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0], + &ldev->v2p_map[1]); + + err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]); + if (err) + mlx5_core_err(dev0, + "Failed to create LAG (%d)\n", + err); +} + +static void mlx5_deactivate_lag(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + int err; + + ldev->flags &= 
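/* mlx5_infer_tx_affinity_mapping() above decides the two TX port
 * remaps from the tracker state: in active-backup both map to the
 * port whose TX is enabled; otherwise the identity map is kept unless
 * one link is down. The same logic lifted into a standalone check:
 */
#include <stdbool.h>
#include <stdio.h>

struct port_state { bool link_up; bool tx_enabled; };

static void infer_map(bool active_backup, const struct port_state st[2],
		      unsigned char *port1, unsigned char *port2)
{
	if (active_backup) {
		*port1 = *port2 = st[0].tx_enabled ? 1 : 2;
	} else {
		*port1 = 1;
		*port2 = 2;
		if (!st[0].link_up)
			*port1 = 2;
		else if (!st[1].link_up)
			*port2 = 1;
	}
}

int main(void)
{
	struct port_state st[2] = { { .link_up = false },
				    { .link_up = true  } };
	unsigned char p1, p2;

	infer_map(false, st, &p1, &p2);	/* hash mode, port 1 link down */
	printf("v2p: 1->%u 2->%u\n", p1, p2);	/* 1->2 2->2 */
	return 0;
}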
~MLX5_LAG_FLAG_BONDED; + + err = mlx5_cmd_destroy_lag(dev0); + if (err) + mlx5_core_err(dev0, + "Failed to destroy LAG (%d)\n", + err); +} + +static void mlx5_do_bond(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + struct mlx5_core_dev *dev1 = ldev->pf[1].dev; + struct lag_tracker tracker; + u8 v2p_port1, v2p_port2; + int i, err; + + if (!dev0 || !dev1) + return; + + mutex_lock(&lag_mutex); + tracker = ldev->tracker; + mutex_unlock(&lag_mutex); + + if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) { + if (mlx5_sriov_is_enabled(dev0) || + mlx5_sriov_is_enabled(dev1)) { + mlx5_core_warn(dev0, "LAG is not supported with SRIOV"); + return; + } + + for (i = 0; i < MLX5_MAX_PORTS; i++) + mlx5_remove_dev_by_protocol(ldev->pf[i].dev, + MLX5_INTERFACE_PROTOCOL_IB); + + mlx5_activate_lag(ldev, &tracker); + + mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_nic_vport_enable_roce(dev1); + } else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) { + mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1, + &v2p_port2); + + if ((v2p_port1 != ldev->v2p_map[0]) || + (v2p_port2 != ldev->v2p_map[1])) { + ldev->v2p_map[0] = v2p_port1; + ldev->v2p_map[1] = v2p_port2; + + err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); + if (err) + mlx5_core_err(dev0, + "Failed to modify LAG (%d)\n", + err); + } + } else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) { + mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_nic_vport_disable_roce(dev1); + + mlx5_deactivate_lag(ldev); + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + mlx5_add_dev_by_protocol(ldev->pf[i].dev, + MLX5_INTERFACE_PROTOCOL_IB); + } +} + +static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) +{ + schedule_delayed_work(&ldev->bond_work, delay); +} + +static void mlx5_do_bond_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, + bond_work); + int status; + + status = mutex_trylock(&mlx5_intf_mutex); + if (!status) { + /* 1 sec delay. */ + mlx5_queue_bond_work(ldev, HZ); + return; + } + + mlx5_do_bond(ldev); + mutex_unlock(&mlx5_intf_mutex); +} + +static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *upper = info->upper_dev, *ndev_tmp; + struct netdev_lag_upper_info *lag_upper_info; + bool is_bonded; + int bond_status = 0; + int num_slaves = 0; + int idx; + + if (!netif_is_lag_master(upper)) + return 0; + + lag_upper_info = info->upper_info; + + /* The event may still be of interest if the slave does not belong to + * us, but is enslaved to a master which has one or more of our netdevs + * as slaves (e.g., if a new slave is added to a master that bonds two + * of our netdevs, we should unbond). + */ + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp); + if (idx > -1) + bond_status |= (1 << idx); + + num_slaves++; + } + rcu_read_unlock(); + + /* None of this lagdev's netdevs are slaves of this master. */ + if (!(bond_status & 0x3)) + return 0; + + if (lag_upper_info) + tracker->tx_type = lag_upper_info->tx_type; + + /* Determine bonding status: + * A device is considered bonded if both its physical ports are slaves + * of the same lag master, and only them. + * Lag mode must be activebackup or hash. 
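/* mlx5_handle_changeupper_event() encodes "which of our netdevs are
 * enslaved to this upper" as a 2-bit mask. The card counts as bonded
 * only when exactly its two ports, and nothing else, sit under one
 * master (mask 0x3, two slaves) -- plus a supported tx_type, elided
 * here. The mask bookkeeping on its own:
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 2

static bool both_ports_only(int num_slaves, int bond_status)
{
	return num_slaves == MAX_PORTS && bond_status == 0x3;
}

int main(void)
{
	int bond_status = 0, num_slaves = 0;
	int slave_idx[] = { 0, 1 };	/* indexes our ports got in the ldev */
	unsigned int i;

	for (i = 0; i < sizeof(slave_idx) / sizeof(slave_idx[0]); i++) {
		bond_status |= 1 << slave_idx[i];
		num_slaves++;
	}
	printf("bonded=%d\n", both_ports_only(num_slaves, bond_status));
	return 0;
}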
+ */ + is_bonded = (num_slaves == MLX5_MAX_PORTS) && + (bond_status == 0x3) && + ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) || + (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)); + + if (tracker->is_bonded != is_bonded) { + tracker->is_bonded = is_bonded; + return 1; + } + + return 0; +} + +static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + int idx; + + if (!netif_is_lag_port(ndev)) + return 0; + + idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev); + if (idx == -1) + return 0; + + /* This information is used to determine virtual to physical + * port mapping. + */ + lag_lower_info = info->lower_state_info; + if (!lag_lower_info) + return 0; + + tracker->netdev_state[idx] = *lag_lower_info; + + return 1; +} + +static int mlx5_lag_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct lag_tracker tracker; + struct mlx5_lag *ldev; + int changed = 0; + + if (!net_eq(dev_net(ndev), &init_net)) + return NOTIFY_DONE; + + if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE)) + return NOTIFY_DONE; + + ldev = container_of(this, struct mlx5_lag, nb); + tracker = ldev->tracker; + + switch (event) { + case NETDEV_CHANGEUPPER: + changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev, + ptr); + break; + case NETDEV_CHANGELOWERSTATE: + changed = mlx5_handle_changelowerstate_event(ldev, &tracker, + ndev, ptr); + break; + } + + mutex_lock(&lag_mutex); + ldev->tracker = tracker; + mutex_unlock(&lag_mutex); + + if (changed) + mlx5_queue_bond_work(ldev, 0); + + return NOTIFY_DONE; +} + +static struct mlx5_lag *mlx5_lag_dev_alloc(void) +{ + struct mlx5_lag *ldev; + + ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); + if (!ldev) + return NULL; + + INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work); + + return ldev; +} + +static void mlx5_lag_dev_free(struct mlx5_lag *ldev) +{ + kfree(ldev); +} + +static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev, + struct mlx5_core_dev *dev, + struct net_device *netdev) +{ + unsigned int fn = PCI_FUNC(dev->pdev->devfn); + + if (fn >= MLX5_MAX_PORTS) + return; + + mutex_lock(&lag_mutex); + ldev->pf[fn].dev = dev; + ldev->pf[fn].netdev = netdev; + ldev->tracker.netdev_state[fn].link_up = 0; + ldev->tracker.netdev_state[fn].tx_enabled = 0; + + dev->priv.lag = ldev; + mutex_unlock(&lag_mutex); +} + +static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev, + struct mlx5_core_dev *dev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev == dev) + break; + + if (i == MLX5_MAX_PORTS) + return; + + mutex_lock(&lag_mutex); + memset(&ldev->pf[i], 0, sizeof(*ldev->pf)); + + dev->priv.lag = NULL; + mutex_unlock(&lag_mutex); +} + +static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) +{ + return (u16)((dev->pdev->bus->number << 8) | + PCI_SLOT(dev->pdev->devfn)); +} + +/* Must be called with intf_mutex held */ +void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) +{ + struct mlx5_lag *ldev = NULL; + struct mlx5_core_dev *tmp_dev; + struct mlx5_priv *priv; + u16 pci_id; + + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)) + return; + + pci_id = mlx5_gen_pci_id(dev); + + mlx5_core_for_each_priv(priv) { + tmp_dev = container_of(priv, struct mlx5_core_dev, 
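/* mlx5_lag_add() pairs the two PFs of one card by a bus/slot id:
 * PCI_FUNC() is dropped, so function 0 and function 1 in the same
 * slot hash to the same value. A model over raw bus/devfn numbers:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t gen_pci_id(uint8_t bus, uint8_t devfn)
{
	return (uint16_t)((bus << 8) | (devfn >> 3));	/* PCI_SLOT(devfn) */
}

int main(void)
{
	/* 03:00.0 and 03:00.1 -- same card, two physical functions */
	uint16_t pf0 = gen_pci_id(0x03, (0x00 << 3) | 0);
	uint16_t pf1 = gen_pci_id(0x03, (0x00 << 3) | 1);

	printf("pf0=%#x pf1=%#x same=%d\n", pf0, pf1, pf0 == pf1);
	return 0;
}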
priv); + if ((dev != tmp_dev) && + (mlx5_gen_pci_id(tmp_dev) == pci_id)) { + ldev = tmp_dev->priv.lag; + break; + } + } + + if (!ldev) { + ldev = mlx5_lag_dev_alloc(); + if (!ldev) { + mlx5_core_err(dev, "Failed to alloc lag dev\n"); + return; + } + } + + mlx5_lag_dev_add_pf(ldev, dev, netdev); + + if (!ldev->nb.notifier_call) { + ldev->nb.notifier_call = mlx5_lag_netdev_event; + if (register_netdevice_notifier(&ldev->nb)) { + ldev->nb.notifier_call = NULL; + mlx5_core_err(dev, "Failed to register LAG netdev notifier\n"); + } + } +} + +/* Must be called with intf_mutex held */ +void mlx5_lag_remove(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + int i; + + ldev = mlx5_lag_dev_get(dev); + if (!ldev) + return; + + if (mlx5_lag_is_bonded(ldev)) + mlx5_deactivate_lag(ldev); + + mlx5_lag_dev_remove_pf(ldev, dev); + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + break; + + if (i == MLX5_MAX_PORTS) { + if (ldev->nb.notifier_call) + unregister_netdevice_notifier(&ldev->nb); + cancel_delayed_work_sync(&ldev->bond_work); + mlx5_lag_dev_free(ldev); + } +} + +bool mlx5_lag_is_active(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + mutex_lock(&lag_mutex); + ldev = mlx5_lag_dev_get(dev); + res = ldev && mlx5_lag_is_bonded(ldev); + mutex_unlock(&lag_mutex); + + return res; +} +EXPORT_SYMBOL(mlx5_lag_is_active); + +struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) +{ + struct net_device *ndev = NULL; + struct mlx5_lag *ldev; + + mutex_lock(&lag_mutex); + ldev = mlx5_lag_dev_get(dev); + + if (!(ldev && mlx5_lag_is_bonded(ldev))) + goto unlock; + + if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + ndev = ldev->tracker.netdev_state[0].tx_enabled ? + ldev->pf[0].netdev : ldev->pf[1].netdev; + } else { + ndev = ldev->pf[0].netdev; + } + if (ndev) + dev_hold(ndev); + +unlock: + mutex_unlock(&lag_mutex); + + return ndev; +} +EXPORT_SYMBOL(mlx5_lag_get_roce_netdev); + +bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) +{ + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, + priv); + struct mlx5_lag *ldev; + + if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB) + return true; + + ldev = mlx5_lag_dev_get(dev); + if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev) + return true; + + /* If bonded, we do not add an IB device for PF1. 
*/ + return false; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c index 1368dac00da0..3a3b0005fd2b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -39,36 +39,33 @@ int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port) { - struct mlx5_mad_ifc_mbox_in *in = NULL; - struct mlx5_mad_ifc_mbox_out *out = NULL; - int err; + int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out); + int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in); + int err = -ENOMEM; + void *data; + void *resp; + u32 *out; + u32 *in; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - out = kzalloc(sizeof(*out), GFP_KERNEL); - if (!out) { - err = -ENOMEM; + in = kzalloc(inlen, GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); + if (!in || !out) goto out; - } - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); - in->hdr.opmod = cpu_to_be16(opmod); - in->port = port; + MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC); + MLX5_SET(mad_ifc_in, in, op_mod, opmod); + MLX5_SET(mad_ifc_in, in, port, port); - memcpy(in->data, inb, sizeof(in->data)); + data = MLX5_ADDR_OF(mad_ifc_in, in, mad); + memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad)); - err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); + err = mlx5_cmd_exec(dev, in, inlen, out, outlen); if (err) goto out; - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); - goto out; - } - - memcpy(outb, out->data, sizeof(out->data)); + resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet); + memcpy(outb, resp, + MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet)); out: kfree(out); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2385bae92672..c132ef1faefe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -73,8 +73,9 @@ module_param_named(prof_sel, prof_sel, int, 0444); MODULE_PARM_DESC(prof_sel, "profile selector. 
Valid range 0 - 2"); static LIST_HEAD(intf_list); -static LIST_HEAD(dev_list); -static DEFINE_MUTEX(intf_mutex); + +LIST_HEAD(mlx5_dev_list); +DEFINE_MUTEX(mlx5_intf_mutex); struct mlx5_device_context { struct list_head list; @@ -324,7 +325,7 @@ enum { MLX5_DEV_CAP_FLAG_DCT, }; -static u16 to_fw_pkey_sz(u32 size) +static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size) { switch (size) { case 128: @@ -340,7 +341,7 @@ static u16 to_fw_pkey_sz(u32 size) case 4096: return 5; default: - pr_warn("invalid pkey table size %d\n", size); + mlx5_core_warn(dev, "invalid pkey table size %d\n", size); return 0; } } @@ -363,10 +364,6 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); - if (err) - goto query_ex; - - err = mlx5_cmd_status_to_err_v2(out); if (err) { mlx5_core_warn(dev, "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", @@ -409,20 +406,11 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod) { - u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; - int err; - - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1); - err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); - if (err) - return err; - - err = mlx5_cmd_status_to_err_v2(out); - - return err; + return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); } static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) @@ -490,7 +478,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) 128); /* we limit the size of the pkey table to 128 entries for now */ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, - to_fw_pkey_sz(128)); + to_fw_pkey_sz(dev, 128)); if (prof->mask & MLX5_PROF_MASK_QP_SIZE) MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, @@ -528,37 +516,22 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev) int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) { - u32 out[MLX5_ST_SZ_DW(enable_hca_out)]; - u32 in[MLX5_ST_SZ_DW(enable_hca_in)]; - int err; + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, function_id, func_id); - memset(out, 0, sizeof(out)); - - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - return mlx5_cmd_status_to_err_v2(out); + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); } int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) { - u32 out[MLX5_ST_SZ_DW(disable_hca_out)]; - u32 in[MLX5_ST_SZ_DW(disable_hca_in)]; - int err; + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); MLX5_SET(disable_hca_in, in, function_id, func_id); - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - return err; - - return mlx5_cmd_status_to_err_v2(out); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev) @@ -758,44 +731,40 @@ clean: static int mlx5_core_set_issi(struct mlx5_core_dev *dev) { - u32 
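/* An illustrative aside, not part of the patch: the "= {0}" conversions
 * above (enable_hca/disable_hca and friends) lean on C's rule that a
 * partially initialized aggregate is zero-filled, so the explicit
 * memset() calls become redundant. A minimal sketch of the equivalence:
 *
 *	u32 buf_a[16] = {0};			// zeroed at definition
 *	u32 buf_b[16];
 *	memset(buf_b, 0, sizeof(buf_b));	// same end state
 *
 * The initializer form is shorter and cannot be bypassed by an early
 * goto or return, which is why the series prefers it.
 */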
query_in[MLX5_ST_SZ_DW(query_issi_in)]; - u32 query_out[MLX5_ST_SZ_DW(query_issi_out)]; - u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]; - u32 set_out[MLX5_ST_SZ_DW(set_issi_out)]; - int err; + u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; + u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; u32 sup_issi; - - memset(query_in, 0, sizeof(query_in)); - memset(query_out, 0, sizeof(query_out)); + int err; MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); - - err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in), - query_out, sizeof(query_out)); + err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), + query_out, sizeof(query_out)); if (err) { - if (((struct mlx5_outbox_hdr *)query_out)->status == - MLX5_CMD_STAT_BAD_OP_ERR) { + u32 syndrome; + u8 status; + + mlx5_cmd_mbox_status(query_out, &status, &syndrome); + if (status == MLX5_CMD_STAT_BAD_OP_ERR) { pr_debug("Only ISSI 0 is supported\n"); return 0; } - pr_err("failed to query ISSI\n"); + pr_err("failed to query ISSI err(%d)\n", err); return err; } sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); if (sup_issi & (1 << 1)) { - memset(set_in, 0, sizeof(set_in)); - memset(set_out, 0, sizeof(set_out)); + u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; + u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); MLX5_SET(set_issi_in, set_in, current_issi, 1); - - err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in), - set_out, sizeof(set_out)); + err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), + set_out, sizeof(set_out)); if (err) { - pr_err("failed to set ISSI=1\n"); + pr_err("failed to set ISSI=1 err(%d)\n", err); return err; } @@ -814,6 +783,9 @@ static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) struct mlx5_device_context *dev_ctx; struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + if (!mlx5_lag_intf_add(intf, priv)) + return; + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); if (!dev_ctx) return; @@ -852,11 +824,11 @@ static int mlx5_register_device(struct mlx5_core_dev *dev) struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; - mutex_lock(&intf_mutex); - list_add_tail(&priv->dev_list, &dev_list); + mutex_lock(&mlx5_intf_mutex); + list_add_tail(&priv->dev_list, &mlx5_dev_list); list_for_each_entry(intf, &intf_list, list) mlx5_add_device(intf, priv); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); return 0; } @@ -866,11 +838,11 @@ static void mlx5_unregister_device(struct mlx5_core_dev *dev) struct mlx5_priv *priv = &dev->priv; struct mlx5_interface *intf; - mutex_lock(&intf_mutex); + mutex_lock(&mlx5_intf_mutex); list_for_each_entry(intf, &intf_list, list) mlx5_remove_device(intf, priv); list_del(&priv->dev_list); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); } int mlx5_register_interface(struct mlx5_interface *intf) @@ -880,11 +852,11 @@ int mlx5_register_interface(struct mlx5_interface *intf) if (!intf->add || !intf->remove) return -EINVAL; - mutex_lock(&intf_mutex); + mutex_lock(&mlx5_intf_mutex); list_add_tail(&intf->list, &intf_list); - list_for_each_entry(priv, &dev_list, dev_list) + list_for_each_entry(priv, &mlx5_dev_list, dev_list) mlx5_add_device(intf, priv); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); return 0; } @@ -894,11 +866,11 @@ void mlx5_unregister_interface(struct mlx5_interface *intf) { struct mlx5_priv *priv; - mutex_lock(&intf_mutex); - list_for_each_entry(priv, &dev_list, dev_list) + 
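/* A hedged sketch (composed only from calls visible in this patch, not a
 * new API) of the error idiom the ISSI hunk above demonstrates: with
 * mlx5_cmd_exec() now folding in the FW status check, a caller that must
 * distinguish one specific firmware status decodes it from the out mailbox:
 *
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *	if (err) {
 *		u32 syndrome;
 *		u8 status;
 *
 *		mlx5_cmd_mbox_status(out, &status, &syndrome);
 *		if (status == MLX5_CMD_STAT_BAD_OP_ERR)
 *			return 0;	// command unsupported: treat as benign
 *		return err;
 *	}
 */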
mutex_lock(&mlx5_intf_mutex); + list_for_each_entry(priv, &mlx5_dev_list, dev_list) mlx5_remove_device(intf, priv); list_del(&intf->list); - mutex_unlock(&intf_mutex); + mutex_unlock(&mlx5_intf_mutex); } EXPORT_SYMBOL(mlx5_unregister_interface); @@ -924,6 +896,30 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) } EXPORT_SYMBOL(mlx5_get_protocol_dev); +/* Must be called with intf_mutex held */ +void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) +{ + struct mlx5_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + mlx5_add_device(intf, &dev->priv); + break; + } +} + +/* Must be called with intf_mutex held */ +void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) +{ + struct mlx5_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + mlx5_remove_device(intf, &dev->priv); + break; + } +} + static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; @@ -1344,8 +1340,9 @@ static int init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { - pr_warn("selected profile out of range, selecting default (%d)\n", - MLX5_DEFAULT_PROF); + mlx5_core_warn(dev, + "selected profile out of range, selecting default (%d)\n", + MLX5_DEFAULT_PROF); prof_sel = MLX5_DEFAULT_PROF; } dev->profile = &profile[prof_sel]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index d5a0c2d61a18..ba2b09cc192f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -37,70 +37,30 @@ #include <rdma/ib_verbs.h> #include "mlx5_core.h" -struct mlx5_attach_mcg_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - __be32 rsvd; - u8 gid[16]; -}; - -struct mlx5_attach_mcg_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvf[8]; -}; - -struct mlx5_detach_mcg_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 qpn; - __be32 rsvd; - u8 gid[16]; -}; - -struct mlx5_detach_mcg_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvf[8]; -}; - int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) { - struct mlx5_attach_mcg_mbox_in in; - struct mlx5_attach_mcg_mbox_out out; - int err; + u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {0}; + void *gid; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); - memcpy(in.gid, mgid, sizeof(*mgid)); - in.qpn = cpu_to_be32(qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG); + MLX5_SET(attach_to_mcg_in, in, qpn, qpn); + gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid); + memcpy(gid, mgid, sizeof(*mgid)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_attach_mcg); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) { - struct mlx5_detach_mcg_mbox_in in; - struct mlx5_detach_mcg_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG); - memcpy(in.gid, mgid, sizeof(*mgid)); - in.qpn = cpu_to_be32(qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), 
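/* Hypothetical caller of the two protocol helpers above (not a hunk from
 * this patch): the LAG code can bounce just the IB interface while holding
 * the now-global mlx5_intf_mutex, leaving the Ethernet side registered:
 *
 *	mutex_lock(&mlx5_intf_mutex);
 *	mlx5_remove_dev_by_protocol(dev, MLX5_INTERFACE_PROTOCOL_IB);
 *	mlx5_add_dev_by_protocol(dev, MLX5_INTERFACE_PROTOCOL_IB);
 *	mutex_unlock(&mlx5_intf_mutex);
 */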
&out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); + u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {0}; + void *gid; - return err; + MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG); + MLX5_SET(detach_from_mcg_in, in, qpn, qpn); + gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid); + memcpy(gid, mgid, sizeof(*mgid)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 2f86ec6fcf25..714b71bed2be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -46,6 +46,9 @@ extern int mlx5_core_debug_mask; +extern struct list_head mlx5_dev_list; +extern struct mutex mlx5_intf_mutex; + #define mlx5_core_dbg(__dev, format, ...) \ dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ (__dev)->priv.name, __func__, __LINE__, current->pid, \ @@ -58,8 +61,8 @@ do { \ } while (0) #define mlx5_core_err(__dev, format, ...) \ - dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \ - (__dev)->priv.name, __func__, __LINE__, current->pid, \ + dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn(__dev, format, ...) \ @@ -70,24 +73,14 @@ do { \ #define mlx5_core_info(__dev, format, ...) \ dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) +#define mlx5_core_for_each_priv(__priv) \ + list_for_each_entry(__priv, &mlx5_dev_list, dev_list) + enum { MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_TIME, /* print command execution time */ }; -static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, - int in_size, u32 *out, - int out_size) -{ - int err; - - err = mlx5_cmd_exec(dev, in, in_size, out, out_size); - if (err) - return err; - - return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); -} - int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); @@ -97,6 +90,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, void mlx5_enter_error_state(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); +bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); @@ -105,7 +99,27 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); void mlx5_cq_tasklet_cb(unsigned long data); +void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); +void mlx5_lag_remove(struct mlx5_core_dev *dev); + +void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); +void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); + +bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); + void mlx5e_init(void); void mlx5e_cleanup(void); +static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) +{ + /* LACP owner conditions: + * 1) Function is physical. + * 2) LAG is supported by FW. 
+ * 3) LAG is managed by driver (currently the only option). + */ + return MLX5_CAP_GEN(dev, vport_group_manager) && + (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && + MLX5_CAP_GEN(dev, lag_master); +} + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 77a7293921d5..b9736f505bdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -49,48 +49,43 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev) { } -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - struct mlx5_create_mkey_mbox_in *in, int inlen, - mlx5_cmd_cbk_t callback, void *context, - struct mlx5_create_mkey_mbox_out *out) +int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen, + u32 *out, int outlen, + mlx5_cmd_cbk_t callback, void *context) { struct mlx5_mkey_table *table = &dev->priv.mkey_table; - struct mlx5_create_mkey_mbox_out lout; + u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0}; + u32 mkey_index; + void *mkc; int err; u8 key; - memset(&lout, 0, sizeof(lout)); spin_lock_irq(&dev->priv.mkey_lock); key = dev->priv.mkey_key++; spin_unlock_irq(&dev->priv.mkey_lock); - in->seg.qpn_mkey7_0 |= cpu_to_be32(key); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); - if (callback) { - err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), - callback, context); - return err; - } else { - err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); - } + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - if (err) { - mlx5_core_dbg(dev, "cmd exec failed %d\n", err); - return err; - } + MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); + MLX5_SET(mkc, mkc, mkey_7_0, key); - if (lout.hdr.status) { - mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); - return mlx5_cmd_status_to_err(&lout.hdr); - } + if (callback) + return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen, + callback, context); + + err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout)); + if (err) + return err; - mkey->iova = be64_to_cpu(in->seg.start_addr); - mkey->size = be64_to_cpu(in->seg.len); - mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; - mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; + mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index); + mkey->iova = MLX5_GET64(mkc, mkc, start_addr); + mkey->size = MLX5_GET64(mkc, mkc, len); + mkey->key = mlx5_idx_to_mkey(mkey_index) | key; + mkey->pd = MLX5_GET(mkc, mkc, pd); mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", - be32_to_cpu(lout.mkey), key, mkey->key); + mkey_index, key, mkey->key); /* connect to mkey tree */ write_lock_irq(&table->lock); @@ -104,20 +99,25 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, return err; } +EXPORT_SYMBOL(mlx5_core_create_mkey_cb); + +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, + struct mlx5_core_mkey *mkey, + u32 *in, int inlen) +{ + return mlx5_core_create_mkey_cb(dev, mkey, in, inlen, + NULL, 0, NULL, NULL); +} EXPORT_SYMBOL(mlx5_core_create_mkey); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey) { struct mlx5_mkey_table *table = &dev->priv.mkey_table; - struct mlx5_destroy_mkey_mbox_in in; - struct mlx5_destroy_mkey_mbox_out out; + u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0}; struct mlx5_core_mkey *deleted_mkey; unsigned long flags; - int err; - - memset(&in, 0, sizeof(in)); - 
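/* A compact restatement of the mlx5_ifc accessor pattern this series
 * standardizes on, using names from the create_mkey hunk above:
 *
 *	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {0};	// size in dwords
 *	void *mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 *
 *	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
 *	MLX5_SET(mkc, mkc, mkey_7_0, key);	// endianness handled by macro
 *
 * MLX5_ST_SZ_DW()/MLX5_ST_SZ_BYTES() derive buffer sizes from the firmware
 * interface description, and MLX5_SET()/MLX5_GET()/MLX5_GET64() perform
 * the byte swapping that the removed cpu_to_be*() code did by hand.
 */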
memset(&out, 0, sizeof(out)); write_lock_irqsave(&table->lock, flags); deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key)); @@ -128,94 +128,71 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, return -ENOENT; } - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); - in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key)); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_mkey); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - struct mlx5_query_mkey_mbox_out *out, int outlen) + u32 *out, int outlen) { - struct mlx5_query_mkey_mbox_in in; - int err; + u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0}; - memset(&in, 0, sizeof(in)); memset(out, 0, outlen); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); - in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key)); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY); + MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_mkey); int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, u32 *mkey) { - struct mlx5_query_special_ctxs_mbox_in in; - struct mlx5_query_special_ctxs_mbox_out out; + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - *mkey = be32_to_cpu(out.dump_fill_mkey); - + MLX5_SET(query_special_contexts_in, in, opcode, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *mkey = MLX5_GET(query_special_contexts_out, out, + dump_fill_mkey); return err; } EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); +static inline u32 mlx5_get_psv(u32 *out, int psv_index) +{ + switch (psv_index) { + case 1: return MLX5_GET(create_psv_out, out, psv1_index); + case 2: return MLX5_GET(create_psv_out, out, psv2_index); + case 3: return MLX5_GET(create_psv_out, out, psv3_index); + default: return MLX5_GET(create_psv_out, out, psv0_index); + } +} + int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index) { - struct mlx5_allocate_psv_in in; - struct mlx5_allocate_psv_out out; + u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_psv_in)] = {0}; int i, err; if (npsvs > MLX5_MAX_PSVS) return -EINVAL; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); + MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV); + MLX5_SET(create_psv_in, in, pd, pdn); + MLX5_SET(create_psv_in, in, num_psv, npsvs); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV); - in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if 
(err) { - mlx5_core_err(dev, "cmd exec failed %d\n", err); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (err) return err; - } - - if (out.hdr.status) { - mlx5_core_err(dev, "create_psv bad status %d\n", - out.hdr.status); - return mlx5_cmd_status_to_err(&out.hdr); - } for (i = 0; i < npsvs; i++) - sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff; + sig_index[i] = mlx5_get_psv(out, i); return err; } @@ -223,29 +200,11 @@ EXPORT_SYMBOL(mlx5_core_create_psv); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) { - struct mlx5_destroy_psv_in in; - struct mlx5_destroy_psv_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_psv_in)] = {0}; - in.psv_number = cpu_to_be32(psv_num); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) { - mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err); - goto out; - } - - if (out.hdr.status) { - mlx5_core_err(dev, "destroy_psv bad status %d\n", - out.hdr.status); - err = mlx5_cmd_status_to_err(&out.hdr); - goto out; - } - -out: - return err; + MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV); + MLX5_SET(destroy_psv_in, in, psvn, psv_num); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_psv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 32dea3524cee..673a7c96479a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -44,12 +44,6 @@ enum { MLX5_PAGES_TAKE = 2 }; -enum { - MLX5_BOOT_PAGES = 1, - MLX5_INIT_PAGES = 2, - MLX5_POST_INIT_PAGES = 3 -}; - struct mlx5_pages_req { struct mlx5_core_dev *dev; u16 func_id; @@ -67,33 +61,6 @@ struct fw_page { unsigned free_count; }; -struct mlx5_query_pages_inbox { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_query_pages_outbox { - struct mlx5_outbox_hdr hdr; - __be16 rsvd; - __be16 func_id; - __be32 num_pages; -}; - -struct mlx5_manage_pages_inbox { - struct mlx5_inbox_hdr hdr; - __be16 rsvd; - __be16 func_id; - __be32 num_entries; - __be64 pas[0]; -}; - -struct mlx5_manage_pages_outbox { - struct mlx5_outbox_hdr hdr; - __be32 num_entries; - u8 rsvd[4]; - __be64 pas[0]; -}; - enum { MAX_RECLAIM_TIME_MSECS = 5000, MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60, @@ -167,24 +134,21 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) { - struct mlx5_query_pages_inbox in; - struct mlx5_query_pages_outbox out; + u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); - in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES); + MLX5_SET(query_pages_in, in, op_mod, boot ? 
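/* The struct removals above are the point of the series: layouts such as
 * mlx5_query_pages_outbox duplicated information mlx5_ifc already
 * describes. The replacement read path, as the converted
 * mlx5_cmd_query_pages() shows, is two accessor calls against the shared
 * definition:
 *
 *	*npages  = MLX5_GET(query_pages_out, out, num_pages);
 *	*func_id = MLX5_GET(query_pages_out, out, function_id);
 *
 * One source of truth for field offsets and widths, instead of a
 * hand-maintained __be32/__be16 mirror of the PRM.
 */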
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES : + MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - *npages = be32_to_cpu(out.num_pages); - *func_id = be16_to_cpu(out.func_id); + *npages = MLX5_GET(query_pages_out, out, num_pages); + *func_id = MLX5_GET(query_pages_out, out, function_id); return err; } @@ -280,46 +244,37 @@ out_alloc: static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) { - struct mlx5_manage_pages_inbox *in; - struct mlx5_manage_pages_outbox out; + u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; int err; - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return; - - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); - in->func_id = cpu_to_be16(func_id); - err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); - if (!err) - err = mlx5_cmd_status_to_err(&out.hdr); + MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); + MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE); + MLX5_SET(manage_pages_in, in, function_id, func_id); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) - mlx5_core_warn(dev, "page notify failed\n"); - - kfree(in); + mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n", + func_id, err); } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { - struct mlx5_manage_pages_inbox *in; - struct mlx5_manage_pages_outbox out; - int inlen; + u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; + int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); u64 addr; int err; + u32 *in; int i; - inlen = sizeof(*in) + npages * sizeof(in->pas[0]); + inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in = mlx5_vzalloc(inlen); if (!in) { err = -ENOMEM; mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); goto out_free; } - memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { retry: @@ -332,27 +287,21 @@ retry: goto retry; } - in->pas[i] = cpu_to_be64(addr); + MLX5_SET64(manage_pages_in, in, pas[i], addr); } - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); - in->func_id = cpu_to_be16(func_id); - in->num_entries = cpu_to_be32(npages); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); + MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE); + MLX5_SET(manage_pages_in, in, function_id, func_id); + MLX5_SET(manage_pages_in, in, input_num_entries, npages); + + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_4k; } - err = mlx5_cmd_status_to_err(&out.hdr); - if (err) { - mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", - func_id, npages, out.hdr.status); - goto out_4k; - } - dev->priv.fw_pages += npages; if (func_id) dev->priv.vfs_pages += npages; @@ -364,7 +313,7 @@ retry: out_4k: for (i--; i >= 0; i--) - free_4k(dev, be64_to_cpu(in->pas[i])); + free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i])); out_free: kvfree(in); if (notify_fail) @@ -373,8 +322,7 @@ out_free: } static int reclaim_pages_cmd(struct mlx5_core_dev *dev, - struct mlx5_manage_pages_inbox *in, int in_size, - struct 
mlx5_manage_pages_outbox *out, int out_size) + u32 *in, int in_size, u32 *out, int out_size) { struct fw_page *fwp; struct rb_node *p; @@ -382,55 +330,54 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 i = 0; if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) - return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size, - (u32 *)out, out_size); + return mlx5_cmd_exec(dev, in, in_size, out, out_size); - npages = be32_to_cpu(in->num_entries); + /* No hard feelings, we want our pages back! */ + npages = MLX5_GET(manage_pages_in, in, input_num_entries); p = rb_first(&dev->priv.page_root); while (p && i < npages) { fwp = rb_entry(p, struct fw_page, rb_node); - out->pas[i] = cpu_to_be64(fwp->addr); + MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr); p = rb_next(p); i++; } - out->num_entries = cpu_to_be32(i); + MLX5_SET(manage_pages_out, out, output_num_entries, i); return 0; } static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed) { - struct mlx5_manage_pages_inbox in; - struct mlx5_manage_pages_outbox *out; + int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); + u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; int num_claimed; - int outlen; - u64 addr; + u32 *out; int err; int i; if (nclaimed) *nclaimed = 0; - memset(&in, 0, sizeof(in)); - outlen = sizeof(*out) + npages * sizeof(out->pas[0]); + outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); out = mlx5_vzalloc(outlen); if (!out) return -ENOMEM; - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); - in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); - in.func_id = cpu_to_be16(func_id); - in.num_entries = cpu_to_be32(npages); + MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); + MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE); + MLX5_SET(manage_pages_in, in, function_id, func_id); + MLX5_SET(manage_pages_in, in, input_num_entries, npages); + mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); - err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen); + err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); if (err) { mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); goto out_free; } - num_claimed = be32_to_cpu(out->num_entries); + num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries); if (num_claimed > npages) { mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n", num_claimed, npages); @@ -438,10 +385,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, goto out_free; } - for (i = 0; i < num_claimed; i++) { - addr = be64_to_cpu(out->pas[i]); - free_4k(dev, addr); - } + for (i = 0; i < num_claimed; i++) + free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); + if (nclaimed) *nclaimed = num_claimed; @@ -518,8 +464,8 @@ static int optimal_reclaimed_pages(void) int ret; ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - - sizeof(struct mlx5_manage_pages_outbox)) / - FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); + MLX5_ST_SZ_BYTES(manage_pages_out)) / + MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c index f2d3aee909e8..bd830d8d6c5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -36,66 +36,27 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -struct mlx5_alloc_pd_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_alloc_pd_mbox_out { - 
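/* Worth spelling out (a paraphrase of reclaim_pages_cmd() above, not new
 * code): when the device sits in MLX5_DEVICE_STATE_INTERNAL_ERROR the FW
 * cannot answer, so the driver fabricates the reply itself by walking its
 * own page tree and filling the out mailbox exactly as the FW would have:
 *
 *	MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);	// per page
 *	MLX5_SET(manage_pages_out, out, output_num_entries, i);	// count
 *
 * reclaim_pages() then frees those pages as after a real MLX5_PAGES_TAKE
 * completion, so teardown makes progress even with dead firmware.
 */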
struct mlx5_outbox_hdr hdr; - __be32 pdn; - u8 rsvd[4]; -}; - -struct mlx5_dealloc_pd_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 pdn; - u8 rsvd[4]; -}; - -struct mlx5_dealloc_pd_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) { - struct mlx5_alloc_pd_mbox_in in; - struct mlx5_alloc_pd_mbox_out out; + u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - *pdn = be32_to_cpu(out.pdn) & 0xffffff; + MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *pdn = MLX5_GET(alloc_pd_out, out, pd); return err; } EXPORT_SYMBOL(mlx5_core_alloc_pd); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) { - struct mlx5_dealloc_pd_mbox_in in; - struct mlx5_dealloc_pd_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); - in.pdn = cpu_to_be32(pdn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; + u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {0}; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD); + MLX5_SET(dealloc_pd_in, in, pd, pdn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 752c08127138..34e7184e23c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -38,45 +38,42 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, - u16 reg_num, int arg, int write) + u16 reg_id, int arg, int write) { - struct mlx5_access_reg_mbox_in *in = NULL; - struct mlx5_access_reg_mbox_out *out = NULL; + int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out; + int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in; int err = -ENOMEM; + u32 *out = NULL; + u32 *in = NULL; + void *data; - in = mlx5_vzalloc(sizeof(*in) + size_in); - if (!in) - return -ENOMEM; - - out = mlx5_vzalloc(sizeof(*out) + size_out); - if (!out) - goto ex1; - - memcpy(in->data, data_in, size_in); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); - in->hdr.opmod = cpu_to_be16(!write); - in->arg = cpu_to_be32(arg); - in->register_id = cpu_to_be16(reg_num); - err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, - sizeof(*out) + size_out); - if (err) - goto ex2; + in = mlx5_vzalloc(inlen); + out = mlx5_vzalloc(outlen); + if (!in || !out) + goto out; - if (out->hdr.status) - err = mlx5_cmd_status_to_err(&out->hdr); + data = MLX5_ADDR_OF(access_register_in, in, register_data); + memcpy(data, data_in, size_in); - if (!err) - memcpy(data_out, out->data, size_out); + MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG); + MLX5_SET(access_register_in, in, op_mod, !write); + MLX5_SET(access_register_in, in, argument, arg); + MLX5_SET(access_register_in, in, register_id, reg_id); + + err = mlx5_cmd_exec(dev, 
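/* A sketch of the variable-length mailbox sizing used by the converted
 * mlx5_core_access_reg() above (give_pages()/reclaim_pages() do the same):
 * the fixed ifc layout size plus the caller-supplied payload:
 *
 *	int inlen  = MLX5_ST_SZ_BYTES(access_register_in)  + size_in;
 *	int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
 *	u32 *in  = mlx5_vzalloc(inlen);
 *	u32 *out = mlx5_vzalloc(outlen);
 *
 * register_data is a flexible tail, so MLX5_ADDR_OF() returns the spot to
 * memcpy() the raw register payload into.
 */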
in, inlen, out, outlen); + if (err) + goto out; + + data = MLX5_ADDR_OF(access_register_out, out, register_data); + memcpy(data_out, data, size_out); -ex2: +out: kvfree(out); -ex1: kvfree(in); return err; } EXPORT_SYMBOL_GPL(mlx5_core_access_reg); - struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; @@ -104,12 +101,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port) { - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(ptys_reg, in, local_port, local_port); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); - return mlx5_core_access_reg(dev, in, sizeof(in), ptys, ptys_size, MLX5_REG_PTYS, 0, 0); } @@ -117,13 +112,11 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) { + u32 in[MLX5_ST_SZ_DW(mlcr_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(mlcr_reg)]; - u32 in[MLX5_ST_SZ_DW(mlcr_reg)]; - memset(in, 0, sizeof(in)); MLX5_SET(mlcr_reg, in, local_port, 1); MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration); - return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MLCR, 0, 1); } @@ -182,25 +175,39 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); -int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, int proto_mask, - u8 local_port) +int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, + u32 *proto_oper, u8 local_port) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port); + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, + local_port); if (err) return err; - if (proto_mask == MLX5_PTYS_EN) - *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - else - *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + + return 0; +} +EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper); + +int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, + local_port); + if (err) + return err; + + *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); +EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper); int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, u32 proto_admin, int proto_mask) @@ -246,15 +253,12 @@ EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) { - u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)]; - memset(in, 0, sizeof(in)); - MLX5_SET(paos_reg, in, local_port, 1); MLX5_SET(paos_reg, in, admin_status, status); MLX5_SET(paos_reg, in, ase, 1); - return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 1); } @@ -263,19 +267,15 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status) { - u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(paos_reg)]; int err; - memset(in, 0, sizeof(in)); - MLX5_SET(paos_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 0); if (err) 
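/* One convention to note in the port helpers above and below: the last
 * argument of mlx5_core_access_reg() selects write (1) or query (0), and
 * becomes "op_mod = !write" on the wire. An illustrative (not from this
 * patch) read-then-write of the PAOS register:
 *
 *	mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *			     MLX5_REG_PAOS, 0, 0);	// query
 *	// ...adjust fields in a fresh "in" via MLX5_SET(paos_reg, ...)...
 *	mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
 *			     MLX5_REG_PAOS, 0, 1);	// write back
 */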
return err; - *status = MLX5_GET(paos_reg, out, admin_status); return 0; } @@ -284,13 +284,10 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, u16 *max_mtu, u16 *oper_mtu, u8 port) { - u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; - memset(in, 0, sizeof(in)); - MLX5_SET(pmtu_reg, in, local_port, port); - mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 0); @@ -304,14 +301,11 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) { - u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; - memset(in, 0, sizeof(in)); - MLX5_SET(pmtu_reg, in, admin_mtu, mtu); MLX5_SET(pmtu_reg, in, local_port, port); - return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 1); } @@ -333,15 +327,12 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) { + u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; - u32 in[MLX5_ST_SZ_DW(pmlp_reg)]; int module_mapping; int err; - memset(in, 0, sizeof(in)); - MLX5_SET(pmlp_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMLP, 0, 0); if (err) @@ -410,11 +401,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int pvlc_size, u8 local_port) { - u32 in[MLX5_ST_SZ_DW(pvlc_reg)]; + u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(pvlc_reg, in, local_port, local_port); - return mlx5_core_access_reg(dev, in, sizeof(in), pvlc, pvlc_size, MLX5_REG_PVLC, 0, 0); } @@ -460,10 +449,9 @@ EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt); int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); MLX5_SET(pfcc_reg, in, pptx, tx_pause); MLX5_SET(pfcc_reg, in, pprx, rx_pause); @@ -476,13 +464,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 0); if (err) @@ -500,10 +486,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_pause); int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx); MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx); @@ -517,13 +502,11 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) { - u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; int err; - memset(in, 0, sizeof(in)); MLX5_SET(pfcc_reg, in, local_port, 1); - err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 0); if (err) @@ 
-567,12 +550,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev) int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) { - u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0}; u32 out[MLX5_ST_SZ_DW(qtct_reg)]; int err; int i; - memset(in, 0, sizeof(in)); for (i = 0; i < 8; i++) { if (prio_tc[i] > mlx5_max_tc(mdev)) return -EINVAL; @@ -617,11 +599,9 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) { - u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; int i; - memset(in, 0, sizeof(in)); - for (i = 0; i <= mlx5_max_tc(mdev); i++) { MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1); MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]); @@ -633,11 +613,9 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) { - u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; int i; - memset(in, 0, sizeof(in)); - for (i = 0; i <= mlx5_max_tc(mdev); i++) { MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1); MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]); @@ -651,12 +629,10 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_units) { - u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; void *ets_tcn_conf; int i; - memset(in, 0, sizeof(in)); - MLX5_SET(qetc_reg, in, port_number, 1); for (i = 0; i <= mlx5_max_tc(mdev); i++) { @@ -701,35 +677,24 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) { - u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]; - u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0}; MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL); MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1); MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_set_port_wol); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) { - u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]; - u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)]; + u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL); - - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), - out, sizeof(out)); - + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (!err) *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode); @@ -740,11 +705,9 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_wol); static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; - memset(in, 0, sizeof(in)); MLX5_SET(pcmr_reg, in, local_port, 1); - return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, MLX5_REG_PCMR, 0, 0); } @@ -759,12 +722,10 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable) { - u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; - memset(in, 0, sizeof(in)); 
MLX5_SET(pcmr_reg, in, local_port, 1); MLX5_SET(pcmr_reg, in, fcs_chk, enable); - return mlx5_set_ports_check(mdev, in, sizeof(in)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b82d65802d96..d0a4005fe63a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -271,30 +271,20 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev, int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - struct mlx5_create_qp_mbox_in *in, - int inlen) + u32 *in, int inlen) { - struct mlx5_create_qp_mbox_out out; - struct mlx5_destroy_qp_mbox_in din; - struct mlx5_destroy_qp_mbox_out dout; + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0}; + u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)]; + u32 din[MLX5_ST_SZ_DW(destroy_qp_in)]; int err; - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) { - mlx5_core_warn(dev, "ret %d\n", err); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + if (err) return err; - } - - if (out.hdr.status) { - mlx5_core_warn(dev, "current num of QPs 0x%x\n", - atomic_read(&dev->num_qps)); - return mlx5_cmd_status_to_err(&out.hdr); - } - qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + qp->qpn = MLX5_GET(create_qp_out, out, qpn); mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); err = create_qprqsq_common(dev, qp, MLX5_RES_QP); @@ -311,12 +301,11 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, return 0; err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); - din.qpn = cpu_to_be32(qp->qpn); - mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); - + memset(din, 0, sizeof(din)); + memset(dout, 0, sizeof(dout)); + MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); + mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)); return err; } EXPORT_SYMBOL_GPL(mlx5_core_create_qp); @@ -324,45 +313,145 @@ EXPORT_SYMBOL_GPL(mlx5_core_create_qp); int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) { - struct mlx5_destroy_qp_mbox_in in; - struct mlx5_destroy_qp_mbox_out out; + u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0}; int err; mlx5_debug_qp_remove(dev, qp); destroy_qprqsq_common(dev, qp); - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); - in.qpn = cpu_to_be32(qp->qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - atomic_dec(&dev->num_qps); return 0; } EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation, - struct mlx5_modify_qp_mbox_in *in, int sqd_event, +struct mbox_info { + u32 *in; + u32 *out; + int inlen; + int outlen; +}; + +static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen) +{ + mbox->inlen = inlen; + mbox->outlen = outlen; + mbox->in = kzalloc(mbox->inlen, GFP_KERNEL); + mbox->out = kzalloc(mbox->outlen, GFP_KERNEL); + if (!mbox->in || !mbox->out) { + kfree(mbox->in); + 
kfree(mbox->out); + return -ENOMEM; + } + + return 0; +} + +static void mbox_free(struct mbox_info *mbox) +{ + kfree(mbox->in); + kfree(mbox->out); +} + +static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, + u32 opt_param_mask, void *qpc, + struct mbox_info *mbox) +{ + mbox->out = NULL; + mbox->in = NULL; + +#define MBOX_ALLOC(mbox, typ) \ + mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out)) + +#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \ + MLX5_SET(typ##_in, in, opcode, _opcode); \ + MLX5_SET(typ##_in, in, qpn, _qpn) + +#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \ + MOD_QP_IN_SET(typ, in, _opcode, _qpn); \ + MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \ + memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc)) + + switch (opcode) { + /* 2RST & 2ERR */ + case MLX5_CMD_OP_2RST_QP: + if (MBOX_ALLOC(mbox, qp_2rst)) + return -ENOMEM; + MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn); + break; + case MLX5_CMD_OP_2ERR_QP: + if (MBOX_ALLOC(mbox, qp_2err)) + return -ENOMEM; + MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn); + break; + + /* MODIFY with QPC */ + case MLX5_CMD_OP_RST2INIT_QP: + if (MBOX_ALLOC(mbox, rst2init_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_INIT2RTR_QP: + if (MBOX_ALLOC(mbox, init2rtr_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_RTR2RTS_QP: + if (MBOX_ALLOC(mbox, rtr2rts_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_RTS2RTS_QP: + if (MBOX_ALLOC(mbox, rts2rts_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_SQERR2RTS_QP: + if (MBOX_ALLOC(mbox, sqerr2rts_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + case MLX5_CMD_OP_INIT2INIT_QP: + if (MBOX_ALLOC(mbox, init2init_qp)) + return -ENOMEM; + MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; + default: + mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n", + opcode, qpn); + return -EINVAL; + } + return 0; +} + +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, + u32 opt_param_mask, void *qpc, struct mlx5_core_qp *qp) { - struct mlx5_modify_qp_mbox_out out; - int err = 0; + struct mbox_info mbox; + int err; - memset(&out, 0, sizeof(out)); - in->hdr.opcode = cpu_to_be16(operation); - in->qpn = cpu_to_be32(qp->qpn); - err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + err = modify_qp_mbox_alloc(dev, opcode, qp->qpn, + opt_param_mask, qpc, &mbox); if (err) return err; - return mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen); + mbox_free(&mbox); + return err; } EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); @@ -382,66 +471,38 @@ void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) } int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - struct mlx5_query_qp_mbox_out *out, int outlen) + u32 *out, int outlen) { - struct mlx5_query_qp_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, outlen); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); - in.qpn = cpu_to_be32(qp->qpn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); - if (err) - return err; + u32 
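/* The mbox_info/MOD_QP_IN_SET machinery above changes the public contract:
 * mlx5_core_qp_modify() now takes the transition opcode plus a raw QPC
 * rather than a mlx5_modify_qp_mbox_in (and the unused sqd_event argument
 * is gone). A hedged caller-side sketch, context preparation elided:
 *
 *	void *qpc = ...;  // MLX5_ST_SZ_BYTES(qpc) bytes of prepared context
 *
 *	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP,
 *				  opt_param_mask, qpc, qp);
 *
 * Each transition gets a correctly sized rst2init_qp/init2rtr_qp/...
 * mailbox from modify_qp_mbox_alloc(), instead of one catch-all struct for
 * every state change.
 */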
in[MLX5_ST_SZ_DW(query_qp_in)] = {0}; - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP); + MLX5_SET(query_qp_in, in, qpn, qp->qpn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL_GPL(mlx5_core_qp_query); int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) { - struct mlx5_alloc_xrcd_mbox_in in; - struct mlx5_alloc_xrcd_mbox_out out; + u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - else - *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; - + MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd); return err; } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) { - struct mlx5_dealloc_xrcd_mbox_in in; - struct mlx5_dealloc_xrcd_mbox_out out; - int err; + u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0}; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); - in.xrcdn = cpu_to_be32(xrcdn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD); + MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); @@ -449,28 +510,23 @@ EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, u8 flags, int error) { - struct mlx5_page_fault_resume_mbox_in in; - struct mlx5_page_fault_resume_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); - in.hdr.opmod = 0; - flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR | - MLX5_PAGE_FAULT_RESUME_WRITE | - MLX5_PAGE_FAULT_RESUME_RDMA); - flags |= (error ? 
MLX5_PAGE_FAULT_RESUME_ERROR : 0); - in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | - (flags << MLX5_QPN_BITS)); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - return err; + u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; + + MLX5_SET(page_fault_resume_in, in, opcode, + MLX5_CMD_OP_PAGE_FAULT_RESUME); + MLX5_SET(page_fault_resume_in, in, qpn, qpn); + + if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR) + MLX5_SET(page_fault_resume_in, in, req_res, 1); + if (flags & MLX5_PAGE_FAULT_RESUME_WRITE) + MLX5_SET(page_fault_resume_in, in, read_write, 1); + if (flags & MLX5_PAGE_FAULT_RESUME_RDMA) + MLX5_SET(page_fault_resume_in, in, rdma, 1); + if (error) + MLX5_SET(page_fault_resume_in, in, error, 1); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); #endif @@ -541,15 +597,12 @@ EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) { - u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *counter_id = MLX5_GET(alloc_q_counter_out, out, counter_set_id); @@ -559,31 +612,25 @@ EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter); int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) { - u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0}; MLX5_SET(dealloc_q_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_Q_COUNTER); MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size) { - u32 in[MLX5_ST_SZ_DW(query_q_counter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0}; MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); MLX5_SET(query_q_counter_in, in, clear, reset); MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size); + return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); } EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c index c07c28bd3d55..104902a93a0b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -63,19 +63,14 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, u32 rate, u16 index) { - u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]; - u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)]; - - 
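/* The converted Q-counter helpers in this hunk compose into the usual
 * lifecycle; a minimal sketch (an out buffer of query_q_counter_out size
 * is assumed):
 *
 *	u16 counter_id;
 *
 *	err = mlx5_core_alloc_q_counter(dev, &counter_id);
 *	...
 *	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
 *	...
 *	mlx5_core_dealloc_q_counter(dev, counter_id);
 */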
memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0}; MLX5_SET(set_rate_limit_in, in, opcode, MLX5_CMD_OP_SET_RATE_LIMIT); MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); MLX5_SET(set_rate_limit_in, in, rate_limit, rate); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index b380a6bc1f85..78e789245183 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -37,6 +37,13 @@ #include "eswitch.h" #endif +bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) +{ + struct mlx5_core_sriov *sriov = &dev->priv.sriov; + + return !!sriov->num_vfs; +} + static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; @@ -144,6 +151,11 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!mlx5_core_is_pf(dev)) return -EPERM; + if (num_vfs && mlx5_lag_is_active(dev)) { + mlx5_core_warn(dev, "can't turn sriov on while LAG is active"); + return -EINVAL; + } + mlx5_core_cleanup_vfs(dev); if (!num_vfs) { @@ -155,13 +167,13 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pdev)) pci_disable_sriov(pdev); else - pr_info("unloading PF driver while leaving orphan VFs\n"); + mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n"); return 0; } err = mlx5_core_sriov_enable(pdev, num_vfs); if (err) { - dev_warn(&pdev->dev, "mlx5_core_sriov_enable failed %d\n", err); + mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err); return err; } @@ -180,7 +192,8 @@ static int sync_required(struct pci_dev *pdev) int cur_vfs = pci_num_vf(pdev); if (cur_vfs != sriov->num_vfs) { - pr_info("current VFs %d, registered %d - sync needed\n", cur_vfs, sriov->num_vfs); + mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n", + cur_vfs, sriov->num_vfs); return 1; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index c07f4d01b70e..3099630015d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -175,8 +175,8 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ); - err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, - sizeof(create_out)); + err = mlx5_cmd_exec(dev, create_in, inlen, create_out, + sizeof(create_out)); kvfree(create_in); if (!err) srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); @@ -194,8 +194,8 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev, MLX5_CMD_OP_DESTROY_SRQ); MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); - return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), - srq_out, sizeof(srq_out)); + return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, @@ -209,8 +209,8 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); - return 
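/*
 * Editor's note -- two small things worth flagging in the sriov hunk above:
 * mlx5_sriov_is_enabled() uses the kernel's "!!" idiom to collapse a VF
 * count into a strict 0/1 boolean, and the new guard in
 * mlx5_core_sriov_configure() refuses to enable VFs while LAG (bonding
 * offload) owns the port, since the commit treats the two features as
 * mutually exclusive. Sketch of the idiom with a hypothetical helper:
 */
static inline bool example_feature_enabled(int count)
{
	return !!count;	/* 0 -> false, any non-zero count -> true */
}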
mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), - srq_out, sizeof(srq_out)); + return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, @@ -228,9 +228,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ); MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); - err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), - srq_out, - MLX5_ST_SZ_BYTES(query_srq_out)); + err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), + srq_out, MLX5_ST_SZ_BYTES(query_srq_out)); if (err) goto out; @@ -272,8 +271,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, MLX5_CMD_OP_CREATE_XRC_SRQ); memset(create_out, 0, sizeof(create_out)); - err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, - sizeof(create_out)); + err = mlx5_cmd_exec(dev, create_in, inlen, create_out, + sizeof(create_out)); if (err) goto out; @@ -286,36 +285,30 @@ out: static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; - u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; - - memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); - memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0}; + u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0}; MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, sizeof(xrcsrq_out)); + return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); } static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm) { - u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; - u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; - - memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); - memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; + u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); - return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, sizeof(xrcsrq_out)); + return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); } static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, @@ -335,9 +328,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + + err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); if (err) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 28274a6fbafe..a00ff49eec18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -36,17 +36,14 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn) { - u32 
in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0}; int err; - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain); @@ -57,29 +54,23 @@ EXPORT_SYMBOL(mlx5_core_alloc_transport_domain); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn) { - u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0}; MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN); MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) { - u32 out[MLX5_ST_SZ_DW(create_rq_out)]; + u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0}; int err; MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *rqn = MLX5_GET(create_rq_out, out, rqn); @@ -95,21 +86,18 @@ int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_rq); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) { - u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rq_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_rq); @@ -121,19 +109,17 @@ int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ); MLX5_SET(query_rq_in, in, rqn, rqn); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_rq); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) { - u32 out[MLX5_ST_SZ_DW(create_sq_out)]; + u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0}; int err; MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *sqn = MLX5_GET(create_sq_out, out, sqn); @@ -142,27 +128,22 @@ int 
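/*
 * Editor's note -- on the "= {0}" conversions running through these hunks:
 * a partial initializer zero-fills the rest of the array (C99 6.7.8), which
 * is exactly what the dropped memset() pairs did. The create-side wrappers
 * also share one shape: stamp the opcode into the caller-built "in"
 * mailbox, execute, and read the new object number back only on success.
 * Hypothetical wrapper over the real create_tis layout:
 */
static int mlx5_example_create_tis(struct mlx5_core_dev *dev,
				   u32 *in, int inlen, u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};	/* zero-filled, no memset */
	int err;

	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)	/* MLX5_GET() reads back the field the firmware filled in */
		*tisn = MLX5_GET(create_tis_out, out, tisn);
	return err;
}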
mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_sq_out)]; + u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0}; MLX5_SET(modify_sq_in, in, sqn, sqn); MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_sq); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) { - u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_sq_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0}; MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); MLX5_SET(destroy_sq_in, in, sqn, sqn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) @@ -172,21 +153,20 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out) MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ); MLX5_SET(query_sq_in, in, sqn, sqn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } EXPORT_SYMBOL(mlx5_core_query_sq); int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) { - u32 out[MLX5_ST_SZ_DW(create_tir_out)]; + u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0}; int err; MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *tirn = MLX5_GET(create_tir_out, out, tirn); @@ -197,39 +177,32 @@ EXPORT_SYMBOL(mlx5_core_create_tir); int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_tir_out)]; + u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0}; MLX5_SET(modify_tir_in, in, tirn, tirn); MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) { - u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_tir_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0}; MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); MLX5_SET(destroy_tir_in, in, tirn, tirn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_tir); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn) { - u32 out[MLX5_ST_SZ_DW(create_tis_out)]; + u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0}; int err; MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *tisn = MLX5_GET(create_tis_out, out, tisn); @@ -245,34 +218,29 @@ int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, MLX5_SET(modify_tis_in, in, 
tisn, tisn); MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_modify_tis); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) { - u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_tis_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0}; MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS); MLX5_SET(destroy_tis_in, in, tisn, tisn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_tis); int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn) { - u32 out[MLX5_ST_SZ_DW(create_rmp_out)]; + u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0}; int err; MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *rmpn = MLX5_GET(create_rmp_out, out, rmpn); @@ -281,38 +249,31 @@ int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_rmp_out)]; + u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0}; MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn) { - u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0}; MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); MLX5_SET(destroy_rmp_in, in, rmpn, rmpn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_rmp_in)]; + u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0}; int outlen = MLX5_ST_SZ_BYTES(query_rmp_out); - memset(in, 0, sizeof(in)); MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP); MLX5_SET(query_rmp_in, in, rmpn, rmpn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); } int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) @@ -347,13 +308,11 @@ int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *xsrqn) { - u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0}; int err; MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn); @@ -362,33 +321,25 @@ int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) { - u32 
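/*
 * Editor's note -- the destroy-side helpers above (destroy_rq, tir, tis,
 * rmp, ...) stay void or discard the command result: there is nothing a
 * caller can do if a DESTROY command fails while the object is being torn
 * down, so the mlx5_cmd_exec() return value is deliberately dropped. A
 * caller-side sketch (hypothetical function, real helpers from this file):
 */
static void example_teardown(struct mlx5_core_dev *dev, u32 tirn, u32 tisn)
{
	/* destroy helpers are void: teardown cannot meaningfully fail */
	mlx5_core_destroy_tir(dev, tirn);
	mlx5_core_destroy_tis(dev, tisn);
}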
in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0}; MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) { - u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0}; void *srqc; void *xrc_srqc; int err; - memset(in, 0, sizeof(in)); MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); - - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); if (!err) { xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, xrc_srq_context_entry); @@ -401,32 +352,25 @@ int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) { - u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; - u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn); MLX5_SET(arm_xrc_srq_in, in, lwm, lwm); MLX5_SET(arm_xrc_srq_in, in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn) { - u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; int err; MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *rqtn = MLX5_GET(create_rqt_out, out, rqtn); @@ -437,25 +381,20 @@ EXPORT_SYMBOL(mlx5_core_create_rqt); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_rqt_out)]; + u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; MLX5_SET(modify_rqt_in, in, rqtn, rqtn); MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) { - u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0}; MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); MLX5_SET(destroy_rqt_in, in, rqtn, rqtn); - - mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_rqt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 
5ff8af472bf5..ab0b896621a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -42,73 +42,28 @@ enum { NUM_LOW_LAT_UUARS = 4, }; - -struct mlx5_alloc_uar_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_alloc_uar_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 uarn; - u8 rsvd[4]; -}; - -struct mlx5_free_uar_mbox_in { - struct mlx5_inbox_hdr hdr; - __be32 uarn; - u8 rsvd[4]; -}; - -struct mlx5_free_uar_mbox_out { - struct mlx5_outbox_hdr hdr; - u8 rsvd[8]; -}; - int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) { - struct mlx5_alloc_uar_mbox_in in; - struct mlx5_alloc_uar_mbox_out out; + u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {0}; int err; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - goto ex; - - if (out.hdr.status) { - err = mlx5_cmd_status_to_err(&out.hdr); - goto ex; - } - - *uarn = be32_to_cpu(out.uarn) & 0xffffff; - -ex: + MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *uarn = MLX5_GET(alloc_uar_out, out, uar); return err; } EXPORT_SYMBOL(mlx5_cmd_alloc_uar); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) { - struct mlx5_free_uar_mbox_in in; - struct mlx5_free_uar_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); - in.uarn = cpu_to_be32(uarn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - goto ex; + u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {0}; - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - -ex: - return err; + MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR); + MLX5_SET(dealloc_uar_in, in, uar, uarn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_cmd_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 21365d06982b..525f17af108e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -39,10 +39,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u32 *out, int outlen) { - int err; - u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0}; MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE); @@ -51,11 +48,7 @@ static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, if (vport) MLX5_SET(query_vport_state_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); - if (err) - mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); - - return err; + return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) @@ -81,58 +74,43 @@ EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state); int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport, u8 state) { - u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]; - u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)]; - int err; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0}; + u32 
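/*
 * Editor's note -- the UAR conversion above is a good place to see why the
 * struct mailboxes could go entirely: the old code extracted the UAR number
 * with "be32_to_cpu(out.uarn) & 0xffffff", while
 * MLX5_GET(alloc_uar_out, out, uar) performs the same big-endian load and
 * 24-bit field extraction from the mlx5_ifc layout, so the manual byte
 * swapping and mask constants disappear together with the structs.
 * Hypothetical round-trip using the real helpers:
 */
static int mlx5_example_uar_roundtrip(struct mlx5_core_dev *dev)
{
	u32 uarn;
	int err;

	err = mlx5_cmd_alloc_uar(dev, &uarn);	/* uarn holds the bare field */
	if (err)
		return err;
	return mlx5_cmd_free_uar(dev, uarn);
}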
out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0}; MLX5_SET(modify_vport_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VPORT_STATE); MLX5_SET(modify_vport_state_in, in, op_mod, opmod); MLX5_SET(modify_vport_state_in, in, vport_number, vport); - if (vport) MLX5_SET(modify_vport_state_in, in, other_vport, 1); - MLX5_SET(modify_vport_state_in, in, admin_state, state); - err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, - sizeof(out)); - if (err) - mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n"); - - return err; + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state); static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport, u32 *out, int outlen) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0}; MLX5_SET(query_nic_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); - MLX5_SET(query_nic_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); } static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; MLX5_SET(modify_nic_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); - - memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); + return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); } void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, @@ -147,6 +125,26 @@ void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); +int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u16 vport, u8 min_inline) +{ + u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + void *nic_vport_ctx; + + MLX5_SET(modify_nic_vport_context_in, in, + field_select.min_inline, 1); + MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); + MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1); + + nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, + in, nic_vport_context); + MLX5_SET(nic_vport_context, nic_vport_ctx, + min_wqe_inline_mode, min_inline); + + return mlx5_modify_nic_vport_context(mdev, in, inlen); +} + int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr) { @@ -254,7 +252,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, u8 addr_list[][ETH_ALEN], int *list_size) { - u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0}; void *nic_vport_ctx; int max_list_size; int req_list_size; @@ -278,7 +276,6 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); - memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; @@ -291,7 +288,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) goto 
out; @@ -361,7 +358,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, ether_addr_copy(curr_mac, addr_list[i]); } - err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); kfree(in); return err; } @@ -406,7 +403,7 @@ int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, if (vport) MLX5_SET(query_nic_vport_context_in, in, other_vport, 1); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) goto out; @@ -473,7 +470,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]); } - err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); kfree(in); return err; } @@ -631,10 +628,6 @@ int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, if (err) goto out; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto out; - tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); gid->global.subnet_prefix = tmp->global.subnet_prefix; gid->global.interface_id = tmp->global.interface_id; @@ -700,10 +693,6 @@ int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, if (err) goto out; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto out; - pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey); for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey)) *pkey = MLX5_GET_PR(pkey, pkarr, pkey); @@ -721,7 +710,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *rep) { int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); - int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)]; + int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0}; int is_group_manager; void *out; void *ctx; @@ -729,7 +718,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); - memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; @@ -752,9 +740,6 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); if (err) goto ex; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto ex; ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context); rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select); @@ -969,10 +954,6 @@ int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, MLX5_SET(query_vport_counter_in, in, port_num, port_num); err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); - if (err) - goto free; - err = mlx5_cmd_status_to_err_v2(out); - free: kvfree(in); return err; @@ -1035,11 +1016,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter); MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter); err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); - if (err) - goto ex; - - err = mlx5_cmd_status_to_err_v2(out); - ex: kfree(in); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index e25a73ed2981..07a9ba6cfc70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -46,41 +46,24 @@ void mlx5e_vxlan_init(struct mlx5e_priv *priv) static int mlx5e_vxlan_core_add_port_cmd(struct 
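/*
 * Editor's note -- the vport.c hunks above drop a second-stage check rather
 * than convert mailboxes: callers such as mlx5_query_hca_vport_gid()
 * already used the IFC layouts, but still ran mlx5_cmd_status_to_err_v2()
 * after a successful exec. With status decoding folded into
 * mlx5_cmd_exec(), the pattern shrinks; before/after shape of the
 * fragments, names as in the hunks:
 */
	/* old: two checks per command */
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;
	err = mlx5_cmd_status_to_err_v2(out);	/* separate FW status decode */
	if (err)
		goto out;
	/* new: the single call already reflects the firmware status */
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;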
mlx5_core_dev *mdev, u16 port) { - struct mlx5_outbox_hdr *hdr; - int err; - - u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]; - u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0}; MLX5_SET(add_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); - - err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); - if (err) - return err; - - hdr = (struct mlx5_outbox_hdr *)out; - return hdr->status ? -ENOMEM : 0; + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) { - u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]; - u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0}; MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); } struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index d3476ead9982..d2e32979319c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -87,6 +87,7 @@ struct mlxsw_rx_listener { void (*func)(struct sk_buff *skb, u8 local_port, void *priv); u8 local_port; u16 trap_id; + enum mlxsw_reg_hpkt_action action; }; struct mlxsw_event_listener { diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1721098eef13..b83d0a7a0b49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -591,6 +591,12 @@ static const struct mlxsw_reg_info mlxsw_reg_sfn = { */ MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8); +/* reg_sfn_end + * Forces the current session to end. + * Access: OP + */ +MLXSW_ITEM32(reg, sfn, end, 0x04, 20, 1); + /* reg_sfn_num_rec * Request: Number of learned notifications and aged-out notification * records requested. 
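/*
 * Editor's note -- the new reg_sfn_end bit above pairs with the fdb_notify
 * rework further down: mlxsw_reg_sfn_pack() now sets end = 1, asking the
 * device to close the learning-notification session after a single query,
 * so the work item reads at most MLXSW_REG_SFN_REC_MAX_COUNT records per
 * run instead of looping until num_rec hits zero. Sketch of one poll pass
 * (real pack/query helpers; allocation and error handling simplified):
 */
static void mlxsw_example_sfn_poll(struct mlxsw_core *core)
{
	char sfn_pl[MLXSW_REG_SFN_LEN];	/* illustrative; real code kmallocs */
	u8 num_rec, i;

	mlxsw_reg_sfn_pack(sfn_pl);	/* now also sets the end bit */
	if (mlxsw_reg_query(core, MLXSW_REG(sfn), sfn_pl))
		return;
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		/* process record i, then reschedule the work item */;
}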
@@ -605,6 +611,7 @@ static inline void mlxsw_reg_sfn_pack(char *payload) { MLXSW_REG_ZERO(sfn, payload); mlxsw_reg_sfn_swid_set(payload, 0); + mlxsw_reg_sfn_end_set(payload, 1); mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 7291f2c4b0c7..6c6b726c4897 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -555,8 +555,9 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); } -static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid, bool learn_enable) +int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool learn_enable) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *spvmlr_pl; @@ -565,13 +566,20 @@ static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); if (!spvmlr_pl) return -ENOMEM; - mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, - learn_enable); + mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin, + vid_end, learn_enable); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); kfree(spvmlr_pl); return err; } +static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, bool learn_enable) +{ + return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, + learn_enable); +} + static int mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) { @@ -973,10 +981,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev, goto err_port_vp_mode_trans; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) - goto err_port_vid_learning_set; - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); if (err) goto err_port_add_vid; @@ -984,8 +988,6 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev, return 0; err_port_add_vid: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); -err_port_vid_learning_set: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: @@ -1012,8 +1014,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - /* Drop FID reference. If this was the last reference the * resources will be freed. 
*/ @@ -2570,123 +2570,47 @@ static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, netif_receive_skb(skb); } +static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + skb->offload_fwd_mark = 1; + return mlxsw_sp_rx_listener_func(skb, local_port, priv); +} + +#define MLXSW_SP_RXL(_func, _trap_id, _action) \ + { \ + .func = _func, \ + .local_port = MLXSW_PORT_DONT_CARE, \ + .trap_id = MLXSW_TRAP_ID_##_trap_id, \ + .action = MLXSW_REG_HPKT_ACTION_##_action, \ + } + static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_FDB_MC, - }, + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, FDB_MC, TRAP_TO_CPU), /* Traps for specific L2 packet types, not trapped as FDB MC */ - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_STP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LACP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_EAPOL, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LLDP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MMRP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MVRP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_RPVST, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_DHCP, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_ARPBC, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_ARPUC, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_MTUERROR, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_TTLERROR, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_LBERROR, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_OSPF, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_IP2ME, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, - }, - { - .func = mlxsw_sp_rx_listener_func, - .local_port = MLXSW_PORT_DONT_CARE, - .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, - }, + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU), + 
MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LACP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, EAPOL, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LLDP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MMRP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MVRP, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RPVST, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, DHCP, MIRROR_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, IGMP_QUERY, MIRROR_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V1_REPORT, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_REPORT, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V2_LEAVE, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IGMP_V3_REPORT, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPBC, MIRROR_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, ARPUC, MIRROR_TO_CPU), + /* L3 traps */ + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, MTUERROR, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, TTLERROR, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, LBERROR, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_mark_func, OSPF, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, IP2ME, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, RTR_INGRESS0, TRAP_TO_CPU), + MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, HOST_MISS_IPV4, TRAP_TO_CPU), }; static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) @@ -2713,7 +2637,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_rx_listener_register; - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + mlxsw_reg_hpkt_pack(hpkt_pl, mlxsw_sp_rx_listener[i].action, mlxsw_sp_rx_listener[i].trap_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ab3feb81bd43..01537d3a1c48 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -558,6 +558,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, u32 maxrate); +int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool learn_enable); #ifdef CONFIG_MLXSW_SPECTRUM_DCB diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d1b59cdfacc1..0c3fbbc6b537 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -261,12 +261,40 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, false); } +static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool set) +{ + u16 vid; + int err; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + + return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, + set); + } + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, + set); + if (err) + goto err_port_vid_learning_set; + } + + return 0; + +err_port_vid_learning_set: + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, 
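/*
 * Editor's note -- MLXSW_SP_RXL() above is pure boilerplate folding: each
 * entry expands to the same designated-initializer struct the table used to
 * spell out, now with a per-trap action that mlxsw_sp_traps_init() passes
 * to mlxsw_reg_hpkt_pack() instead of hard-coding TRAP_TO_CPU
 * (MIRROR_TO_CPU entries keep forwarding in hardware and only send a copy
 * to the CPU, hence the offload_fwd_mark listener). One entry expanded by
 * hand for comparison:
 */
	MLXSW_SP_RXL(mlxsw_sp_rx_listener_func, STP, TRAP_TO_CPU)
	/* ...expands to: */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
		.action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
	},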
!set); + return err; +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, unsigned long brport_flags) { + unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0; unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0; - bool set; int err; if (!mlxsw_sp_port->bridged) @@ -276,17 +304,30 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; if ((uc_flood ^ brport_flags) & BR_FLOOD) { - set = mlxsw_sp_port->uc_flood ? false : true; - err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set); + err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, + !mlxsw_sp_port->uc_flood); if (err) return err; } + if ((learning ^ brport_flags) & BR_LEARNING) { + err = mlxsw_sp_port_learning_set(mlxsw_sp_port, + !mlxsw_sp_port->learning); + if (err) + goto err_port_learning_set; + } + mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0; mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0; mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0; return 0; + +err_port_learning_set: + if ((uc_flood ^ brport_flags) & BR_FLOOD) + mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, + mlxsw_sp_port->uc_flood); + return err; } static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) @@ -635,6 +676,27 @@ static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool learn_enable) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) { + vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1), + vid_end); + + err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, + vid_e, learn_enable); + if (err) + return err; + } + + return 0; +} + static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool flag_untagged, bool flag_pvid) @@ -675,6 +737,14 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, } } + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, + mlxsw_sp_port->learning); + if (err) { + netdev_err(dev, "Failed to set learning for VIDs %d-%d\n", + vid_begin, vid_end); + goto err_port_vid_learning_set; + } + /* Changing activity bits only if HW operation succeded */ for (vid = vid_begin; vid <= vid_end; vid++) { set_bit(vid, mlxsw_sp_port->active_vlans); @@ -697,6 +767,9 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, err_port_stp_state_set: for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, + false); +err_port_vid_learning_set: if (old_pvid != mlxsw_sp_port->pvid) mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); err_port_pvid_set: @@ -1001,29 +1074,20 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end) { - struct net_device *dev = mlxsw_sp_port->dev; u16 vid, pvid; - int err; if (!mlxsw_sp_port->bridged) return -EINVAL; - err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, - false, false); - if (err) { - netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin, - vid_end); - return err; - } + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end, + false); pvid = mlxsw_sp_port->pvid; - if (pvid >= 
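/*
 * Editor's note -- the error path added to
 * mlxsw_sp_port_attr_br_flags_set() above follows the usual kernel unwind
 * convention: each flag change that was already applied gets a matching
 * rollback under a goto label, so a failed learning update restores the
 * flood setting flipped just before it. The shape, with a hypothetical
 * wrapper over the real setters (the rollback assumes the prior state was
 * the inverse of what was just applied):
 */
static int example_apply_two_flags(struct mlxsw_sp_port *port,
				   bool flood, bool learn)
{
	int err;

	err = mlxsw_sp_port_uc_flood_set(port, flood);
	if (err)
		return err;
	err = mlxsw_sp_port_learning_set(port, learn);
	if (err)
		goto err_learning;	/* undo only what succeeded */
	return 0;

err_learning:
	mlxsw_sp_port_uc_flood_set(port, !flood);
	return err;
}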
vid_begin && pvid <= vid_end) { - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); - if (err) { - netdev_err(dev, "Unable to del PVID %d\n", pvid); - return err; - } - } + if (pvid >= vid_begin && pvid <= vid_end) + mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); + + __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, + false); mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); @@ -1366,8 +1430,6 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, vid = fid; } - adding = adding && mlxsw_sp_port->learning; - do_fdb_op: err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, true); @@ -1429,8 +1491,6 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, vid = fid; } - adding = adding && mlxsw_sp_port->learning; - do_fdb_op: err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, adding, true); @@ -1496,20 +1556,18 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); rtnl_lock(); - do { - mlxsw_reg_sfn_pack(sfn_pl); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); - if (err) { - dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); - break; - } - num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); - for (i = 0; i < num_rec; i++) - mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); + mlxsw_reg_sfn_pack(sfn_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); + if (err) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); + goto out; + } + num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); + for (i = 0; i < num_rec; i++) + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); - } while (num_rec); +out: rtnl_unlock(); - kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); } diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 87b7b814778b..712d8bcb7d8c 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -751,7 +751,7 @@ static void netdev_rx(struct net_device *dev) dev_err(&pdev->dev, "rx crc err\n"); ether->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { - dev_err(&pdev->dev, "rx aligment err\n"); + dev_err(&pdev->dev, "rx alignment err\n"); ether->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 45ab74676573..2d67469eb8f6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,7 +26,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.7.1.20" +#define DRV_MODULE_VERSION "8.10.9.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 @@ -42,6 +42,8 @@ enum qed_coalescing_mode { struct qed_eth_cb_ops; struct qed_dev_info; +union qed_mcp_protocol_stats; +enum qed_mcp_protocol_type; /* helpers */ static inline u32 qed_db_addr(u32 cid, u32 DEMS) @@ -606,7 +608,9 @@ void qed_link_update(struct qed_hwfn *hwfn); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); - +void qed_get_protocol_stats(struct qed_dev *cdev, + enum qed_mcp_protocol_type type, + union qed_mcp_protocol_stats *stats); int qed_slowpath_irq_req(struct qed_hwfn *hwfn); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c 
b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 1c35f376143e..547692759d06 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -377,9 +377,8 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, } } -u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, - enum protocol_type type, - u32 *vf_cid) +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid) { if (vf_cid) *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; @@ -405,10 +404,10 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, return cnt; } -static void -qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, - enum protocol_type proto, - u8 seg, u8 seg_type, u32 count, bool has_fl) +static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, + u8 seg_type, u32 count, bool has_fl) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; @@ -420,8 +419,7 @@ qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, - u32 start_line, u32 total_size, - u32 elem_size) + u32 start_line, u32 total_size, u32 elem_size) { u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); @@ -448,8 +446,7 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, p_cli->first.val = *p_line; p_cli->active = true; - *p_line += DIV_ROUND_UP(p_blk->total_size, - p_blk->real_size_in_page); + *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); p_cli->last.val = *p_line - 1; DP_VERBOSE(p_hwfn, QED_MSG_ILT, @@ -926,12 +923,9 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, void *p_virt; u32 size; - size = min_t(u32, sz_left, - p_blk->real_size_in_page); + size = min_t(u32, sz_left, p_blk->real_size_in_page); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, - &p_phys, - GFP_KERNEL); + size, &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; memset(p_virt, 0, size); @@ -976,7 +970,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { p_blk = &clients[i].pf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); - if (rc != 0) + if (rc) goto ilt_shadow_fail; } for (k = 0; k < p_mngr->vf_count; k++) { @@ -985,7 +979,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) p_blk = &clients[i].vf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines); - if (rc != 0) + if (rc) goto ilt_shadow_fail; } } @@ -1672,7 +1666,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); - active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0); + active_seg_mask |= (tm_iids.pf_tids[i] ? 
BIT(i) : 0); tm_offset += tm_iids.pf_tids[i]; } @@ -1702,8 +1696,7 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn) } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, - enum protocol_type type, - u32 *p_cid) + enum protocol_type type, u32 *p_cid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 rel_cid; @@ -1717,8 +1710,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, p_mngr->acquired[type].max_count); if (rel_cid >= p_mngr->acquired[type].max_count) { - DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", - type); + DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type); return -EINVAL; } @@ -1730,8 +1722,7 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, } static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, - u32 cid, - enum protocol_type *p_type) + u32 cid, enum protocol_type *p_type) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cid_acquired_map *p_map; @@ -1763,8 +1754,7 @@ static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, return true; } -void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, - u32 cid) +void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; enum protocol_type type; @@ -1781,8 +1771,7 @@ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, __clear_bit(rel_cid, p_mngr->acquired[type].cid_map); } -int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, - struct qed_cxt_info *p_info) +int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 conn_cxt_size, hw_p_size, cxts_per_p, line; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 226cb08cc055..b900dfbb57ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1968,6 +1968,7 @@ static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev, if (!dcbx_info->operational.ieee) { DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 0e4f4a9306b5..5ae27f2d2fa5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -35,8 +35,7 @@ #include "qed_sriov.h" #include "qed_vf.h" -static spinlock_t qm_lock; -static bool qm_lock_init = false; +static DEFINE_SPINLOCK(qm_lock); /* API common to all protocols */ enum BAR_ID { @@ -44,8 +43,7 @@ enum BAR_ID { BAR_ID_1 /* Used for doorbells */ }; -static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, - enum BAR_ID bar_id) +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) { u32 bar_reg = (bar_id == BAR_ID_0 ? 
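/*
 * Editor's note -- two idiom cleanups run through the qed hunks: the
 * file-scope qm_lock loses its qm_lock_init flag because DEFINE_SPINLOCK()
 * produces a lock that is valid from link time, and open-coded "(1 << i)"
 * masks become BIT(i) from <linux/bitops.h>. Equivalent sketch:
 */
static DEFINE_SPINLOCK(example_lock);	/* no runtime spin_lock_init() needed */

static u32 example_active_mask(const u32 *counts, int n)
{
	u32 mask = 0;
	int i;

	for (i = 0; i < n; i++)
		if (counts[i])
			mask |= BIT(i);	/* was: (1 << i) */
	return mask;
}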
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); @@ -70,8 +68,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, } } -void qed_init_dp(struct qed_dev *cdev, - u32 dp_module, u8 dp_level) +void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) { u32 i; @@ -543,8 +540,7 @@ int qed_resc_alloc(struct qed_dev *cdev) cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); if (!cdev->reset_stats) { DP_NOTICE(cdev, "Failed to allocate reset statistics\n"); - rc = -ENOMEM; - goto alloc_err; + goto alloc_no_mem; } return 0; @@ -605,9 +601,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, /* Make sure notification is not set before initiating final cleanup */ if (REG_RD(p_hwfn, addr)) { - DP_NOTICE( - p_hwfn, - "Unexpected; Found final cleanup notification before initiating final cleanup\n"); + DP_NOTICE(p_hwfn, + "Unexpected; Found final cleanup notification before initiating final cleanup\n"); REG_WR(p_hwfn, addr, 0); } @@ -701,17 +696,14 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev) continue; qed_init_cau_sb_entry(p_hwfn, &sb_entry, - p_block->function_id, - 0, 0); - STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, - sb_entry); + p_block->function_id, 0, 0); + STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); } } } static int qed_hw_init_common(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - int hw_mode) + struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; @@ -759,7 +751,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_port_unpretend(p_hwfn, p_ptt); rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); - if (rc != 0) + if (rc) return rc; qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); @@ -780,6 +772,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); + qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); + qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); } /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); @@ -788,37 +783,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, } static int qed_hw_init_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - int hw_mode) + struct qed_ptt *p_ptt, int hw_mode) { - int rc = 0; - - rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); - if (rc != 0) - return rc; - - if (hw_mode & (1 << MODE_MF_SI)) { - u8 pf_id = 0; - - if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { - DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, - "PF[%08x] is first eth on engine\n", pf_id); - - /* We should have configured BIT for ppfid, i.e., the - * relative function number in the port. But there's a - * bug in LLH in BB where the ppfid is actually engine - * based, so we need to take this into account. - */ - qed_wr(p_hwfn, p_ptt, - NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id); - } - - /* Take the protocol-based hit vector if there is a hit, - * otherwise take the other vector. 
- */ - qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2); - } - return rc; + return qed_init_run(p_hwfn, p_ptt, PHASE_PORT, + p_hwfn->port_id, hw_mode); } static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, @@ -848,7 +816,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_init_rt(p_hwfn); /* Set VLAN in NIG if needed */ - if (hw_mode & (1 << MODE_MF_SD)) { + if (hw_mode & BIT(MODE_MF_SD)) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, @@ -856,7 +824,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, } /* Enable classification by MAC if needed */ - if (hw_mode & (1 << MODE_MF_SI)) { + if (hw_mode & BIT(MODE_MF_SI)) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring TAGMAC_CLS_TYPE\n"); STORE_RT_REG(p_hwfn, @@ -871,7 +839,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Cleanup chip from previous driver if such remains exist */ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); - if (rc != 0) + if (rc) return rc; /* PF Init sequence */ @@ -887,21 +855,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* Pure runtime initializations - directly to the HW */ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); - if (hw_mode & (1 << MODE_MF_SI)) { - u8 pf_id = 0; - u32 val = 0; - - if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { - if (p_hwfn->rel_pf_id == pf_id) { - DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, - "PF[%d] is first ETH on engine\n", - pf_id); - val = 1; - } - qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val); - } - } - if (b_hw_start) { /* enable interrupts */ qed_int_igu_enable(p_hwfn, p_ptt, int_mode); @@ -950,8 +903,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, /* Read shadow of current MFW mailbox */ qed_mcp_read_mb(p_hwfn, p_main_ptt); memcpy(p_hwfn->mcp_info->mfw_mb_shadow, - p_hwfn->mcp_info->mfw_mb_cur, - p_hwfn->mcp_info->mfw_mb_length); + p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); } int qed_hw_init(struct qed_dev *cdev, @@ -971,7 +923,7 @@ int qed_hw_init(struct qed_dev *cdev, if (IS_PF(cdev)) { rc = qed_init_fw_data(cdev, bin_fw_data); - if (rc != 0) + if (rc) return rc; } @@ -988,8 +940,7 @@ int qed_hw_init(struct qed_dev *cdev, qed_calc_hw_mode(p_hwfn); - rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, - &load_code); + rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); if (rc) { DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n"); return rc; @@ -1004,11 +955,6 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); - if (!qm_lock_init) { - spin_lock_init(&qm_lock); - qm_lock_init = true; - } - switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -1071,9 +1017,8 @@ int qed_hw_init(struct qed_dev *cdev, } #define QED_HW_STOP_RETRY_LIMIT (10) -static inline void qed_hw_timers_stop(struct qed_dev *cdev, - struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_hw_timers_stop(struct qed_dev *cdev, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int i; @@ -1084,8 +1029,7 @@ static inline void qed_hw_timers_stop(struct qed_dev *cdev, for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) && - (!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK))) + (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) break; /* Dependent on number of connection/tasks, possibly @@ -1190,8 
+1134,7 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) } DP_VERBOSE(p_hwfn, - NETIF_MSG_IFDOWN, - "Shutting down the fastpath\n"); + NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); @@ -1219,14 +1162,13 @@ void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); } -static int qed_reg_assert(struct qed_hwfn *hwfn, - struct qed_ptt *ptt, u32 reg, - bool expected) +static int qed_reg_assert(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 reg, bool expected) { - u32 assert_val = qed_rd(hwfn, ptt, reg); + u32 assert_val = qed_rd(p_hwfn, p_ptt, reg); if (assert_val != expected) { - DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n", + DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n", reg, expected); return -EINVAL; } @@ -1306,8 +1248,7 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) /* Clean Previous errors if such exist */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, - PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, - 1 << p_hwfn->abs_pf_id); + PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, @@ -1317,7 +1258,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -1326,6 +1268,10 @@ static void get_function_id(struct qed_hwfn *p_hwfn) PXP_CONCRETE_FID_PFID); p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PORT); + + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, + "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", + p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); } static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) @@ -1417,8 +1363,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn) return 0; } -static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; @@ -1472,8 +1417,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; break; default: - DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", - core_cfg); + DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); break; } @@ -1484,11 +1428,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, speed_cap_mask)); - link->speed.advertised_speeds = - link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link->speed.advertised_speeds = link_temp; - p_hwfn->mcp_info->link_capabilities.speed_capabilities = - link->speed.advertised_speeds; + link_temp = link->speed.advertised_speeds; + p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + @@ -1517,8 +1461,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, link->speed.forced_speed = 100000; break; default: - DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", - link_temp); + DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); } link_temp &= 
NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; @@ -1628,10 +1571,10 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n", + "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", p_hwfn->rel_pf_id, p_hwfn->abs_pf_id, - p_hwfn->num_funcs_on_engine); + p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } static int @@ -1703,10 +1646,9 @@ static int qed_get_dev_info(struct qed_dev *cdev) u32 tmp; /* Read Vendor Id / Device Id */ - pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, - &cdev->vendor_id); - pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, - &cdev->device_id); + pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); + pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); + cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM); cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, @@ -1782,7 +1724,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* First hwfn learns basic information, e.g., number of hwfns */ if (!p_hwfn->my_id) { rc = qed_get_dev_info(p_hwfn->cdev); - if (rc != 0) + if (rc) goto err1; } @@ -2183,8 +2125,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) return 0; } -int qed_fw_vport(struct qed_hwfn *p_hwfn, - u8 src_id, u8 *dst_id) +int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { u8 min, max; @@ -2203,8 +2144,7 @@ int qed_fw_vport(struct qed_hwfn *p_hwfn, return 0; } -int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, - u8 src_id, u8 *dst_id) +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { u8 min, max; @@ -2386,8 +2326,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. 
*/ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, - u16 vport_id, u32 req_rate, - u32 min_pf_rate) + u16 vport_id, u32 req_rate, u32 min_pf_rate) { u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; int non_requested_count = 0, req_count = 0, i, num_vports; @@ -2471,7 +2410,7 @@ static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); - if (rc == 0) + if (!rc) qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, p_link->min_pf_rate); else diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 6f9d3b831a2a..a67b3554aabd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -536,6 +536,244 @@ struct core_conn_context { struct regpair ustorm_st_padding[2]; }; +enum core_error_handle { + LL2_DROP_PACKET, + LL2_DO_NOTHING, + LL2_ASSERT, + MAX_CORE_ERROR_HANDLE +}; + +enum core_event_opcode { + CORE_EVENT_TX_QUEUE_START, + CORE_EVENT_TX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_START, + CORE_EVENT_RX_QUEUE_STOP, + MAX_CORE_EVENT_OPCODE +}; + +enum core_l4_pseudo_checksum_mode { + CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, + CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_CORE_L4_PSEUDO_CHECKSUM_MODE +}; + +struct core_ll2_port_stats { + struct regpair gsi_invalid_hdr; + struct regpair gsi_invalid_pkt_length; + struct regpair gsi_unsupported_pkt_typ; + struct regpair gsi_crcchksm_error; +}; + +struct core_ll2_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; +}; + +struct core_ll2_rx_prod { + __le16 bd_prod; + __le16 cqe_prod; + __le32 reserved; +}; + +struct core_ll2_tstorm_per_queue_stat { + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; +}; + +struct core_ll2_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + +enum core_ramrod_cmd_id { + CORE_RAMROD_UNUSED, + CORE_RAMROD_RX_QUEUE_START, + CORE_RAMROD_TX_QUEUE_START, + CORE_RAMROD_RX_QUEUE_STOP, + CORE_RAMROD_TX_QUEUE_STOP, + MAX_CORE_RAMROD_CMD_ID +}; + +enum core_roce_flavor_type { + CORE_ROCE, + CORE_RROCE, + MAX_CORE_ROCE_FLAVOR_TYPE +}; + +struct core_rx_action_on_error { + u8 error_type; +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +}; + +struct core_rx_bd { + struct regpair addr; + __le16 reserved[4]; +}; + +struct core_rx_bd_with_buff_len { + struct regpair addr; + __le16 buff_length; + __le16 reserved[3]; +}; + +union core_rx_bd_union { + struct core_rx_bd rx_bd; + struct core_rx_bd_with_buff_len rx_bd_with_len; +}; + +struct core_rx_cqe_opaque_data { + __le32 data[2]; +}; + +enum core_rx_cqe_type { + CORE_RX_CQE_ILLIGAL_TYPE, + CORE_RX_CQE_TYPE_REGULAR, + CORE_RX_CQE_TYPE_GSI_OFFLOAD, + CORE_RX_CQE_TYPE_SLOW_PATH, + MAX_CORE_RX_CQE_TYPE +}; + +struct core_rx_fast_path_cqe { + u8 type; + u8 placement_offset; + struct parsing_and_err_flags parse_flags; + __le16 packet_length; + __le16 vlan; + struct core_rx_cqe_opaque_data 
opaque_data; + __le32 reserved[4]; +}; + +struct core_rx_gsi_offload_cqe { + u8 type; + u8 data_length_error; + struct parsing_and_err_flags parse_flags; + __le16 data_length; + __le16 vlan; + __le32 src_mac_addrhi; + __le16 src_mac_addrlo; + u8 reserved1[2]; + __le32 gid_dst[4]; +}; + +struct core_rx_slow_path_cqe { + u8 type; + u8 ramrod_cmd_id; + __le16 echo; + __le32 reserved1[7]; +}; + +union core_rx_cqe_union { + struct core_rx_fast_path_cqe rx_cqe_fp; + struct core_rx_gsi_offload_cqe rx_cqe_gsi; + struct core_rx_slow_path_cqe rx_cqe_sp; +}; + +struct core_rx_start_ramrod_data { + struct regpair bd_base; + struct regpair cqe_pbl_addr; + __le16 mtu; + __le16 sb_id; + u8 sb_index; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 drop_ttl0_flg; + __le16 num_of_pbl_pages; + u8 inner_vlan_removal_en; + u8 queue_id; + u8 main_func_queue; + u8 mf_si_bcast_accept_all; + u8 mf_si_mcast_accept_all; + struct core_rx_action_on_error action_on_error; + u8 gsi_offload_flag; + u8 reserved[7]; +}; + +struct core_rx_stop_ramrod_data { + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 queue_id; + u8 reserved1; + __le16 reserved2[2]; +}; + +struct core_tx_bd_flags { + u8 as_bitfield; +#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1 +#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2 +#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3 +#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4 +#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5 +#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 +#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 +}; + +struct core_tx_bd { + struct regpair addr; + __le16 nbytes; + __le16 nw_vlan_or_lb_echo; + u8 bitfield0; +#define CORE_TX_BD_NBDS_MASK 0xF +#define CORE_TX_BD_NBDS_SHIFT 0 +#define CORE_TX_BD_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_ROCE_FLAV_SHIFT 4 +#define CORE_TX_BD_RESERVED0_MASK 0x7 +#define CORE_TX_BD_RESERVED0_SHIFT 5 + struct core_tx_bd_flags bd_flags; + __le16 bitfield1; +#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF +#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 +#define CORE_TX_BD_TX_DST_MASK 0x1 +#define CORE_TX_BD_TX_DST_SHIFT 14 +#define CORE_TX_BD_RESERVED1_MASK 0x1 +#define CORE_TX_BD_RESERVED1_SHIFT 15 +}; + +enum core_tx_dest { + CORE_TX_DEST_NW, + CORE_TX_DEST_LB, + MAX_CORE_TX_DEST +}; + +struct core_tx_start_ramrod_data { + struct regpair pbl_base_addr; + __le16 mtu; + __le16 sb_id; + u8 sb_index; + u8 stats_en; + u8 stats_id; + u8 conn_type; + __le16 pbl_size; + __le16 qm_pq_id; + u8 gsi_offload_flag; + u8 resrved[3]; +}; + +struct core_tx_stop_ramrod_data { + __le32 reserved0[2]; +}; + struct eth_mstorm_per_pf_stat { struct regpair gre_discard_pkts; struct regpair vxlan_discard_pkts; @@ -636,9 +874,33 @@ struct hsi_fp_ver_struct { }; /* Mstorm non-triggering VF zone */ +enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, + VF_PF_CHANNEL_NOT_READY, + VF_ZONE_MSG_NOT_VALID, + VF_ZONE_FUNC_NOT_ENABLED, + ETH_PACKET_TOO_SMALL, + ETH_ILLEGAL_VLAN_MODE, + ETH_MTU_VIOLATION, + ETH_ILLEGAL_INBAND_TAGS, + ETH_VLAN_INSERT_AND_INBAND_VLAN, + ETH_ILLEGAL_NBDS, + ETH_FIRST_BD_WO_SOP, + ETH_INSUFFICIENT_BDS, + ETH_ILLEGAL_LSO_HDR_NBDS, + ETH_ILLEGAL_LSO_MSS, + 
ETH_ZERO_SIZE_BD, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_INSUFFICIENT_PAYLOAD, + ETH_EDPM_OUT_OF_SYNC, + ETH_TUNN_IPV6_EXT_NBD_ERR, + ETH_CONTROL_PACKET_VIOLATION, + MAX_MALICIOUS_VF_ERROR_ID +}; + struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; - struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF]; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; }; /* Mstorm VF zone */ @@ -705,13 +967,17 @@ struct pf_start_ramrod_data { struct protocol_dcb_data { u8 dcb_enable_flag; + u8 reserved_a; u8 dcb_priority; u8 dcb_tc; - u8 reserved; + u8 reserved_b; + u8 reserved0; }; struct pf_update_tunnel_config { u8 update_rx_pf_clss; + u8 update_rx_def_ucast_clss; + u8 update_rx_def_non_ucast_clss; u8 update_tx_pf_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; @@ -727,7 +993,7 @@ struct pf_update_tunnel_config { u8 tunnel_clss_ipgre; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved[3]; + __le16 reserved[2]; }; struct pf_update_ramrod_data { @@ -736,16 +1002,17 @@ struct pf_update_ramrod_data { u8 update_fcoe_dcb_data_flag; u8 update_iscsi_dcb_data_flag; u8 update_roce_dcb_data_flag; + u8 update_rroce_dcb_data_flag; u8 update_iwarp_dcb_data_flag; u8 update_mf_vlan_flag; - u8 reserved; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; struct protocol_dcb_data iscsi_dcb_data; struct protocol_dcb_data roce_dcb_data; + struct protocol_dcb_data rroce_dcb_data; struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan; - __le16 reserved2; + __le16 reserved; struct pf_update_tunnel_config tunnel_config; }; @@ -766,10 +1033,14 @@ enum protocol_version_array_key { MAX_PROTOCOL_VERSION_ARRAY_KEY }; -/* Pstorm non-triggering VF zone */ +struct rdma_sent_stats { + struct regpair sent_bytes; + struct regpair sent_pkts; +}; + struct pstorm_non_trigger_vf_zone { struct eth_pstorm_per_queue_stat eth_queue_stat; - struct regpair reserved[2]; + struct rdma_sent_stats rdma_stats; }; /* Pstorm VF zone */ @@ -786,7 +1057,11 @@ struct ramrod_header { __le16 echo; }; -/* Slowpath Element (SPQE) */ +struct rdma_rcv_stats { + struct regpair rcv_bytes; + struct regpair rcv_pkts; +}; + struct slow_path_element { struct ramrod_header hdr; struct regpair data_ptr; @@ -794,7 +1069,7 @@ struct slow_path_element { /* Tstorm non-triggering VF zone */ struct tstorm_non_trigger_vf_zone { - struct regpair reserved[2]; + struct rdma_rcv_stats rdma_stats; }; struct tstorm_per_port_stat { @@ -802,9 +1077,14 @@ struct tstorm_per_port_stat { struct regpair mac_error_discard; struct regpair mftag_filter_discard; struct regpair eth_mac_filter_discard; - struct regpair reserved[5]; + struct regpair ll2_mac_filter_discard; + struct regpair ll2_conn_disabled_discard; + struct regpair iscsi_irregular_pkt; + struct regpair reserved; + struct regpair roce_irregular_pkt; struct regpair eth_irregular_pkt; - struct regpair reserved1[2]; + struct regpair reserved1; + struct regpair preroce_irregular_pkt; struct regpair eth_gre_tunn_filter_discard; struct regpair eth_vxlan_tunn_filter_discard; struct regpair eth_geneve_tunn_filter_discard; @@ -870,7 +1150,13 @@ struct vf_stop_ramrod_data { __le32 reserved2; }; -/* Attentions status block */ +enum vf_zone_size_mode { + VF_ZONE_SIZE_MODE_DEFAULT, + VF_ZONE_SIZE_MODE_DOUBLE, + VF_ZONE_SIZE_MODE_QUAD, + MAX_VF_ZONE_SIZE_MODE +}; + struct atten_status_block { __le32 atten_bits; __le32 atten_ack; @@ -1579,6 +1865,7 @@ enum dbg_status { DBG_STATUS_REG_FIFO_BAD_DATA, 
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_MULTI_BLOCKS_WITH_FILTER, MAX_DBG_STATUS }; @@ -1589,7 +1876,41 @@ enum dbg_status { /* Number of VLAN priorities */ #define NUM_OF_VLAN_PRIORITIES 8 -/* QM per-port init parameters */ +struct init_brb_ram_req { + __le32 guranteed_per_tc; + __le32 headroom_per_tc; + __le32 min_pkt_size; + __le32 max_ports_per_engine; + u8 num_active_tcs[MAX_NUM_PORTS]; +}; + +struct init_ets_tc_req { + u8 use_sp; + u8 use_wfq; + __le16 weight; +}; + +struct init_ets_req { + __le32 mtu; + struct init_ets_tc_req tc_req[NUM_OF_TCS]; +}; + +struct init_nig_lb_rl_req { + __le16 lb_mac_rate; + __le16 lb_rate; + __le32 mtu; + __le16 tc_rate[NUM_OF_PHYS_TCS]; +}; + +struct init_nig_pri_tc_map_entry { + u8 tc_id; + u8 valid; +}; + +struct init_nig_pri_tc_map_req { + struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; +}; + struct init_qm_port_params { u8 active; u8 active_phys_tcs; @@ -1619,7 +1940,7 @@ struct init_qm_vport_params { /* Width of GRC address in bits (addresses are specified in dwords) */ #define GRC_ADDR_BITS 23 -#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) +#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1) /* indicates an init that should be applied to any phase ID */ #define ANY_PHASE_ID 0xffff @@ -1674,11 +1995,11 @@ struct bin_buffer_hdr { /* binary init buffer types */ enum bin_init_buffer_type { - BIN_BUF_FW_VER_INFO, + BIN_BUF_INIT_FW_VER_INFO, BIN_BUF_INIT_CMD, BIN_BUF_INIT_VAL, BIN_BUF_INIT_MODE_TREE, - BIN_BUF_IRO, + BIN_BUF_INIT_IRO, MAX_BIN_INIT_BUFFER_TYPE }; @@ -1918,44 +2239,34 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, #define MAX_NAME_LEN 16 /* Win 2 */ -#define GTT_BAR0_MAP_REG_IGU_CMD \ - 0x00f000UL +#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL /* Win 3 */ -#define GTT_BAR0_MAP_REG_TSDM_RAM \ - 0x010000UL +#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL /* Win 4 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM \ - 0x011000UL +#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL /* Win 5 */ -#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \ - 0x012000UL +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL /* Win 6 */ -#define GTT_BAR0_MAP_REG_USDM_RAM \ - 0x013000UL +#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL /* Win 7 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \ - 0x014000UL +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL /* Win 8 */ -#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \ - 0x015000UL +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL /* Win 9 */ -#define GTT_BAR0_MAP_REG_XSDM_RAM \ - 0x016000UL +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL /* Win 10 */ -#define GTT_BAR0_MAP_REG_YSDM_RAM \ - 0x017000UL +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL /* Win 11 */ -#define GTT_BAR0_MAP_REG_PSDM_RAM \ - 0x018000UL +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL /** * @brief qed_qm_pf_mem_size - prepare QM ILT sizes @@ -2003,7 +2314,7 @@ struct qed_qm_pf_rt_init_params { u16 num_vf_pqs; u8 start_vport; u8 num_vports; - u8 pf_wfq; + u16 pf_wfq; u32 pf_rl; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; @@ -2138,6 +2449,9 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define TSTORM_PORT_STAT_OFFSET(port_id) \ (IRO[1].base + ((port_id) * IRO[1].m1)) #define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[2].base + ((port_id) * IRO[2].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) #define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ (IRO[3].base + ((vf_id) * IRO[3].m1)) #define USTORM_VF_PF_CHANNEL_READY_SIZE 
(IRO[3].size) @@ -2153,42 +2467,90 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) #define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size) #define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[18].base + ((stat_counter_id) * IRO[18].m1)) #define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) #define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ (IRO[19].base + ((queue_id) * IRO[19].m1)) #define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size) +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ + (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size) +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) #define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[21].base + ((pf_id) * IRO[21].m1)) + (IRO[22].base + ((pf_id) * IRO[22].m1)) #define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size) #define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[22].size) + (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) #define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[23].base + ((pf_id) * IRO[23].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size) + (IRO[24].base + ((pf_id) * IRO[24].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) #define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[24].base + ((stat_counter_id) * IRO[24].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size) + (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) #define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[25].base + ((pf_id) * IRO[25].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size) + (IRO[26].base + ((pf_id) * IRO[26].m1)) +#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) #define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \ - (IRO[26].base + ((ethtype) * IRO[26].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size) -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size) + (IRO[27].base + ((ethtype) * IRO[27].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) #define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ - (IRO[28].base + ((pf_id) * IRO[28].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size) + (IRO[29].base + ((pf_id) * IRO[29].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[29].base + ((queue_id) * IRO[29].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
- -static const struct iro iro_arr[46] = { + (IRO[30].base + ((queue_id) * IRO[30].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ + (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) +#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) +#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) +#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) +#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) +#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) +#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + +static const struct iro iro_arr[47] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb0, 0x78, 0x0, 0x0, 0x78}, {0x6318, 0x20, 0x0, 0x0, 0x20}, @@ -2201,20 +2563,21 @@ static const struct iro iro_arr[46] = { {0x3df0, 0x0, 0x0, 0x0, 0x78}, {0x29b0, 0x0, 0x0, 0x0, 0x78}, {0x4c38, 0x0, 0x0, 0x0, 0x78}, - {0x4a48, 0x0, 0x0, 0x0, 0x78}, + {0x4990, 0x0, 0x0, 0x0, 0x78}, {0x7e48, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x60f8, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, {0x95b8, 0x30, 0x0, 0x0, 0x30}, - {0x4c18, 0x80, 0x0, 0x0, 0x40}, + {0x4b60, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0xc9a8, 0x0, 0x0, 0x0, 0x4}, - {0x4c58, 0x80, 0x0, 0x0, 0x20}, + {0x53a0, 0x80, 0x4, 0x0, 0x4}, + {0xc8f0, 0x0, 0x0, 0x0, 0x4}, + {0x4ba0, 0x80, 0x0, 0x0, 0x20}, {0x8050, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, {0x2b48, 0x80, 0x0, 0x0, 0x38}, - {0xdf88, 0x78, 0x0, 0x0, 0x78}, + {0xf188, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xacf0, 0x0, 0x0, 0x0, 0xf0}, {0xade0, 0x8, 0x0, 0x0, 0x8}, @@ -2226,455 +2589,457 @@ static const struct iro iro_arr[46] = { {0x200, 0x10, 0x8, 0x0, 0x8}, {0xb78, 0x10, 0x8, 0x0, 0x2}, {0xd888, 0x38, 0x0, 0x0, 0x24}, - {0x12120, 0x10, 0x0, 0x0, 0x8}, - {0x11b20, 0x38, 0x0, 0x0, 0x18}, + {0x12c38, 0x10, 0x0, 0x0, 0x8}, + {0x11aa0, 0x38, 0x0, 0x0, 0x18}, {0xa8c0, 0x30, 0x0, 0x0, 0x10}, {0x86f8, 0x28, 0x0, 0x0, 0x18}, - {0xeff8, 0x10, 0x0, 0x0, 0x10}, + {0x101f8, 0x10, 0x0, 0x0, 0x10}, {0xdd08, 0x48, 0x0, 0x0, 0x38}, - {0xf460, 0x20, 0x0, 0x0, 0x20}, + {0x10660, 0x20, 0x0, 0x0, 0x20}, {0x2b80, 0x80, 0x0, 0x0, 0x10}, {0x5000, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define 
DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6667 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6669 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 -#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 -#define 
PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 -#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 -#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684 -#define 
QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709 -#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29837 -#define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29904 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29913 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29915 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29919 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29921 
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_59_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29965 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29966 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29967 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996 -#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252 -#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30508 -#define 
QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30765 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30767 -#define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783 -#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30799 -#define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30815 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817 -#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833 -#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30849 -#define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31009 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31010 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011 -#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31523 -#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035 -#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32547 -#define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33059 -#define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864 -#define 
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924 - -#define RUNTIME_ARRAY_SIZE 33925 +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 
2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 +#define 
PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686 +#define 
QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29839 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29920 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29923 
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29929 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29935 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29966 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29967 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29968 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30510 +#define 
QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30767 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30769 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30801 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30817 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30851 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31011 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31012 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31525 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32549 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 33061 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866 +#define 
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926 + +#define RUNTIME_ARRAY_SIZE 33927 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -3201,7 +3566,31 @@ struct eth_conn_context { 
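The *_RT_OFFSET/*_RT_SIZE defines above are indices into a single flat dword array of RUNTIME_ARRAY_SIZE entries that the driver stages in host memory before pushing it to the chip; an arrayed register occupies RT_SIZE consecutive slots starting at its RT_OFFSET. A minimal standalone sketch of that staging scheme, mirroring the qed_init_store_rt_reg()/qed_init_store_rt_agg() helpers in the qed_init_ops.c hunks further down (the flat globals here are stand-ins for the driver's rt_data, not the real types):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define RUNTIME_ARRAY_SIZE 33927

/* Stand-ins for p_hwfn->rt_data.init_val[] and .b_valid[]. */
static uint32_t rt_init_val[RUNTIME_ARRAY_SIZE];
static bool rt_b_valid[RUNTIME_ARRAY_SIZE];

/* Stage one dword and mark the slot valid, so the init-time walk
 * (qed_init_rt() in the driver) knows it must be written to the chip.
 */
static void store_rt_reg(uint32_t rt_offset, uint32_t val)
{
	rt_init_val[rt_offset] = val;
	rt_b_valid[rt_offset] = true;
}

/* Arrayed registers (the *_RT_SIZE defines) take consecutive slots. */
static void store_rt_agg(uint32_t rt_offset, const uint32_t *p_val,
			 size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		store_rt_reg(rt_offset + (uint32_t)i, p_val[i]);
}

For example, the 512-entry TXPQ map above would be staged with store_rt_agg(QM_REG_TXPQMAP_RT_OFFSET, map, QM_REG_TXPQMAP_RT_SIZE).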
struct mstorm_eth_conn_st_ctx mstorm_st_context; }; -/* opcodes for the event ring */ +enum eth_error_code { + ETH_OK = 0x00, + ETH_FILTERS_MAC_ADD_FAIL_FULL, + ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2, + ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2, + ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2, + ETH_FILTERS_MAC_DEL_FAIL_NOF, + ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2, + ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2, + ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC, + ETH_FILTERS_VLAN_ADD_FAIL_FULL, + ETH_FILTERS_VLAN_ADD_FAIL_DUP, + ETH_FILTERS_VLAN_DEL_FAIL_NOF, + ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1, + ETH_FILTERS_PAIR_ADD_FAIL_DUP, + ETH_FILTERS_PAIR_ADD_FAIL_FULL, + ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC, + ETH_FILTERS_PAIR_DEL_FAIL_NOF, + ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1, + ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC, + ETH_FILTERS_VNI_ADD_FAIL_FULL, + ETH_FILTERS_VNI_ADD_FAIL_DUP, + MAX_ETH_ERROR_CODE +}; + enum eth_event_opcode { ETH_EVENT_UNUSED, ETH_EVENT_VPORT_START, @@ -3269,7 +3658,13 @@ enum eth_filter_type { MAX_ETH_FILTER_TYPE }; -/* Ethernet Ramrod Command IDs */ +enum eth_ipv4_frag_type { + ETH_IPV4_NOT_FRAG, + ETH_IPV4_FIRST_FRAG, + ETH_IPV4_NON_FIRST_FRAG, + MAX_ETH_IPV4_FRAG_TYPE +}; + enum eth_ramrod_cmd_id { ETH_RAMROD_UNUSED, ETH_RAMROD_VPORT_START, @@ -3451,8 +3846,8 @@ struct rx_queue_start_ramrod_data { u8 toggle_val; u8 vf_rx_prod_index; - - u8 reserved[6]; + u8 vf_rx_prod_use_zone_a; + u8 reserved[5]; __le16 reserved1; struct regpair cqe_pbl_addr; struct regpair bd_base; @@ -3526,10 +3921,11 @@ struct tx_queue_start_ramrod_data { __le16 pxp_st_index; __le16 comp_agg_size; __le16 queue_zone_id; - __le16 test_dup_count; + __le16 reserved2; __le16 pbl_size; __le16 tx_queue_id; - + __le16 same_as_last_id; + __le16 reserved[3]; struct regpair pbl_base_addr; struct regpair bd_cons_address; }; @@ -4926,8 +5322,8 @@ struct roce_create_qp_resp_ramrod_data { #define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F @@ -4988,6 +5384,10 @@ enum roce_event_opcode { MAX_ROCE_EVENT_OPCODE }; +struct roce_init_func_ramrod_data { + struct rdma_init_func_ramrod_data rdma; +}; + struct roce_modify_qp_req_ramrod_data { __le16 flags; #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 @@ -7239,6 +7639,12 @@ struct public_drv_mb { #define DRV_MSG_CODE_MCP_RESET 0x00090000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_GET_STATS 0x00130000 +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 + #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 @@ -7315,10 +7721,10 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_RESERVED4, MFW_DRV_MSG_BW_UPDATE, MFW_DRV_MSG_BW_UPDATE5, - MFW_DRV_MSG_BW_UPDATE6, - MFW_DRV_MSG_BW_UPDATE7, - MFW_DRV_MSG_BW_UPDATE8, - MFW_DRV_MSG_BW_UPDATE9, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, MFW_DRV_MSG_BW_UPDATE10, 
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index e17885321faf..8ebdc79b3850 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -44,8 +44,7 @@ struct qed_ptt_pool { int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) { - struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), - GFP_KERNEL); + struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL); int i; if (!p_pool) @@ -113,16 +112,14 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) return NULL; } -void qed_ptt_release(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); } -u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { /* The HW is using DWORDS and we need to translate it to Bytes */ return le32_to_cpu(p_ptt->pxp.offset) << 2; @@ -141,8 +138,7 @@ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt) } void qed_ptt_set_win(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 new_hw_addr) + struct qed_ptt *p_ptt, u32 new_hw_addr) { u32 prev_hw_addr; @@ -166,8 +162,7 @@ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, } static u32 qed_set_ptt(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 hw_addr) + struct qed_ptt *p_ptt, u32 hw_addr) { u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); u32 offset; @@ -224,10 +219,7 @@ u32 qed_rd(struct qed_hwfn *p_hwfn, static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - void *addr, - u32 hw_addr, - size_t n, - bool to_device) + void *addr, u32 hw_addr, size_t n, bool to_device) { u32 dw_count, *host_addr, hw_offset; size_t quota, done = 0; @@ -259,8 +251,7 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, } void qed_memcpy_from(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - void *dest, u32 hw_addr, size_t n) + struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", @@ -270,8 +261,7 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, } void qed_memcpy_to(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 hw_addr, void *src, size_t n) + struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", @@ -280,9 +270,7 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); } -void qed_fid_pretend(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 fid) +void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid) { u16 control = 0; @@ -309,8 +297,7 @@ void qed_fid_pretend(struct qed_hwfn *p_hwfn, } void qed_port_pretend(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 port_id) + struct qed_ptt *p_ptt, u8 port_id) { u16 control = 0; @@ -326,8 +313,7 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn, *(u32 *)&p_ptt->pxp.pretend); } -void qed_port_unpretend(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u16 control = 0; @@ -429,28 +415,27 @@ u32 qed_dmae_idx_to_go_cmd(u8 idx) return DMAE_REG_GO_C0 + (idx << 2); } -static int -qed_dmae_post_command(struct qed_hwfn *p_hwfn, 
- struct qed_ptt *p_ptt) +static int qed_dmae_post_command(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { - struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd; + struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd; u8 idx_cmd = p_hwfn->dmae_info.channel, i; int qed_status = 0; /* verify address is not NULL */ - if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) || - ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) { + if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) || + ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) { DP_NOTICE(p_hwfn, "source or destination address 0 idx_cmd=%d\n" "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", - idx_cmd, - le32_to_cpu(command->opcode), - le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length_dw), - le32_to_cpu(command->src_addr_hi), - le32_to_cpu(command->src_addr_lo), - le32_to_cpu(command->dst_addr_hi), - le32_to_cpu(command->dst_addr_lo)); + idx_cmd, + le32_to_cpu(p_command->opcode), + le16_to_cpu(p_command->opcode_b), + le16_to_cpu(p_command->length_dw), + le32_to_cpu(p_command->src_addr_hi), + le32_to_cpu(p_command->src_addr_lo), + le32_to_cpu(p_command->dst_addr_hi), + le32_to_cpu(p_command->dst_addr_lo)); return -EINVAL; } @@ -459,13 +444,13 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, NETIF_MSG_HW, "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", idx_cmd, - le32_to_cpu(command->opcode), - le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length_dw), - le32_to_cpu(command->src_addr_hi), - le32_to_cpu(command->src_addr_lo), - le32_to_cpu(command->dst_addr_hi), - le32_to_cpu(command->dst_addr_lo)); + le32_to_cpu(p_command->opcode), + le16_to_cpu(p_command->opcode_b), + le16_to_cpu(p_command->length_dw), + le32_to_cpu(p_command->src_addr_hi), + le32_to_cpu(p_command->src_addr_lo), + le32_to_cpu(p_command->dst_addr_hi), + le32_to_cpu(p_command->dst_addr_lo)); /* Copy the command to DMAE - need to do it before every call * for source/dest address no reset. @@ -475,7 +460,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, */ for (i = 0; i < DMAE_CMD_SIZE; i++) { u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? 
- *(((u32 *)command) + i) : 0; + *(((u32 *)p_command) + i) : 0; qed_wr(p_hwfn, p_ptt, DMAE_REG_CMD_MEM + @@ -483,9 +468,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, (i * sizeof(u32)), data); } - qed_wr(p_hwfn, p_ptt, - qed_dmae_idx_to_go_cmd(idx_cmd), - DMAE_GO_VALUE); + qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE); return qed_status; } @@ -498,9 +481,7 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn) u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - sizeof(u32), - p_addr, - GFP_KERNEL); + sizeof(u32), p_addr, GFP_KERNEL); if (!*p_comp) { DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n"); goto err; @@ -543,8 +524,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn) p_phys = p_hwfn->dmae_info.completion_word_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(u32), - p_hwfn->dmae_info.p_completion_word, - p_phys); + p_hwfn->dmae_info.p_completion_word, p_phys); p_hwfn->dmae_info.p_completion_word = NULL; } @@ -552,8 +532,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn) p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct dmae_cmd), - p_hwfn->dmae_info.p_dmae_cmd, - p_phys); + p_hwfn->dmae_info.p_dmae_cmd, p_phys); p_hwfn->dmae_info.p_dmae_cmd = NULL; } @@ -571,9 +550,7 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn) static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) { - u32 wait_cnt = 0; - u32 wait_cnt_limit = 10000; - + u32 wait_cnt_limit = 10000, wait_cnt = 0; int qed_status = 0; barrier(); @@ -606,7 +583,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, u64 dst_addr, u8 src_type, u8 dst_type, - u32 length) + u32 length_dw) { dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; @@ -624,7 +601,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys)); memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0], (void *)(uintptr_t)src_addr, - length * sizeof(u32)); + length_dw * sizeof(u32)); break; default: return -EINVAL; @@ -645,7 +622,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, return -EINVAL; } - cmd->length_dw = cpu_to_le16((u16)length); + cmd->length_dw = cpu_to_le16((u16)length_dw); qed_dmae_post_command(p_hwfn, p_ptt); @@ -654,16 +631,14 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, if (qed_status) { DP_NOTICE(p_hwfn, "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n", - src_addr, - dst_addr, - length); + src_addr, dst_addr, length_dw); return qed_status; } if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT) memcpy((void *)(uintptr_t)(dst_addr), &p_hwfn->dmae_info.p_intermediate_buffer[0], - length * sizeof(u32)); + length_dw * sizeof(u32)); return 0; } @@ -730,10 +705,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, if (qed_status) { DP_NOTICE(p_hwfn, "qed_dmae_execute_sub_operation Failed with error 0x%x. 
source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n", - qed_status, - src_addr, - dst_addr, - length_cur); + qed_status, src_addr, dst_addr, length_cur); break; } } @@ -743,10 +715,7 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u64 source_addr, - u32 grc_addr, - u32 size_in_dwords, - u32 flags) + u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags) { u32 grc_addr_in_dw = grc_addr / sizeof(u32); struct qed_dmae_params params; @@ -768,9 +737,10 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, return rc; } -int -qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, - dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) +int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) { u32 grc_addr_in_dw = grc_addr / sizeof(u32); struct qed_dmae_params params; @@ -791,12 +761,11 @@ qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, return rc; } -int -qed_dmae_host2host(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - dma_addr_t source_addr, - dma_addr_t dest_addr, - u32 size_in_dwords, struct qed_dmae_params *p_params) +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params) { int rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 9866a20d2128..8ce8564061d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -59,17 +59,14 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) p_hwfn->rt_data.b_valid[i] = false; } -void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, - u32 rt_offset, - u32 val) +void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { p_hwfn->rt_data.init_val[rt_offset] = val; p_hwfn->rt_data.b_valid[rt_offset] = true; } void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, - u32 rt_offset, u32 *p_val, - size_t size) + u32 rt_offset, u32 *p_val, size_t size) { size_t i; @@ -81,10 +78,7 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, static int qed_init_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, - u16 rt_offset, - u16 size, - bool b_must_dmae) + u32 addr, u16 rt_offset, u16 size, bool b_must_dmae) { u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; @@ -102,8 +96,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, * simply write the data instead of using dmae. 
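On the length_dw renames in the qed_hw.c hunks above: the DMAE helpers move buffers in dword units, and qed_dmae_execute_command() splits a large request into sub-operations before posting each one through the intermediate buffer. A rough standalone sketch of that splitting loop, assuming a fixed per-command limit (DMAE_MAX_RW_SIZE and the xfer callback are placeholders, not the driver's API):

#include <stdint.h>

#define DMAE_MAX_RW_SIZE 0x2000	/* assumed per-command limit, in dwords */

/* Stands in for one posted-and-completed DMAE sub-operation. */
typedef int (*dmae_xfer_t)(uint64_t src, uint64_t dst, uint32_t length_dw);

static int dmae_execute(uint64_t src_addr, uint64_t dst_addr,
			uint32_t size_in_dwords, dmae_xfer_t xfer)
{
	uint32_t offset = 0;

	while (size_in_dwords) {
		/* Cap each sub-operation at the engine's limit. */
		uint32_t length_cur = size_in_dwords < DMAE_MAX_RW_SIZE ?
				      size_in_dwords : DMAE_MAX_RW_SIZE;
		int rc;

		rc = xfer(src_addr + (uint64_t)offset * sizeof(uint32_t),
			  dst_addr + (uint64_t)offset * sizeof(uint32_t),
			  length_cur);
		if (rc)
			return rc;

		offset += length_cur;
		size_in_dwords -= length_cur;
	}

	return 0;
}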
*/ if (!b_must_dmae) { - qed_wr(p_hwfn, p_ptt, addr + (i << 2), - p_init_val[i]); + qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]); continue; } @@ -115,7 +108,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, rc = qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(p_init_val + i), addr + (i << 2), segment, 0); - if (rc != 0) + if (rc) return rc; /* Jump over the entire segment, including invalid entry */ @@ -182,9 +175,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, - u32 fill, - u32 fill_count) + u32 addr, u32 fill, u32 fill_count) { static u32 zero_buffer[DMAE_MAX_RW_SIZE]; @@ -199,15 +190,12 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(&zero_buffer[0]), - addr, fill_count, - QED_DMAE_FLAG_RW_REPL_SRC); + addr, fill_count, QED_DMAE_FLAG_RW_REPL_SRC); } static void qed_init_fill(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, - u32 fill, - u32 fill_count) + u32 addr, u32 fill, u32 fill_count) { u32 i; @@ -218,12 +206,12 @@ static void qed_init_fill(struct qed_hwfn *p_hwfn, static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_write_op *cmd, - bool b_must_dmae, - bool b_can_dmae) + bool b_must_dmae, bool b_can_dmae) { + u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); u32 data = le32_to_cpu(cmd->data); u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; - u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); + u32 offset, output_len, input_len, max_size; struct qed_dev *cdev = p_hwfn->cdev; union init_array_hdr *hdr; @@ -233,8 +221,7 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, array_data = cdev->fw_data->arr_data; - hdr = (union init_array_hdr *)(array_data + - dmae_array_offset); + hdr = (union init_array_hdr *)(array_data + dmae_array_offset); data = le32_to_cpu(hdr->raw.data); switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { case INIT_ARR_ZIPPED: @@ -290,13 +277,12 @@ static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, /* init_ops write command */ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct init_write_op *cmd, - bool b_can_dmae) + struct init_write_op *p_cmd, bool b_can_dmae) { - u32 data = le32_to_cpu(cmd->data); - u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + u32 data = le32_to_cpu(p_cmd->data); bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); - union init_write_args *arg = &cmd->args; + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + union init_write_args *arg = &p_cmd->args; int rc = 0; /* Sanitize */ @@ -309,20 +295,18 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { case INIT_SRC_INLINE: - qed_wr(p_hwfn, p_ptt, addr, - le32_to_cpu(arg->inline_val)); + data = le32_to_cpu(p_cmd->args.inline_val); + qed_wr(p_hwfn, p_ptt, addr, data); break; case INIT_SRC_ZEROS: - if (b_must_dmae || - (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64))) - rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, - le32_to_cpu(arg->zeros_count)); + data = le32_to_cpu(p_cmd->args.zeros_count); + if (b_must_dmae || (b_can_dmae && (data >= 64))) + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data); else - qed_init_fill(p_hwfn, p_ptt, addr, 0, - le32_to_cpu(arg->zeros_count)); + qed_init_fill(p_hwfn, p_ptt, addr, 0, data); break; case INIT_SRC_ARRAY: - rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd, + rc = 
qed_init_cmd_array(p_hwfn, p_ptt, p_cmd, b_must_dmae, b_can_dmae); break; case INIT_SRC_RUNTIME: @@ -353,8 +337,7 @@ static inline bool comp_or(u32 val, u32 expected_val) /* init_ops read/poll commands */ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct init_read_op *cmd) + struct qed_ptt *p_ptt, struct init_read_op *cmd) { bool (*comp_check)(u32 val, u32 expected_val); u32 delay = QED_INIT_POLL_PERIOD_US, val; @@ -412,35 +395,33 @@ static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, } static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, - u16 *offset, - int modes) + u16 *p_offset, int modes) { struct qed_dev *cdev = p_hwfn->cdev; const u8 *modes_tree_buf; u8 arg1, arg2, tree_val; modes_tree_buf = cdev->fw_data->modes_tree_buf; - tree_val = modes_tree_buf[(*offset)++]; + tree_val = modes_tree_buf[(*p_offset)++]; switch (tree_val) { case INIT_MODE_OP_NOT: - return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1; + return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1; case INIT_MODE_OP_OR: - arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); - arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); return arg1 | arg2; case INIT_MODE_OP_AND: - arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes); - arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes); + arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); return arg1 & arg2; default: tree_val -= MAX_INIT_MODE_OPS; - return (modes & (1 << tree_val)) ? 1 : 0; + return (modes & BIT(tree_val)) ? 1 : 0; } } static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, - struct init_if_mode_op *p_cmd, - int modes) + struct init_if_mode_op *p_cmd, int modes) { u16 offset = le16_to_cpu(p_cmd->modes_buf_offset); @@ -453,8 +434,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, struct init_if_phase_op *p_cmd, - u32 phase, - u32 phase_id) + u32 phase, u32 phase_id) { u32 data = le32_to_cpu(p_cmd->phase_data); u32 op_data = le32_to_cpu(p_cmd->op_data); @@ -468,10 +448,7 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, } int qed_init_run(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - int phase, - int phase_id, - int modes) + struct qed_ptt *p_ptt, int phase, int phase_id, int modes) { struct qed_dev *cdev = p_hwfn->cdev; u32 cmd_num, num_init_ops; @@ -557,7 +534,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) /* First Dword contains metadata and should be skipped */ buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); - offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset; + offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; fw->fw_ver_info = (struct fw_ver_info *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_CMD].offset; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 8fa50fa23c8d..61ec973a06c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1775,10 +1775,9 @@ struct qed_sb_attn_info { }; static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, - struct qed_sb_attn_info *p_sb_desc) + struct qed_sb_attn_info *p_sb_desc) { - u16 rc = 0; - u16 index; + u16 rc = 0, index; /* Make certain HW write took affect */ mmiowb(); @@ -1802,15 +1801,13 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, * @param 
asserted_bits newly asserted bits * @return int */ -static int qed_int_assertion(struct qed_hwfn *p_hwfn, - u16 asserted_bits) +static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; u32 igu_mask; /* Mask the source of the attention in the IGU */ - igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - IGU_REG_ATTENTION_ENABLE); + igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); @@ -2041,7 +2038,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if ((p_bit->flags & ATTENTION_PARITY) && - !!(parities & (1 << bit_idx))) + !!(parities & BIT(bit_idx))) qed_int_deassertion_parity(p_hwfn, p_bit, bit_idx); @@ -2114,8 +2111,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, ~((u32)deasserted_bits)); /* Unmask deasserted attentions in IGU */ - aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - IGU_REG_ATTENTION_ENABLE); + aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); @@ -2160,8 +2156,7 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, - "MFW indication via attention\n"); + DP_INFO(p_hwfn, "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); @@ -2173,18 +2168,14 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) return rc; } - if (deasserted_bits) { + if (deasserted_bits) rc = qed_int_deassertion(p_hwfn, deasserted_bits); - if (rc) - return rc; - } return rc; } static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, - void __iomem *igu_addr, - u32 ack_cons) + void __iomem *igu_addr, u32 ack_cons) { struct igu_prod_cons_update igu_ack = { 0 }; @@ -2242,9 +2233,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) /* Gather Interrupts/Attentions information */ if (!sb_info->sb_virt) { - DP_ERR( - p_hwfn->cdev, - "Interrupt Status block is NULL - cannot check for new interrupts!\n"); + DP_ERR(p_hwfn->cdev, + "Interrupt Status block is NULL - cannot check for new interrupts!\n"); } else { u32 tmp_index = sb_info->sb_ack; @@ -2255,9 +2245,8 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) } if (!sb_attn || !sb_attn->sb_attn) { - DP_ERR( - p_hwfn->cdev, - "Attentions Status block is NULL - cannot check for new attentions!\n"); + DP_ERR(p_hwfn->cdev, + "Attentions Status block is NULL - cannot check for new attentions!\n"); } else { u16 tmp_index = sb_attn->index; @@ -2313,8 +2302,7 @@ static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) if (p_sb->sb_attn) dma_free_coherent(&p_hwfn->cdev->pdev->dev, SB_ATTN_ALIGNED_SIZE(p_hwfn), - p_sb->sb_attn, - p_sb->sb_phys); + p_sb->sb_attn, p_sb->sb_phys); kfree(p_sb); } @@ -2337,8 +2325,7 @@ static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - void *sb_virt_addr, - dma_addr_t sb_phy_addr) + void *sb_virt_addr, dma_addr_t sb_phy_addr) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; int i, j, k; @@ -2378,8 +2365,8 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, { 
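The qed_init_cmd_mode_match() helper reworked in the qed_init_ops.c diff above evaluates a byte-encoded prefix expression: NOT, OR and AND opcodes recurse into their operands, and any other byte is a leaf that tests one mode bit after subtracting MAX_INIT_MODE_OPS. A self-contained sketch of the same walk (the opcode values here are illustrative, not the firmware tool's encoding):

#include <stdint.h>

/* Illustrative opcode values; the real ones come from the fw init tool. */
enum { MODE_OP_NOT, MODE_OP_OR, MODE_OP_AND, MAX_MODE_OPS };

static uint8_t mode_match(const uint8_t *tree, uint16_t *p_offset, int modes)
{
	uint8_t op = tree[(*p_offset)++];
	uint8_t arg1, arg2;

	switch (op) {
	case MODE_OP_NOT:
		return mode_match(tree, p_offset, modes) ^ 1;
	case MODE_OP_OR:
		/* Each call advances *p_offset, so temporaries force the
		 * left subtree to be parsed before the right one.
		 */
		arg1 = mode_match(tree, p_offset, modes);
		arg2 = mode_match(tree, p_offset, modes);
		return arg1 | arg2;
	case MODE_OP_AND:
		arg1 = mode_match(tree, p_offset, modes);
		arg2 = mode_match(tree, p_offset, modes);
		return arg1 & arg2;
	default:
		/* Leaf: test a single mode bit. */
		return (modes & (1 << (op - MAX_MODE_OPS))) ? 1 : 0;
	}
}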
struct qed_dev *cdev = p_hwfn->cdev; struct qed_sb_attn_info *p_sb; - void *p_virt; dma_addr_t p_phys = 0; + void *p_virt; /* SB struct */ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); @@ -2412,9 +2399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, - u8 pf_id, - u16 vf_number, - u8 vf_valid) + u8 pf_id, u16 vf_number, u8 vf_valid) { struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; @@ -2428,12 +2413,6 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); - /* setting the time resultion to a fixed value ( = 1) */ - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, - QED_CAU_DEF_RX_TIMER_RES); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, - QED_CAU_DEF_TX_TIMER_RES); - cau_state = CAU_HC_DISABLE_STATE; if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { @@ -2468,9 +2447,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t sb_phys, - u16 igu_sb_id, - u16 vf_number, - u8 vf_valid) + u16 igu_sb_id, u16 vf_number, u8 vf_valid) { struct cau_sb_entry sb_entry; @@ -2514,8 +2491,7 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, timer_res = 2; timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, - QED_COAL_RX_STATE_MACHINE, - timeset); + QED_COAL_RX_STATE_MACHINE, timeset); if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) timer_res = 0; @@ -2541,8 +2517,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, u8 timeset) { struct cau_pi_entry pi_entry; - u32 sb_offset; - u32 pi_offset; + u32 sb_offset, pi_offset; if (IS_VF(p_hwfn->cdev)) return; @@ -2569,8 +2544,7 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, } void qed_int_sb_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_sb_info *sb_info) + struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) { /* zero status block and ack counter */ sb_info->sb_ack = 0; @@ -2590,8 +2564,7 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, * * @return u16 */ -static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, - u16 sb_id) +static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { u16 igu_sb_id; @@ -2603,8 +2576,12 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, else igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", - (sb_id == QED_SP_SB_ID) ? 
"DSB" : "non-DSB", igu_sb_id); + if (sb_id == QED_SP_SB_ID) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); + else + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); return igu_sb_id; } @@ -2612,9 +2589,7 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info, - void *sb_virt_addr, - dma_addr_t sb_phy_addr, - u16 sb_id) + void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) { sb_info->sb_virt = sb_virt_addr; sb_info->sb_phys = sb_phy_addr; @@ -2650,8 +2625,7 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, } int qed_int_sb_release(struct qed_hwfn *p_hwfn, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { if (sb_id == QED_SP_SB_ID) { DP_ERR(p_hwfn, "Do Not free sp sb using this function"); @@ -2685,8 +2659,7 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) kfree(p_sb); } -static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_sb_sp_info *p_sb; dma_addr_t p_phys = 0; @@ -2721,9 +2694,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, - void *cookie, - u8 *sb_idx, - __le16 **p_fw_cons) + void *cookie, u8 *sb_idx, __le16 **p_fw_cons) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; int rc = -ENOMEM; @@ -2764,8 +2735,7 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) } void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_int_mode int_mode) + struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; @@ -2809,7 +2779,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { rc = qed_slowpath_irq_req(p_hwfn); - if (rc != 0) { + if (rc) { DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); return -EINVAL; } @@ -2822,8 +2792,7 @@ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } -void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->b_int_enabled = 0; @@ -2950,13 +2919,11 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.opaque_fid, b_set); } -static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 sb_id) +static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 sb_id) { u32 val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + - sizeof(u32) * sb_id); + IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); struct qed_igu_block *p_block; p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; @@ -2983,8 +2950,7 @@ out: return val; } -int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; u32 val, min_vf = 0, max_vf = 0; @@ -3104,22 +3070,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, */ void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) { - u32 igu_pf_conf = 0; - - igu_pf_conf |= IGU_PF_CONF_FUNC_EN; + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 
STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); } u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) { - u64 intr_status = 0; - u32 intr_status_lo = 0; - u32 intr_status_hi = 0; u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - IGU_CMD_INT_ACK_BASE; u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - IGU_CMD_INT_ACK_BASE; + u32 intr_status_hi = 0, intr_status_lo = 0; + u64 intr_status = 0; intr_status_lo = REG_RD(p_hwfn, GTT_BAR0_MAP_REG_IGU_CMD + @@ -3153,8 +3116,7 @@ static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) kfree(p_hwfn->sp_dpc); } -int qed_int_alloc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int rc = 0; @@ -3169,10 +3131,9 @@ int qed_int_alloc(struct qed_hwfn *p_hwfn, return rc; } rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); - if (rc) { + if (rc) DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n"); - return rc; - } + return rc; } @@ -3183,8 +3144,7 @@ void qed_int_free(struct qed_hwfn *p_hwfn) qed_int_sp_dpc_free(p_hwfn); } -void qed_int_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); qed_int_sb_attn_setup(p_hwfn, p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 401e738543b5..4409ea3f7d40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -52,7 +52,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, u16 rx_mode = 0; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc != 0) + if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); @@ -80,8 +80,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); /* TPA related fields */ - memset(&p_ramrod->tpa_param, 0, - sizeof(struct eth_vport_tpa_param)); + memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param)); p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; @@ -102,6 +101,9 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->tx_switching_en = p_params->tx_switching; + p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; + p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, p_params->concrete_fid); @@ -306,14 +308,14 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, memset(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); - if (p_params->update_approx_mcast_flg) { - p_ramrod->common.update_approx_mcast_flg = 1; - for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { - u32 *p_bins = (u32 *)p_params->bins; - __le32 val = cpu_to_le32(p_bins[i]); + if (!p_params->update_approx_mcast_flg) + return; - p_ramrod->approx_mcast.bins[i] = val; - } + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)p_params->bins; + + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); } } @@ -336,7 +338,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, } rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc != 0) + if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); @@ -361,8 +363,8 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->tx_active_flg = p_params->vport_active_tx_flg; 
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; p_cmn->accept_any_vlan = p_params->accept_any_vlan; - p_cmn->update_accept_any_vlan_flg = - p_params->update_accept_any_vlan_flg; + val = p_params->update_accept_any_vlan_flg; + p_cmn->update_accept_any_vlan_flg = val; p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; val = p_params->update_inner_vlan_removal_flg; @@ -411,7 +413,7 @@ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) return qed_vf_pf_vport_stop(p_hwfn); rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); - if (rc != 0) + if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); @@ -476,7 +478,7 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, rc = qed_sp_vport_update(p_hwfn, &vport_update_params, comp_mode, p_comp_data); - if (rc != 0) { + if (rc) { DP_ERR(cdev, "Update rx_mode failed %d\n", rc); return rc; } @@ -511,11 +513,12 @@ static int qed_sp_release_queue_cid( int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, u16 opaque_fid, u32 cid, - struct qed_queue_start_common_params *params, + struct qed_queue_start_common_params *p_params, u8 stats_id, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, bool b_use_zone_a_prod) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -526,23 +529,23 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, int rc = -EINVAL; /* Store information for the stop */ - p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; - p_rx_cid->cid = cid; - p_rx_cid->opaque_fid = opaque_fid; - p_rx_cid->vport_id = params->vport_id; + p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; + p_rx_cid->cid = cid; + p_rx_cid->opaque_fid = opaque_fid; + p_rx_cid->vport_id = p_params->vport_id; - rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id); - if (rc != 0) + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) return rc; - rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id); - if (rc != 0) + rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id); + if (rc) return rc; DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", - opaque_fid, cid, params->queue_id, params->vport_id, - params->sb); + opaque_fid, + cid, p_params->queue_id, p_params->vport_id, p_params->sb); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -558,24 +561,28 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(params->sb); - p_ramrod->sb_index = params->sb_idx; - p_ramrod->vport_id = abs_vport_id; - p_ramrod->stats_counter_id = stats_id; - p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); - p_ramrod->complete_cqe_flg = 0; - p_ramrod->complete_event_flg = 1; + p_ramrod->sb_id = cpu_to_le16(p_params->sb); + p_ramrod->sb_index = p_params->sb_idx; + p_ramrod->vport_id = abs_vport_id; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; - p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); + p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); - p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); + p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - p_ramrod->vf_rx_prod_index = 
params->vf_qid; - if (params->vf_qid) + if (p_params->vf_qid || b_use_zone_a_prod) { + p_ramrod->vf_rx_prod_index = p_params->vf_qid; DP_VERBOSE(p_hwfn, QED_MSG_SP, - "Queue is meant for VF rxq[%04x]\n", params->vf_qid); + "Queue%s is meant for VF rxq[%02x]\n", + b_use_zone_a_prod ? " [legacy]" : "", + p_params->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod; + } return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -583,7 +590,7 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, static int qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 opaque_fid, - struct qed_queue_start_common_params *params, + struct qed_queue_start_common_params *p_params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, @@ -597,20 +604,20 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, if (IS_VF(p_hwfn->cdev)) { return qed_vf_pf_rxq_start(p_hwfn, - params->queue_id, - params->sb, - params->sb_idx, + p_params->queue_id, + p_params->sb, + (u8)p_params->sb_idx, bd_max_bytes, bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size, pp_prod); } - rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); - if (rc != 0) + rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue); + if (rc) return rc; - rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id); - if (rc != 0) + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); + if (rc) return rc; *pp_prod = (u8 __iomem *)p_hwfn->regview + @@ -622,9 +629,8 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, (u32 *)(&init_prod_val)); /* Allocate a CID for the queue */ - p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id]; - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, - &p_rx_cid->cid); + p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id]; + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid); if (rc) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return rc; @@ -634,14 +640,13 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, opaque_fid, p_rx_cid->cid, - params, + p_params, abs_stats_id, bd_max_bytes, bd_chain_phys_addr, - cqe_pbl_addr, - cqe_pbl_size); + cqe_pbl_addr, cqe_pbl_size, false); - if (rc != 0) + if (rc) qed_sp_release_queue_cid(p_hwfn, p_rx_cid); return rc; @@ -788,21 +793,20 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, if (rc) return rc; - p_ramrod = &p_ent->ramrod.tx_queue_start; - p_ramrod->vport_id = abs_vport_id; + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = abs_vport_id; - p_ramrod->sb_id = cpu_to_le16(p_params->sb); - p_ramrod->sb_index = p_params->sb_idx; - p_ramrod->stats_counter_id = stats_id; + p_ramrod->sb_id = cpu_to_le16(p_params->sb); + p_ramrod->sb_index = p_params->sb_idx; + p_ramrod->stats_counter_id = stats_id; - p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); - p_ramrod->pbl_size = cpu_to_le16(pbl_size); + p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); + + p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); - pq_id = qed_get_qm_pq(p_hwfn, - PROTOCOLID_ETH, - p_pq_params); - p_ramrod->qm_pq_id = cpu_to_le16(pq_id); + pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params); + p_ramrod->qm_pq_id = cpu_to_le16(pq_id); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -836,8 +840,7 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, memset(&pq_params, 0, sizeof(pq_params)); /* Allocate a CID for the queue */ - rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, - &p_tx_cid->cid); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 
&p_tx_cid->cid); if (rc) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return rc; @@ -896,8 +899,7 @@ int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) return qed_sp_release_queue_cid(p_hwfn, p_tx_cid); } -static enum eth_filter_action -qed_filter_action(enum qed_filter_opcode opcode) +static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) { enum eth_filter_action action = MAX_ETH_FILTER_ACTION; @@ -1033,19 +1035,19 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); if (p_filter_cmd->opcode == QED_FILTER_MOVE) { - p_second_filter->type = p_first_filter->type; - p_second_filter->mac_msb = p_first_filter->mac_msb; - p_second_filter->mac_mid = p_first_filter->mac_mid; - p_second_filter->mac_lsb = p_first_filter->mac_lsb; - p_second_filter->vlan_id = p_first_filter->vlan_id; - p_second_filter->vni = p_first_filter->vni; + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + p_second_filter->vni = p_first_filter->vni; p_first_filter->action = ETH_FILTER_ACTION_REMOVE; p_first_filter->vport_id = vport_to_remove_from; - p_second_filter->action = ETH_FILTER_ACTION_ADD; - p_second_filter->vport_id = vport_to_add_to; + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { p_first_filter->vport_id = vport_to_add_to; memcpy(p_second_filter, p_first_filter, @@ -1086,7 +1088,7 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, &p_ramrod, &p_ent, comp_mode, p_comp_data); - if (rc != 0) { + if (rc) { DP_ERR(p_hwfn, "Uni. 
filter command failed %d\n", rc); return rc; } @@ -1094,10 +1096,8 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, p_header->assert_on_error = p_filter_cmd->assert_on_error; rc = qed_spq_post(p_hwfn, p_ent, NULL); - if (rc != 0) { - DP_ERR(p_hwfn, - "Unicast filter ADD command failed %d\n", - rc); + if (rc) { + DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); return rc; } @@ -1136,15 +1136,10 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, * Return: ******************************************************************************/ static u32 qed_calc_crc32c(u8 *crc32_packet, - u32 crc32_length, - u32 crc32_seed, - u8 complement) + u32 crc32_length, u32 crc32_seed, u8 complement) { - u32 byte = 0; - u32 bit = 0; - u8 msb = 0; - u8 current_byte = 0; - u32 crc32_result = crc32_seed; + u32 byte = 0, bit = 0, crc32_result = crc32_seed; + u8 msb = 0, current_byte = 0; if ((!crc32_packet) || (crc32_length == 0) || @@ -1164,9 +1159,7 @@ static u32 qed_calc_crc32c(u8 *crc32_packet, return crc32_result; } -static inline u32 qed_crc32c_le(u32 seed, - u8 *mac, - u32 len) +static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) { u32 packet_buf[2] = { 0 }; @@ -1196,17 +1189,14 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc, i; - if (p_filter_cmd->opcode == QED_FILTER_ADD) { + if (p_filter_cmd->opcode == QED_FILTER_ADD) rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, &abs_vport_id); - if (rc) - return rc; - } else { + else rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, &abs_vport_id); - if (rc) - return rc; - } + if (rc) + return rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -1244,11 +1234,11 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; u32 *p_bins = (u32 *)bins; - struct vport_update_ramrod_mcast *approx_mcast; - approx_mcast = &p_ramrod->approx_mcast; - approx_mcast->bins[i] = cpu_to_le32(p_bins[i]); + p_ramrod_bins = &p_ramrod->approx_mcast; + p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); } } @@ -1286,8 +1276,7 @@ static int qed_filter_mcast_cmd(struct qed_dev *cdev, rc = qed_sp_eth_filter_mcast(p_hwfn, opaque_fid, p_filter_cmd, - comp_mode, - p_comp_data); + comp_mode, p_comp_data); } return rc; } @@ -1314,9 +1303,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, rc = qed_sp_eth_filter_ucast(p_hwfn, opaque_fid, p_filter_cmd, - comp_mode, - p_comp_data); - if (rc != 0) + comp_mode, p_comp_data); + if (rc) break; } @@ -1590,8 +1578,7 @@ out: } } -void qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) +void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { u32 i; @@ -1698,6 +1685,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, qed_vf_get_num_vlan_filters(&cdev->hwfns[0], &info->num_vlan_filters); qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); + + info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; } qed_fill_dev_info(cdev, &info->common); @@ -1766,8 +1755,7 @@ static int qed_start_vport(struct qed_dev *cdev, return 0; } -static int qed_stop_vport(struct qed_dev *cdev, - u8 vport_id) +static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) { int rc, i; @@ -1775,8 +1763,7 @@ static int qed_stop_vport(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; rc = qed_sp_vport_stop(p_hwfn, - p_hwfn->hw_info.opaque_fid, - vport_id); + p_hwfn->hw_info.opaque_fid, 
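
/*
 * Editorial sketch, not part of the patch: the bit-serial CRC32c that
 * qed_calc_crc32c() implements above, reduced to standalone C so the
 * approximate-multicast bin selection can be tested in userspace. The
 * inner loop body is elided in the hunk; this reconstruction assumes the
 * Castagnoli polynomial (0x1edc6f41) and the MSB-first shift register
 * implied by the driver's `msb`/`current_byte` naming.
 */
#include <stdint.h>
#include <string.h>

#define CRC32C_POLY 0x1edc6f41u

static uint32_t calc_crc32c(const uint8_t *buf, uint32_t len, uint32_t seed)
{
	uint32_t crc = seed;

	for (uint32_t byte = 0; byte < len; byte++) {
		uint8_t cur = buf[byte];

		for (uint32_t bit = 0; bit < 8; bit++) {
			uint8_t msb = (uint8_t)(crc >> 31);

			crc <<= 1;
			if (msb != ((cur >> bit) & 1)) {
				crc ^= CRC32C_POLY;
				crc |= 1;
			}
		}
	}
	return crc;
}

/* Mirrors qed_crc32c_le(): pad the 6-byte MAC into two zeroed LE words. */
static uint32_t crc32c_mac(uint32_t seed, const uint8_t mac[6])
{
	uint32_t packet_buf[2] = { 0 };

	memcpy(packet_buf, mac, 6);
	return calc_crc32c((const uint8_t *)packet_buf, 8, seed);
}
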
vport_id); if (rc) { DP_ERR(cdev, "Failed to stop VPORT\n"); @@ -1801,10 +1788,8 @@ static int qed_update_vport(struct qed_dev *cdev, /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; - sp_params.update_vport_active_rx_flg = - params->update_vport_active_flg; - sp_params.update_vport_active_tx_flg = - params->update_vport_active_flg; + sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; + sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; sp_params.update_tx_switching_flg = params->update_tx_switching_flg; @@ -1817,8 +1802,7 @@ static int qed_update_vport(struct qed_dev *cdev, * We need to re-fix the rss values per engine for CMT. */ if (cdev->num_hwfns > 1 && params->update_rss_flg) { - struct qed_update_vport_rss_params *rss = - ¶ms->rss_params; + struct qed_update_vport_rss_params *rss = ¶ms->rss_params; int k, max = 0; /* Find largest entry, since it's possible RSS needs to @@ -1861,8 +1845,8 @@ static int qed_update_vport(struct qed_dev *cdev, QED_RSS_IND_TABLE_SIZE * sizeof(u16)); memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); + sp_params.rss_params = &sp_rss_params; } - sp_params.rss_params = &sp_rss_params; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -1893,8 +1877,8 @@ static int qed_start_rxq(struct qed_dev *cdev, u16 cqe_pbl_size, void __iomem **pp_prod) { - int rc, hwfn_index; struct qed_hwfn *p_hwfn; + int rc, hwfn_index; hwfn_index = params->rss_id % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; @@ -1935,8 +1919,7 @@ static int qed_stop_rxq(struct qed_dev *cdev, rc = qed_sp_eth_rx_queue_stop(p_hwfn, params->rx_queue_id / cdev->num_hwfns, - params->eq_completion_only, - false); + params->eq_completion_only, false); if (rc) { DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id); return rc; @@ -2047,11 +2030,11 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, memset(&accept_flags, 0, sizeof(accept_flags)); - accept_flags.update_rx_mode_config = 1; - accept_flags.update_tx_mode_config = 1; - accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | - QED_ACCEPT_MCAST_MATCHED | - QED_ACCEPT_BCAST; + accept_flags.update_rx_mode_config = 1; + accept_flags.update_tx_mode_config = 1; + accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST; @@ -2072,9 +2055,8 @@ static int qed_configure_filter_ucast(struct qed_dev *cdev, struct qed_filter_ucast ucast; if (!params->vlan_valid && !params->mac_valid) { - DP_NOTICE( - cdev, - "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); + DP_NOTICE(cdev, + "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); return -EINVAL; } @@ -2135,8 +2117,7 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev, for (i = 0; i < mcast.num_mc_addrs; i++) ether_addr_copy(mcast.mac[i], params->mac[i]); - return qed_filter_mcast_cmd(cdev, &mcast, - QED_SPQ_MODE_CB, NULL); + return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } static int qed_configure_filter(struct qed_dev *cdev, @@ -2153,15 +2134,13 @@ static int qed_configure_filter(struct qed_dev *cdev, accept_flags = params->filter.accept_flags; return qed_configure_filter_rx_mode(cdev, accept_flags); 
default: - DP_NOTICE(cdev, "Unknown filter type %d\n", - (int)params->type); + DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); return -EINVAL; } } static int qed_fp_cqe_completion(struct qed_dev *dev, - u8 rss_id, - struct eth_slow_path_rx_cqe *cqe) + u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], cqe); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 002114543451..e495d62fcc03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -102,6 +102,8 @@ struct qed_sp_vport_start_params { u16 opaque_fid; u8 vport_id; u16 mtu; + bool check_mac; + bool check_ethtype; }; int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, @@ -213,6 +215,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); + int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); @@ -223,7 +227,8 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, u8 stats_id, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, bool b_use_zone_a_prod); int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, u16 opaque_fid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c7dc34bfdd0a..32f71ee57191 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -51,8 +51,6 @@ MODULE_FIRMWARE(QED_FW_FILE_NAME); static int __init qed_init(void) { - pr_notice("qed_init called\n"); - pr_info("%s", version); return 0; @@ -106,8 +104,7 @@ static void qed_free_pci(struct qed_dev *cdev) /* Performs PCI initializations as well as initializing PCI-related parameters * in the device structrue. Returns 0 in case of success. */ -static int qed_init_pci(struct qed_dev *cdev, - struct pci_dev *pdev) +static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) { u8 rev_id; int rc; @@ -263,8 +260,7 @@ static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) } /* Sets the requested power state */ -static int qed_set_power_state(struct qed_dev *cdev, - pci_power_t state) +static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) { if (!cdev) return -ENODEV; @@ -366,8 +362,8 @@ static int qed_enable_msix(struct qed_dev *cdev, DP_NOTICE(cdev, "Trying to enable MSI-X with less vectors (%d out of %d)\n", cnt, int_params->in.num_vectors); - rc = pci_enable_msix_exact(cdev->pdev, - int_params->msix_table, cnt); + rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, + cnt); if (!rc) rc = cnt; } @@ -439,6 +435,11 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) } out: + if (!rc) + DP_INFO(cdev, "Using %s interrupts\n", + int_params->out.int_mode == QED_INT_MODE_INTA ? + "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
+ "MSI" : "MSIX"); cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; return rc; @@ -514,19 +515,18 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) int qed_slowpath_irq_req(struct qed_hwfn *hwfn) { struct qed_dev *cdev = hwfn->cdev; + u32 int_mode; int rc = 0; u8 id; - if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int_mode = cdev->int_params.out.int_mode; + if (int_mode == QED_INT_MODE_MSIX) { id = hwfn->my_id; snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", id, cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); rc = request_irq(cdev->int_params.msix_table[id].vector, qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); - if (!rc) - DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), - "Requested slowpath MSI-X\n"); } else { unsigned long flags = 0; @@ -541,6 +541,13 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn) flags, cdev->name, cdev); } + if (rc) + DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); + else + DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), + "Requested slowpath %s\n", + (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); + return rc; } @@ -974,8 +981,7 @@ static u32 qed_sb_init(struct qed_dev *cdev, } static u32 qed_sb_release(struct qed_dev *cdev, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { struct qed_hwfn *p_hwfn; int hwfn_index; @@ -1025,20 +1031,23 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) link_params->speed.autoneg = params->autoneg; if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { link_params->speed.advertised_speeds = 0; - if ((params->adv_speeds & SUPPORTED_1000baseT_Half) || - (params->adv_speeds & SUPPORTED_1000baseT_Full)) + if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) || + (params->adv_speeds & QED_LM_1000baseT_Full_BIT)) + link_params->speed.advertised_speeds |= + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; - if (params->adv_speeds & SUPPORTED_10000baseKR_Full) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; - if (params->adv_speeds & SUPPORTED_40000baseLR4_Full) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; - if (params->adv_speeds & 0) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; - if (params->adv_speeds & 0) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT) link_params->speed.advertised_speeds |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } @@ -1168,50 +1177,56 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->link_up = true; /* TODO - at the moment assume supported and advertised speed equal */ - if_link->supported_caps = SUPPORTED_FIBRE; + if_link->supported_caps = QED_LM_FIBRE_BIT; if (params.speed.autoneg) - if_link->supported_caps |= SUPPORTED_Autoneg; + if_link->supported_caps |= QED_LM_Autoneg_BIT; if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) - if_link->supported_caps |= SUPPORTED_Asym_Pause; + 
if_link->supported_caps |= QED_LM_Asym_Pause_BIT; if (params.pause.autoneg || params.pause.forced_rx || params.pause.forced_tx) - if_link->supported_caps |= SUPPORTED_Pause; + if_link->supported_caps |= QED_LM_Pause_BIT; if_link->advertised_caps = if_link->supported_caps; if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - if_link->advertised_caps |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT | + QED_LM_1000baseT_Full_BIT; if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - if_link->advertised_caps |= SUPPORTED_10000baseKR_Full; + if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - if_link->advertised_caps |= 0; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT; + if (params.speed.advertised_speeds & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT; if (params.speed.advertised_speeds & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - if_link->advertised_caps |= 0; + if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT; if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - if_link->supported_caps |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + if_link->supported_caps |= QED_LM_1000baseT_Half_BIT | + QED_LM_1000baseT_Full_BIT; if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - if_link->supported_caps |= SUPPORTED_10000baseKR_Full; + if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT; + if (link_caps.speed_capabilities & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - if_link->supported_caps |= SUPPORTED_40000baseLR4_Full; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - if_link->supported_caps |= 0; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT; if (link_caps.speed_capabilities & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - if_link->supported_caps |= 0; + if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT; if (link.link_up) if_link->speed = link.speed; @@ -1231,33 +1246,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; /* Link partner capabilities */ - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_1G_HD) - if_link->lp_caps |= SUPPORTED_1000baseT_Half; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_1G_FD) - if_link->lp_caps |= SUPPORTED_1000baseT_Full; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_10G) - if_link->lp_caps |= SUPPORTED_10000baseKR_Full; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_40G) - if_link->lp_caps |= SUPPORTED_40000baseLR4_Full; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_50G) - if_link->lp_caps |= 0; - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_100G) - 
if_link->lp_caps |= 0; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD) + if_link->lp_caps |= QED_LM_1000baseT_Half_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) + if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) + if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) + if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) + if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) + if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) + if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; if (link.an_complete) - if_link->lp_caps |= SUPPORTED_Autoneg; + if_link->lp_caps |= QED_LM_Autoneg_BIT; if (link.partner_adv_pause) - if_link->lp_caps |= SUPPORTED_Pause; + if_link->lp_caps |= QED_LM_Pause_BIT; if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) - if_link->lp_caps |= SUPPORTED_Asym_Pause; + if_link->lp_caps |= QED_LM_Asym_Pause_BIT; } static void qed_get_current_link(struct qed_dev *cdev, @@ -1391,3 +1402,24 @@ const struct qed_common_ops qed_common_ops_pass = { .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; + +void qed_get_protocol_stats(struct qed_dev *cdev, + enum qed_mcp_protocol_type type, + union qed_mcp_protocol_stats *stats) +{ + struct qed_eth_stats eth_stats; + + memset(stats, 0, sizeof(*stats)); + + switch (type) { + case QED_MCP_LAN_STATS: + qed_get_vport_stats(cdev, ð_stats); + stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts; + stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; + stats->lan_stats.fcs_err = -1; + break; + default: + DP_ERR(cdev, "Invalid protocol type = %d\n", type); + return; + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index a240f26344a4..4c212667b482 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -54,8 +54,7 @@ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) return true; } -void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PORT); @@ -68,8 +67,7 @@ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); } -void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); u32 tmp, i; @@ -99,8 +97,7 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn) return 0; } -static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info = p_hwfn->mcp_info; u32 drv_mb_offsize, mfw_mb_offsize; @@ -143,8 +140,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info; u32 size; @@ -165,9 +161,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * 
sizeof(u32); p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); - p_info->mfw_mb_shadow = - kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( - p_info->mfw_mb_length), GFP_KERNEL); + p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; @@ -189,8 +183,7 @@ err: * access is achieved by setting a blocking flag, which will fail other * competing contexts to send their mailboxes. */ -static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, - u32 cmd) +static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd) { spin_lock_bh(&p_hwfn->mcp_info->lock); @@ -221,15 +214,13 @@ static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, return 0; } -static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, - u32 cmd) +static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd) { if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) spin_unlock_bh(&p_hwfn->mcp_info->lock); } -int qed_mcp_reset(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; u8 delay = CHIP_MCP_RESP_ITER_US; @@ -326,7 +317,8 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); } else { /* FW BUG! */ - DP_ERR(p_hwfn, "MFW failed to respond!\n"); + DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n", + cmd, param); *o_mcp_resp = 0; rc = -EAGAIN; } @@ -342,7 +334,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } @@ -399,8 +391,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, } int qed_mcp_load_req(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *p_load_code) + struct qed_ptt *p_ptt, u32 *p_load_code) { struct qed_dev *cdev = p_hwfn->cdev; struct qed_mcp_mb_params mb_params; @@ -527,8 +518,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, - transceiver_data))); + offsetof(struct public_port, transceiver_data))); transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE); @@ -540,8 +530,7 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, } static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool b_reset) + struct qed_ptt *p_ptt, bool b_reset) { struct qed_mcp_link_state *p_link; u8 max_bw, min_bw; @@ -557,8 +546,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, "Received link update [0x%08x] from mfw [Addr 0x%x]\n", status, (u32)(p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, - link_status))); + offsetof(struct public_port, link_status))); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); @@ -635,6 +623,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? QED_LINK_PARTNER_SPEED_20G : 0; p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_25G : 0; + p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? 
QED_LINK_PARTNER_SPEED_40G : 0; p_link->partner_adv_speed |= @@ -722,6 +713,48 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return 0; } +static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum MFW_DRV_MSG_TYPE type) +{ + enum qed_mcp_protocol_type stats_type; + union qed_mcp_protocol_stats stats; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + u32 hsi_param; + + switch (type) { + case MFW_DRV_MSG_GET_LAN_STATS: + stats_type = QED_MCP_LAN_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; + break; + case MFW_DRV_MSG_GET_FCOE_STATS: + stats_type = QED_MCP_FCOE_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; + break; + case MFW_DRV_MSG_GET_ISCSI_STATS: + stats_type = QED_MCP_ISCSI_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; + break; + case MFW_DRV_MSG_GET_RDMA_STATS: + stats_type = QED_MCP_RDMA_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; + break; + default: + DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); + return; + } + + qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_STATS; + mb_params.param = hsi_param; + memcpy(&union_data, &stats, sizeof(stats)); + mb_params.p_data_src = &union_data; + qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); +} + static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, struct public_func *p_shmem_info) { @@ -752,8 +785,7 @@ static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct public_func *p_data, - int pfid) + struct public_func *p_data, int pfid) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); @@ -763,51 +795,20 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, memset(p_data, 0, sizeof(*p_data)); - size = min_t(u32, sizeof(*p_data), - QED_SECTION_SIZE(mfw_path_offsize)); + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); for (i = 0; i < size / sizeof(u32); i++) ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, func_addr + (i << 2)); return size; } -int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *p_pf) -{ - struct public_func shmem_info; - int i; - - /* Find first Ethernet interface in port */ - for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev); - i += p_hwfn->cdev->num_ports_in_engines) { - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID_BY_REL(p_hwfn, i)); - - if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) - continue; - - if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) == - FUNC_MF_CFG_PROTOCOL_ETHERNET) { - *p_pf = (u8)i; - return 0; - } - } - - DP_NOTICE(p_hwfn, - "Failed to find on port an ethernet interface in MF_SI mode\n"); - - return -EINVAL; -} - -static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; struct public_func shmem_info; u32 resp = 0, param = 0; - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID(p_hwfn)); + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); qed_read_pf_bandwidth(p_hwfn, &shmem_info); @@ -867,6 +868,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_GET_LAN_STATS: + case MFW_DRV_MSG_GET_FCOE_STATS: + case 
MFW_DRV_MSG_GET_ISCSI_STATS: + case MFW_DRV_MSG_GET_RDMA_STATS: + qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); + break; case MFW_DRV_MSG_BW_UPDATE: qed_mcp_update_bw(p_hwfn, p_ptt); break; @@ -940,8 +947,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, return 0; } -int qed_mcp_get_media_type(struct qed_dev *cdev, - u32 *p_media_type) +int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) { struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_ptt *p_ptt; @@ -950,7 +956,7 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } @@ -1003,15 +1009,13 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_mcp_function_info *info; struct public_func shmem_info; - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID(p_hwfn)); + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); info = &p_hwfn->mcp_info->func_info; info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; - if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, - &info->protocol)) { + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return -EINVAL; @@ -1072,15 +1076,13 @@ struct qed_mcp_link_capabilities return &p_hwfn->mcp_info->link_capabilities; } -int qed_mcp_drain(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NIG_DRAIN, 1000, - &resp, ¶m); + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); /* Wait for the drain to complete before returning */ msleep(1020); @@ -1089,8 +1091,7 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn, } int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *p_flash_size) + struct qed_ptt *p_ptt, u32 *p_flash_size) { u32 flash_size; @@ -1168,8 +1169,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_led_mode mode) +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_led_mode mode) { u32 resp = 0, param = 0, drv_mb_param; int rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 7f319aa1b229..c6372fa574b7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -60,9 +60,10 @@ struct qed_mcp_link_state { #define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) #define QED_LINK_PARTNER_SPEED_10G BIT(2) #define QED_LINK_PARTNER_SPEED_20G BIT(3) -#define QED_LINK_PARTNER_SPEED_40G BIT(4) -#define QED_LINK_PARTNER_SPEED_50G BIT(5) -#define QED_LINK_PARTNER_SPEED_100G BIT(6) +#define QED_LINK_PARTNER_SPEED_25G BIT(4) +#define QED_LINK_PARTNER_SPEED_40G BIT(5) +#define QED_LINK_PARTNER_SPEED_50G BIT(6) +#define QED_LINK_PARTNER_SPEED_100G BIT(7) u32 partner_adv_speed; bool partner_tx_flow_ctrl_en; @@ -105,6 +106,47 @@ struct qed_mcp_drv_version { u8 name[MCP_DRV_VER_STR_SIZE - 4]; }; +struct qed_mcp_lan_stats { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; +}; + +struct qed_mcp_fcoe_stats { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct qed_mcp_iscsi_stats { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + 
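
/*
 * Editorial sketch, not part of the patch: how the new protocol-stats
 * union is meant to be consumed. qed_mcp_send_protocol_stats() (in the
 * qed_mcp.c hunk above) maps an MFW message type to one of these stats
 * types, has the protocol driver fill the matching union member, and
 * ships the whole union back to management FW. Stand-alone approximation
 * with local, illustrative type names:
 */
#include <stdint.h>
#include <string.h>

enum proto_type { LAN_STATS, FCOE_STATS };

union proto_stats {
	struct { uint64_t ucast_rx_pkts, ucast_tx_pkts; uint32_t fcs_err; } lan;
	struct { uint64_t rx_pkts, tx_pkts; uint32_t fcs_err, login_failure; } fcoe;
};

/* The union is always zeroed first, so unused members read back as 0. */
static void fill_stats(enum proto_type type, union proto_stats *stats)
{
	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case LAN_STATS:
		stats->lan.ucast_rx_pkts = 1;	   /* would come from vport stats */
		stats->lan.fcs_err = (uint32_t)-1; /* "unknown", as in the patch */
		break;
	default:
		break;	/* unknown type: leave the union zeroed */
	}
}
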
+struct qed_mcp_rdma_stats { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_byts; +}; + +enum qed_mcp_protocol_type { + QED_MCP_LAN_STATS, + QED_MCP_FCOE_STATS, + QED_MCP_ISCSI_STATS, + QED_MCP_RDMA_STATS +}; + +union qed_mcp_protocol_stats { + struct qed_mcp_lan_stats lan_stats; + struct qed_mcp_fcoe_stats fcoe_stats; + struct qed_mcp_iscsi_stats iscsi_stats; + struct qed_mcp_rdma_stats rdma_stats; +}; + /** * @brief - returns the link params of the hw function * @@ -458,6 +500,4 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, u8 min_bw); -int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *p_pf); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f6b86ca1ff79..b49d47f3de71 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -116,8 +116,14 @@ 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL +#define TCFC_REG_WEAK_ENABLE_VF \ + 0x2d0704UL #define TCFC_REG_STRONG_ENABLE_PF \ 0x2d0708UL +#define TCFC_REG_STRONG_ENABLE_VF \ + 0x2d070cUL +#define CCFC_REG_WEAK_ENABLE_VF \ + 0x2e0704UL #define CCFC_REG_STRONG_ENABLE_PF \ 0x2e0708UL #define PGLUE_B_REG_PGL_ADDR_88_F0 \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a52f3fc051f5..2888eb0628f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -25,9 +25,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, - u8 cmd, - u8 protocol, - struct qed_sp_init_data *p_data) + u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) { u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; struct qed_spq_entry *p_ent = NULL; @@ -38,7 +36,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, rc = qed_spq_get_entry(p_hwfn, pp_ent); - if (rc != 0) + if (rc) return rc; p_ent = *pp_ent; @@ -321,8 +319,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_START, - PROTOCOLID_COMMON, - &init_data); + PROTOCOLID_COMMON, &init_data); if (rc) return rc; @@ -356,8 +353,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); - qed_tunn_set_pf_start_params(p_hwfn, p_tunn, - &p_ramrod->tunnel_config); + qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); if (IS_MF_SI(p_hwfn)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; @@ -389,8 +385,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", - sb, sb_index, - p_ramrod->outer_tag); + sb, sb_index, p_ramrod->outer_tag); rc = qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index d73456eab1d7..0265a32c8681 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -41,8 +41,7 @@ ***************************************************************************/ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, void *cookie, - union event_ring_data *data, - u8 fw_return_code) + union event_ring_data *data, u8 fw_return_code) { struct qed_spq_comp_done *comp_done; @@ -109,9 +108,8 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn, 
/*************************************************************************** * SPQ entries inner API ***************************************************************************/ -static int -qed_spq_fill_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent) +static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) { p_ent->flags = 0; @@ -189,8 +187,7 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, - struct qed_spq *p_spq, - struct qed_spq_entry *p_ent) + struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { struct qed_chain *p_chain = &p_hwfn->p_spq->chain; u16 echo = qed_chain_get_prod_idx(p_chain); @@ -255,8 +252,7 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, /*************************************************************************** * EQ API ***************************************************************************/ -void qed_eq_prod_update(struct qed_hwfn *p_hwfn, - u16 prod) +void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) { u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); @@ -267,9 +263,7 @@ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, mmiowb(); } -int qed_eq_completion(struct qed_hwfn *p_hwfn, - void *cookie) - +int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) { struct qed_eq *p_eq = cookie; struct qed_chain *p_chain = &p_eq->chain; @@ -323,8 +317,7 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, return rc; } -struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, - u16 num_elem) +struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) { struct qed_eq *p_eq; @@ -348,11 +341,8 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, } /* register EQ completion on the SP SB */ - qed_int_register_cb(p_hwfn, - qed_eq_completion, - p_eq, - &p_eq->eq_sb_index, - &p_eq->p_fw_cons); + qed_int_register_cb(p_hwfn, qed_eq_completion, + p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); return p_eq; @@ -361,14 +351,12 @@ eq_allocate_fail: return NULL; } -void qed_eq_setup(struct qed_hwfn *p_hwfn, - struct qed_eq *p_eq) +void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq) { qed_chain_reset(&p_eq->chain); } -void qed_eq_free(struct qed_hwfn *p_hwfn, - struct qed_eq *p_eq) +void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq) { if (!p_eq) return; @@ -379,10 +367,9 @@ void qed_eq_free(struct qed_hwfn *p_hwfn, /*************************************************************************** * CQE API - manipulate EQ functionality ***************************************************************************/ -static int qed_cqe_completion( - struct qed_hwfn *p_hwfn, - struct eth_slow_path_rx_cqe *cqe, - enum protocol_type protocol) +static int qed_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe, + enum protocol_type protocol) { if (IS_VF(p_hwfn->cdev)) return 0; @@ -463,8 +450,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) u32 capacity; /* SPQ struct */ - p_spq = - kzalloc(sizeof(struct qed_spq), GFP_KERNEL); + p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); if (!p_spq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); return -ENOMEM; @@ -525,9 +511,7 @@ void qed_spq_free(struct qed_hwfn *p_hwfn) kfree(p_spq); } -int -qed_spq_get_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry **pp_ent) +int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -538,14 +522,15 
@@ qed_spq_get_entry(struct qed_hwfn *p_hwfn, if (list_empty(&p_spq->free_pool)) { p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC); if (!p_ent) { + DP_NOTICE(p_hwfn, + "Failed to allocate an SPQ entry for a pending ramrod\n"); rc = -ENOMEM; goto out_unlock; } p_ent->queue = &p_spq->unlimited_pending; } else { p_ent = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); + struct qed_spq_entry, list); list_del(&p_ent->list); p_ent->queue = &p_spq->pending; } @@ -564,8 +549,7 @@ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool); } -void qed_spq_return_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent) +void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { spin_lock_bh(&p_hwfn->p_spq->lock); __qed_spq_return_entry(p_hwfn, p_ent); @@ -586,10 +570,9 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, * * @return int */ -static int -qed_spq_add_entry(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - enum spq_priority priority) +static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + enum spq_priority priority) { struct qed_spq *p_spq = p_hwfn->p_spq; @@ -604,8 +587,7 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_en2; p_en2 = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); + struct qed_spq_entry, list); list_del(&p_en2->list); /* Copy the ring element physical pointer to the new @@ -655,8 +637,7 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) * Posting new Ramrods ***************************************************************************/ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, - struct list_head *head, - u32 keep_reserve) + struct list_head *head, u32 keep_reserve) { struct qed_spq *p_spq = p_hwfn->p_spq; int rc; @@ -690,8 +671,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) break; p_ent = list_first_entry(&p_spq->unlimited_pending, - struct qed_spq_entry, - list); + struct qed_spq_entry, list); if (!p_ent) return -EINVAL; @@ -705,8 +685,7 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) } int qed_spq_post(struct qed_hwfn *p_hwfn, - struct qed_spq_entry *p_ent, - u8 *fw_return_code) + struct qed_spq_entry *p_ent, u8 *fw_return_code) { int rc = 0; struct qed_spq *p_spq = p_hwfn ? 
p_hwfn->p_spq : NULL; @@ -803,8 +782,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, return -EINVAL; spin_lock_bh(&p_spq->lock); - list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, - list) { + list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; @@ -846,15 +824,22 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, if (!found) { DP_NOTICE(p_hwfn, - "Failed to find an entry this EQE completes\n"); + "Failed to find an entry this EQE [echo %04x] completes\n", + le16_to_cpu(echo)); return -EEXIST; } - DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n", + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Complete EQE [echo %04x]: func %p cookie %p)\n", + le16_to_cpu(echo), p_ent->comp_cb.function, p_ent->comp_cb.cookie); if (found->comp_cb.function) found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, fw_return_code); + else + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Got a completion without a callback function\n"); if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || (found->queue == &p_spq->unlimited_pending)) @@ -901,14 +886,12 @@ consq_allocate_fail: return NULL; } -void qed_consq_setup(struct qed_hwfn *p_hwfn, - struct qed_consq *p_consq) +void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq) { qed_chain_reset(&p_consq->chain); } -void qed_consq_free(struct qed_hwfn *p_hwfn, - struct qed_consq *p_consq) +void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq) { if (!p_consq) return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 15399da268d9..cb68674640f9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -60,7 +60,8 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) } fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; - if (fp_minor > ETH_HSI_VER_MINOR) { + if (fp_minor > ETH_HSI_VER_MINOR && + fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", @@ -699,7 +700,7 @@ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, &qzone_id); reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; - val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; + val = enable ? (vf->abs_vf_id | BIT(8)) : 0; qed_wr(p_hwfn, p_ptt, reg_addr, val); } } @@ -1090,13 +1091,13 @@ static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, /* Prepare response for all extended tlvs if they are found by PF */ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { - if (!(tlvs_mask & (1 << i))) + if (!(tlvs_mask & BIT(i))) continue; resp = qed_add_tlv(p_hwfn, &p_mbx->offset, qed_iov_vport_to_tlv(p_hwfn, i), size); - if (tlvs_accepted & (1 << i)) + if (tlvs_accepted & BIT(i)) resp->hdr.status = status; else resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; @@ -1241,6 +1242,16 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_req->num_vlan_filters, p_resp->num_vlan_filters, p_req->num_mc_filters, p_resp->num_mc_filters); + + /* Some legacy OSes are incapable of correctly handling this + * failure. 
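
/*
 * Editorial sketch, not part of the patch: the echo-matching scheme whose
 * logging the qed_spq_completion() hunk above improves. Each posted
 * ramrod carries a 16-bit "echo" (its ring producer index); the EQE the
 * firmware returns echoes it back, and the driver walks the
 * completion_pending list to find the matching entry. Minimal userspace
 * model of that lookup:
 */
#include <stdint.h>
#include <stddef.h>

struct spq_entry {
	uint16_t echo;
	struct spq_entry *next;
};

static struct spq_entry *find_by_echo(struct spq_entry *head, uint16_t echo)
{
	for (struct spq_entry *e = head; e; e = e->next)
		if (e->echo == echo)
			return e;
	return NULL;	/* the patch now logs the echo value on this path */
}
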
+ */ + if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) && + (p_vf->acquire.vfdev_info.os_type == + VFPF_ACQUIRE_OS_WINDOWS)) + return PFVF_STATUS_SUCCESS; + return PFVF_STATUS_NO_RESOURCE; } @@ -1280,22 +1291,42 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, memset(resp, 0, sizeof(*resp)); + /* Write the PF version so that VF would know which version + * is supported - might be later overriden. This guarantees that + * VF could recognize legacy PF based on lack of versions in reply. + */ + pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; + pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + + if (vf->state != VF_FREE && vf->state != VF_STOPPED) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", + vf->abs_vf_id, vf->state); + goto out; + } + /* Validate FW compatibility */ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { - DP_INFO(p_hwfn, - "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", - vf->abs_vf_id, - req->vfdev_info.eth_fp_hsi_major, - req->vfdev_info.eth_fp_hsi_minor, - ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); - - /* Write the PF version so that VF would know which version - * is supported. - */ - pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; - pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; - goto out; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] is pre-fastpath HSI\n", + vf->abs_vf_id); + p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; + } else { + DP_INFO(p_hwfn, + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", + vf->abs_vf_id, + req->vfdev_info.eth_fp_hsi_major, + req->vfdev_info.eth_fp_hsi_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + + goto out; + } } /* On 100g PFs, prevent old VFs from loading */ @@ -1334,8 +1365,11 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, pfdev_info->fw_minor = FW_MINOR_VERSION; pfdev_info->fw_rev = FW_REVISION_VERSION; pfdev_info->fw_eng = FW_ENGINEERING_VERSION; - pfdev_info->minor_fp_hsi = min_t(u8, - ETH_HSI_VER_MINOR, + + /* Incorrect when legacy, but doesn't matter as legacy isn't reading + * this field. 
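
/*
 * Editorial sketch, not part of the patch: the acquire-time HSI
 * compatibility decision the hunk above implements. A VF that predates
 * the fastpath-HSI versioning scheme advertises a PRE_FP_HSI capability
 * and gets pinned to the "no packet-len/tunnel" sentinel minor instead
 * of being rejected outright. Constant values here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define HSI_MAJOR 3
#define HSI_NO_PKT_LEN_TUNN 0	/* sentinel minor for legacy VFs */
#define CAP_PRE_FP_HSI (1u << 0)

static bool vf_acquire_ok(uint8_t vf_major, uint32_t vf_caps,
			  uint8_t *eff_minor)
{
	if (vf_major == HSI_MAJOR)
		return true;			  /* normal, matching VF */
	if (vf_caps & CAP_PRE_FP_HSI) {
		*eff_minor = HSI_NO_PKT_LEN_TUNN; /* run it in legacy mode */
		return true;
	}
	return false;				  /* genuinely incompatible */
}
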
+ */ + pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, req->vfdev_info.eth_fp_hsi_minor); pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); @@ -1438,14 +1472,11 @@ static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, filter.type = QED_FILTER_VLAN; filter.vlan = p_vf->shadow_config.vlans[i].vid; - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", filter.vlan, p_vf->relative_vf_id); - rc = qed_sp_eth_filter_ucast(p_hwfn, - p_vf->opaque_fid, - &filter, - QED_SPQ_MODE_CB, NULL); + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to configure VLAN [%04x] to VF [%04x]\n", @@ -1463,7 +1494,7 @@ qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, { int rc = 0; - if ((events & (1 << VLAN_ADDR_FORCED)) && + if ((events & BIT(VLAN_ADDR_FORCED)) && !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); @@ -1479,7 +1510,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (!p_vf->vport_instance) return -EINVAL; - if (events & (1 << MAC_ADDR_FORCED)) { + if (events & BIT(MAC_ADDR_FORCED)) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. */ @@ -1502,7 +1533,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, p_vf->configured_features |= 1 << MAC_ADDR_FORCED; } - if (events & (1 << VLAN_ADDR_FORCED)) { + if (events & BIT(VLAN_ADDR_FORCED)) { struct qed_sp_vport_update_params vport_update; u8 removal; int i; @@ -1572,7 +1603,7 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (filter.vlan) p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; else - p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); + p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); } /* If forced features are terminated, we need to configure the shadow @@ -1619,8 +1650,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, qed_int_cau_conf_sb(p_hwfn, p_ptt, start->sb_addr[sb_id], - vf->igu_sbs[sb_id], - vf->abs_vf_id, 1); + vf->igu_sbs[sb_id], vf->abs_vf_id, 1); } qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); @@ -1632,7 +1662,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, * vfs that would still be fine, since they passed '0' as padding]. 
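
/*
 * Editorial aside, not part of the patch: the repeated "(1 << n)" ->
 * "BIT(n)" conversions throughout this file are purely cosmetic. BIT()
 * is the kernel's linux/bitops.h helper; a userspace stand-in plus a
 * check that the two spellings agree:
 */
#define BIT(nr) (1UL << (nr))

_Static_assert(BIT(8) == (1UL << 8), "BIT(n) is just (1UL << n)");
/* Note BIT() expands to unsigned long, so BIT(31) also avoids the signed
 * overflow that a plain (1 << 31) risks with 32-bit int. */
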
*/ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; - if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { + if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { u8 vf_req = start->only_untagged; vf_info->bulletin.p_virt->default_only_untagged = vf_req; @@ -1650,9 +1680,10 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; + params.check_mac = true; rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); - if (rc != 0) { + if (rc) { DP_ERR(p_hwfn, "qed_iov_vf_mbx_start_vport returned error %d\n", rc); status = PFVF_STATUS_FAILURE; @@ -1679,7 +1710,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, vf->spoof_chk = false; rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); - if (rc != 0) { + if (rc) { DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", rc); status = PFVF_STATUS_FAILURE; @@ -1695,21 +1726,32 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_vf_info *vf, u8 status) + struct qed_vf_info *vf, + u8 status, bool b_legacy) { struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; struct vfpf_start_rxq_tlv *req; + u16 length; mbx->offset = (u8 *)mbx->reply_virt; + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. + */ + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, - sizeof(*p_tlv)); + length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ - if (status == PFVF_STATUS_SUCCESS) { + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { req = &mbx->req_virt->start_rxq; p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + offsetof(struct mstorm_vf_zone, @@ -1717,7 +1759,7 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, sizeof(struct eth_rx_prod_data) * req->rx_qid; } - qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); + qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, @@ -1728,6 +1770,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_rxq_tlv *req; + bool b_legacy_vf = false; int rc; memset(¶ms, 0, sizeof(params)); @@ -1743,13 +1786,27 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, params.sb = req->hw_sb; params.sb_idx = req->sb_index; + /* Legacy VFs have their Producers in a different location, which they + * calculate on their own and clean the producer prior to this. 
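
/*
 * Editorial sketch, not part of the patch: the response-sizing rule the
 * rxq/txq reply hunks here add. Old VF clients hard-assume the reply is
 * a plain default-response TLV, so the PF must shrink its reply for them
 * instead of appending the new producer-offset field. Stand-alone form
 * with illustrative struct names:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct def_resp_tlv { uint16_t type, length; uint8_t status; };
struct queue_resp_tlv { struct def_resp_tlv hdr; uint32_t prod_offset; };

static size_t reply_len(bool b_legacy)
{
	/* Legacy clients get only what they expect; everyone else gets
	 * the extended reply carrying the queue producer offset. */
	return b_legacy ? sizeof(struct def_resp_tlv)
			: sizeof(struct queue_resp_tlv);
}
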
+ */ + if (vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) { + b_legacy_vf = true; + } else { + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_MSDM_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), + 0); + } + rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid, vf->vf_queues[req->rx_qid].fw_cid, ¶ms, vf->abs_vf_id + 0x10, req->bd_max_bytes, req->rxq_addr, - req->cqe_pbl_addr, req->cqe_pbl_size); + req->cqe_pbl_addr, req->cqe_pbl_size, + b_legacy_vf); if (rc) { status = PFVF_STATUS_FAILURE; @@ -1760,7 +1817,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, } out: - qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); + qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); } static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, @@ -1769,23 +1826,38 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, { struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; + bool b_legacy = false; + u16 length; mbx->offset = (u8 *)mbx->reply_virt; + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. + */ + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + b_legacy = true; + + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, - sizeof(*p_tlv)); + length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ - if (status == PFVF_STATUS_SUCCESS) { + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { u16 qid = mbx->req_virt->start_txq.tx_qid; p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, DQ_DEMS_LEGACY); } - qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status); + qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); } static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, @@ -2045,7 +2117,7 @@ qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; /* Ignore the VF request if we're forcing a vlan */ - if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { + if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { p_data->update_inner_vlan_removal_flg = 1; p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; } @@ -2340,7 +2412,7 @@ static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, /* In forced mode, we're willing to remove entries - but we don't add * new ones. 
*/ - if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)) + if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) return 0; if (p_params->opcode == QED_FILTER_ADD || @@ -2374,7 +2446,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, int i; /* If we're in forced-mode, we don't allow any change */ - if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) + if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) return 0; /* First remove entries and then add new ones */ @@ -2509,7 +2581,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, } /* Determine if the unicast filtering is acceptible by PF */ - if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && + if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && (params.type == QED_FILTER_VLAN || params.type == QED_FILTER_MAC_VLAN)) { /* Once VLAN is forced or PVID is set, do not allow @@ -2521,7 +2593,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, goto out; } - if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) && + if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && (params.type == QED_FILTER_MAC || params.type == QED_FILTER_MAC_VLAN)) { if (!ether_addr_equal(p_bulletin->mac, params.mac) || @@ -2749,7 +2821,7 @@ cleanup: /* Mark VF for ack and clean pending state */ if (p_vf->state == VF_RESET) p_vf->state = VF_STOPPED; - ack_vfs[vfid / 32] |= (1 << (vfid % 32)); + ack_vfs[vfid / 32] |= BIT((vfid % 32)); p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64)); p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= @@ -2805,7 +2877,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) continue; vfid = p_vf->abs_vf_id; - if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { + if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; u16 rel_vf_id = p_vf->relative_vf_id; @@ -3064,8 +3136,7 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, vf_info->bulletin.p_virt->valid_bitmap |= feature; /* Forced MAC will disable MAC_ADDR */ - vf_info->bulletin.p_virt->valid_bitmap &= - ~(1 << VFPF_BULLETIN_MAC_ADDR); + vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } @@ -3163,7 +3234,7 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, if (!p_vf || !p_vf->bulletin.p_virt) return NULL; - if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) + if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) return NULL; return p_vf->bulletin.p_virt->mac; @@ -3177,7 +3248,7 @@ u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) if (!p_vf || !p_vf->bulletin.p_virt) return 0; - if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) return 0; return p_vf->bulletin.p_virt->pvid; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 9b780b31b15c..3c9071de5472 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -46,6 +46,17 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) return p_tlv; } +static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) +{ + union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF request status = 0x%x, PF 
reply status = 0x%x\n", + req_status, resp->default_resp.hdr.status); + + mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); +} + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; @@ -103,16 +114,12 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) "VF <-- PF Timeout [Type %d]\n", p_req->first_tlv.tl.type); rc = -EBUSY; - goto exit; } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "PF response: %d [Type %d]\n", *done, p_req->first_tlv.tl.type); } -exit: - mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); - return rc; } @@ -191,6 +198,9 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) DP_VERBOSE(p_hwfn, QED_MSG_IOV, "attempting to acquire resources\n"); + /* Clear response buffer, as this might be a re-send */ + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) @@ -205,9 +215,12 @@ /* PF agrees to allocate our resources */ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { - DP_INFO(p_hwfn, - "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n"); - return -EINVAL; + /* It's possible legacy PF mistakenly accepted; + * but we don't care - simply mark it as + * legacy and continue. + */ + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n"); resources_acquired = true; @@ -215,27 +228,55 @@ attempts < VF_ACQUIRE_THRESH) { qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, &resp->resc); + } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) { + if (pfdev_info->major_fp_hsi && + (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { + DP_NOTICE(p_hwfn, + "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", + pfdev_info->major_fp_hsi, + pfdev_info->minor_fp_hsi, + ETH_HSI_VER_MAJOR, + ETH_HSI_VER_MINOR, + pfdev_info->major_fp_hsi); + rc = -EINVAL; + goto exit; + } - /* Clear response buffer */ - memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); - } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) && - pfdev_info->major_fp_hsi && - (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { - DP_NOTICE(p_hwfn, - "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", - pfdev_info->major_fp_hsi, - pfdev_info->minor_fp_hsi, - ETH_HSI_VER_MAJOR, - ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi); - return -EINVAL; + if (!pfdev_info->major_fp_hsi) { + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + DP_NOTICE(p_hwfn, + "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n"); + rc = -EINVAL; + goto exit; + } else { + DP_INFO(p_hwfn, + "PF is old - try re-acquire to see if it supports FW-version override\n"); + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; + continue; + } + } + + /* If PF/VF are using same Major, PF must have had + * its reasons. Simply fail. 
+ */ + DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n"); + rc = -EINVAL; + goto exit; } else { DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n", resp->hdr.status); - return -EAGAIN; + rc = -EAGAIN; + goto exit; } } + /* Mark the PF as legacy, if needed */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) + p_iov->b_pre_fp_hsi = true; + /* Update bulletin board size with response from PF */ p_iov->bulletin.size = resp->bulletin_size; @@ -253,14 +294,18 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) } } - if (ETH_HSI_VER_MINOR && + if (!p_iov->b_pre_fp_hsi && + ETH_HSI_VER_MINOR && (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { DP_INFO(p_hwfn, "PF is using older fastpath HSI; %02x.%02x is configured\n", ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi); } - return 0; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; } int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) @@ -347,6 +392,9 @@ free_p_iov: return -ENOMEM; } +#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A +#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u8 rx_qid, @@ -374,6 +422,21 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; + /* If PF is legacy, we'll need to calculate producers ourselves + * as well as clean them. + */ + if (pp_prod && p_iov->b_pre_fp_hsi) { + u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; + u32 init_prod_val = 0; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->cdev) + + hw_qid * MSTORM_QZONE_SIZE; + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)(&init_prod_val)); + } /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -381,13 +444,15 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, resp = &p_iov->pf2vf_reply->queue_start; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } /* Learn the address of the producer from the response */ - if (pp_prod) { + if (pp_prod && !p_iov->b_pre_fp_hsi) { u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; @@ -399,6 +464,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)&init_prod_val); } +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -424,10 +491,15 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -470,13 +542,27 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } if (pp_doorbell) { - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; + /* Modern PFs provide the actual offsets, while legacy + * provided only the queue id. 
+ */ + if (!p_iov->b_pre_fp_hsi) { + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + resp->offset; + } else { + u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + u32 db_addr; + + db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY); + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + db_addr; + } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", tx_queue_id, *pp_doorbell, resp->offset); } exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -501,10 +587,15 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -543,10 +634,15 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -567,10 +663,15 @@ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); return rc; } @@ -770,13 +871,18 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; } @@ -797,14 +903,19 @@ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EAGAIN; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EAGAIN; + goto exit; + } p_hwfn->b_int_enabled = 0; - return 0; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; } int qed_vf_pf_release(struct qed_hwfn *p_hwfn) @@ -828,6 +939,8 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn) if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) rc = -EAGAIN; + qed_vf_pf_req_end(p_hwfn, rc); + p_hwfn->b_int_enabled = 0; if (p_iov->vf2pf_request) @@ -896,12 +1009,17 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, resp = &p_iov->pf2vf_reply->default_resp; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EAGAIN; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EAGAIN; + goto exit; + } - return 0; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; } int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) @@ -920,12 +1038,17 @@ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto 
exit; + } - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; +exit: + qed_vf_pf_req_end(p_hwfn, rc); - return 0; + return rc; } u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index b23ce58e932f..35db7a28aa13 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -86,7 +86,7 @@ struct vfpf_acquire_tlv { struct vfpf_first_tlv first_tlv; struct vf_pf_vfdev_info { -#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0) +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ #define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ u64 capabilities; u8 fw_major; @@ -551,6 +551,11 @@ struct qed_vf_iov { /* we set aside a copy of the acquire response */ struct pfvf_acquire_resp_tlv acquire_resp; + + /* In case PF originates prior to the fp-hsi version comparison, + * this has to be propagated as it affects the fastpath. + */ + bool b_pre_fp_hsi; }; #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 02b06d4e40ae..e01adce4a966 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -25,7 +25,7 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 1 +#define QEDE_REVISION_VERSION 9 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ @@ -36,6 +36,8 @@ struct qede_stats { u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; u64 rx_ucast_bytes; u64 rx_mcast_bytes; u64 rx_bcast_bytes; @@ -124,16 +126,22 @@ struct qede_dev { (edev)->dev_info.num_tc) struct qede_fastpath *fp_array; - u16 req_rss; - u16 num_rss; + u8 req_num_tx; + u8 fp_num_tx; + u8 req_num_rx; + u8 fp_num_rx; + u16 req_queues; + u16 num_queues; u8 num_tc; -#define QEDE_RSS_CNT(edev) ((edev)->num_rss) -#define QEDE_TSS_CNT(edev) ((edev)->num_rss * \ - (edev)->num_tc) -#define QEDE_TSS_IDX(edev, txqidx) ((txqidx) % (edev)->num_rss) -#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / (edev)->num_rss) +#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) +#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) +#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \ + (edev)->num_tc) +#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \ + QEDE_TSS_COUNT(edev)) +#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev)) #define QEDE_TX_QUEUE(edev, txqidx) \ - (&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \ + (&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\ (edev), (txqidx))]) struct qed_int_info int_info; @@ -235,6 +243,7 @@ struct qede_rx_queue { u16 num_rx_buffers; u16 rxq_id; + u64 rcv_pkts; u64 rx_hw_errors; u64 rx_alloc_errors; u64 rx_ip_frags; @@ -263,6 +272,10 @@ struct qede_tx_queue { union db_prod tx_db; u16 num_tx_buffers; + u64 xmit_pkts; + u64 stopped_cnt; + + bool is_legacy; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ @@ -277,7 +290,11 @@ struct qede_tx_queue { struct qede_fastpath { struct qede_dev *edev; - u8 rss_id; +#define QEDE_FASTPATH_TX BIT(0) +#define QEDE_FASTPATH_RX BIT(1) +#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) + u8 type; + u8 id; struct napi_struct napi; struct qed_sb_info *sb_info; struct qede_rx_queue *rxq; @@ -337,6 +354,6 @@ 
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, #define QEDE_MIN_PKT_LEN 64 #define QEDE_RX_HDR_SIZE 256 -#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) +#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f8492cac9290..4d45945bc34c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -35,6 +35,7 @@ static const struct { u64 offset; char string[ETH_GSTRING_LEN]; } qede_rqstats_arr[] = { + QEDE_RQSTAT(rcv_pkts), QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), QEDE_RQSTAT(rx_ip_frags), @@ -44,6 +45,24 @@ static const struct { #define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \ (*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\ qede_rqstats_arr[(sindex)].offset))) +#define QEDE_TQSTAT_OFFSET(stat_name) \ + (offsetof(struct qede_tx_queue, stat_name)) +#define QEDE_TQSTAT_STRING(stat_name) (#stat_name) +#define QEDE_TQSTAT(stat_name) \ + {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)} +#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr) +static const struct { + u64 offset; + char string[ETH_GSTRING_LEN]; +} qede_tqstats_arr[] = { + QEDE_TQSTAT(xmit_pkts), + QEDE_TQSTAT(stopped_cnt), +}; + +#define QEDE_TQSTATS_DATA(dev, sindex, tssid, tcid) \ + (*((u64 *)(((u64)(&dev->fp_array[tssid].txqs[tcid])) +\ + qede_tqstats_arr[(sindex)].offset))) + static const struct { u64 offset; char string[ETH_GSTRING_LEN]; @@ -107,6 +126,8 @@ static const struct { QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), QEDE_STAT(tx_err_drop_pkts), + QEDE_STAT(ttl0_discard), + QEDE_STAT(packet_too_big_discard), QEDE_STAT(coalesced_pkts), QEDE_STAT(coalesced_events), @@ -151,17 +172,29 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { int i, j, k; + for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { + int tc; + + for (j = 0; j < QEDE_NUM_RQSTATS; j++) + sprintf(buf + (k + j) * ETH_GSTRING_LEN, + "%d: %s", i, qede_rqstats_arr[j].string); + k += QEDE_NUM_RQSTATS; + for (tc = 0; tc < edev->num_tc; tc++) { + for (j = 0; j < QEDE_NUM_TQSTATS; j++) + sprintf(buf + (k + j) * ETH_GSTRING_LEN, + "%d.%d: %s", i, tc, + qede_tqstats_arr[j].string); + k += QEDE_NUM_TQSTATS; + } + } + for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { if (IS_VF(edev) && qede_stats_arr[i].pf_only) continue; - strcpy(buf + j * ETH_GSTRING_LEN, + strcpy(buf + (k + j) * ETH_GSTRING_LEN, qede_stats_arr[i].string); j++; } - - for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++) - strcpy(buf + j * ETH_GSTRING_LEN, - qede_rqstats_arr[k].string); } static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -197,19 +230,30 @@ static void qede_get_ethtool_stats(struct net_device *dev, mutex_lock(&edev->qede_lock); + for (qid = 0; qid < QEDE_QUEUE_CNT(edev); qid++) { + int tc; + + if (edev->fp_array[qid].type & QEDE_FASTPATH_RX) { + for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) + buf[cnt++] = QEDE_RQSTATS_DATA(edev, sidx, qid); + } + + if (edev->fp_array[qid].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + for (sidx = 0; sidx < QEDE_NUM_TQSTATS; sidx++) + buf[cnt++] = QEDE_TQSTATS_DATA(edev, + sidx, + qid, tc); + } + } + } + for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) { if (IS_VF(edev) && qede_stats_arr[sidx].pf_only) continue; buf[cnt++] = QEDE_STATS_DATA(edev, sidx); } - for (sidx = 0; sidx < 
QEDE_NUM_RQSTATS; sidx++) { - buf[cnt] = 0; - for (qid = 0; qid < edev->num_rss; qid++) - buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid); - cnt++; - } - mutex_unlock(&edev->qede_lock); } @@ -227,7 +271,8 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) if (qede_stats_arr[i].pf_only) num_stats--; } - return num_stats + QEDE_NUM_RQSTATS; + return num_stats + QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS + + QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->num_tc; case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: @@ -249,78 +294,150 @@ static u32 qede_get_priv_flags(struct net_device *dev) return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; } -static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +struct qede_link_mode_mapping { + u32 qed_link_mode; + u32 ethtool_link_mode; +}; + +static const struct qede_link_mode_mapping qed_lm_map[] = { + {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, + {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT}, + {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, + {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, + {QED_LM_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, +}; + +#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ +{ \ + int i; \ + \ + for (i = 0; i < QED_LM_COUNT; i++) { \ + if ((caps) & (qed_lm_map[i].qed_link_mode)) \ + __set_bit(qed_lm_map[i].ethtool_link_mode,\ + lk_ksettings->link_modes.name); \ + } \ +} + +#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \ +{ \ + int i; \ + \ + for (i = 0; i < QED_LM_COUNT; i++) { \ + if (test_bit(qed_lm_map[i].ethtool_link_mode, \ + lk_ksettings->link_modes.name)) \ + caps |= qed_lm_map[i].qed_link_mode; \ + } \ +} + +static int qede_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { + struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; memset(¤t_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, ¤t_link); - cmd->supported = current_link.supported_caps; - cmd->advertising = current_link.advertised_caps; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported) + + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising) + + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising) + if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { - ethtool_cmd_speed_set(cmd, current_link.speed); - cmd->duplex = current_link.duplex; + base->speed = current_link.speed; + base->duplex = current_link.duplex; } else { - cmd->duplex = DUPLEX_UNKNOWN; - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; } - cmd->port = current_link.port; - cmd->autoneg = (current_link.autoneg) ? 
AUTONEG_ENABLE : - AUTONEG_DISABLE; - cmd->lp_advertising = current_link.lp_caps; + base->port = current_link.port; + base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; return 0; } -static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int qede_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { + const struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; - u32 speed; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { - DP_INFO(edev, - "Link settings are not allowed to be changed\n"); + DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } - memset(¤t_link, 0, sizeof(current_link)); memset(¶ms, 0, sizeof(params)); edev->ops->common->get_link(edev->cdev, ¤t_link); - speed = ethtool_cmd_speed(cmd); params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (base->autoneg == AUTONEG_ENABLE) { params.autoneg = true; params.forced_speed = 0; - params.adv_speeds = cmd->advertising; - } else { /* forced speed */ + QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising) + } else { /* forced speed */ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; params.autoneg = false; - params.forced_speed = speed; - switch (speed) { + params.forced_speed = base->speed; + switch (base->speed) { case SPEED_10000: if (!(current_link.supported_caps & - SUPPORTED_10000baseKR_Full)) { + QED_LM_10000baseKR_Full_BIT)) { DP_INFO(edev, "10G speed not supported\n"); return -EINVAL; } - params.adv_speeds = SUPPORTED_10000baseKR_Full; + params.adv_speeds = QED_LM_10000baseKR_Full_BIT; + break; + case SPEED_25000: + if (!(current_link.supported_caps & + QED_LM_25000baseKR_Full_BIT)) { + DP_INFO(edev, "25G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_25000baseKR_Full_BIT; break; case SPEED_40000: if (!(current_link.supported_caps & - SUPPORTED_40000baseLR4_Full)) { + QED_LM_40000baseLR4_Full_BIT)) { DP_INFO(edev, "40G speed not supported\n"); return -EINVAL; } - params.adv_speeds = SUPPORTED_40000baseLR4_Full; + params.adv_speeds = QED_LM_40000baseLR4_Full_BIT; + break; + case SPEED_50000: + if (!(current_link.supported_caps & + QED_LM_50000baseKR2_Full_BIT)) { + DP_INFO(edev, "50G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_50000baseKR2_Full_BIT; + break; + case SPEED_100000: + if (!(current_link.supported_caps & + QED_LM_100000baseKR4_Full_BIT)) { + DP_INFO(edev, "100G speed not supported\n"); + return -EINVAL; + } + params.adv_speeds = QED_LM_100000baseKR4_Full_BIT; break; default: - DP_INFO(edev, "Unsupported speed %u\n", speed); + DP_INFO(edev, "Unsupported speed %u\n", base->speed); return -EINVAL; } } @@ -368,8 +485,7 @@ static u32 qede_get_msglevel(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); - return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | - edev->dp_module; + return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module; } static void qede_set_msglevel(struct net_device *ndev, u32 level) @@ -393,8 +509,7 @@ static int qede_nway_reset(struct net_device *dev) struct qed_link_params link_params; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { - DP_INFO(edev, - "Link settings are not allowed to be changed\n"); + DP_INFO(edev, 
"Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } @@ -467,7 +582,7 @@ static int qede_set_coalesce(struct net_device *dev, rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; - for_each_rss(i) { + for_each_queue(i) { sb_id = edev->fp_array[i].sb_info->igu_sb_id; rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, (u8)i, sb_id); @@ -563,7 +678,7 @@ static int qede_set_pauseparam(struct net_device *dev, memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; if (epause->autoneg) { - if (!(current_link.supported_caps & SUPPORTED_Autoneg)) { + if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } @@ -619,45 +734,70 @@ static void qede_get_channels(struct net_device *dev, struct qede_dev *edev = netdev_priv(dev); channels->max_combined = QEDE_MAX_RSS_CNT(edev); - channels->combined_count = QEDE_RSS_CNT(edev); + channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - + edev->fp_num_rx; + channels->tx_count = edev->fp_num_tx; + channels->rx_count = edev->fp_num_rx; } static int qede_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct qede_dev *edev = netdev_priv(dev); + u32 count; DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, channels->combined_count); - /* We don't support separate rx / tx, nor `other' channels. */ - if (channels->rx_count || channels->tx_count || - channels->other_count || (channels->combined_count == 0) || - (channels->combined_count > QEDE_MAX_RSS_CNT(edev))) { + count = channels->rx_count + channels->tx_count + + channels->combined_count; + + /* We don't support `other' channels */ + if (channels->other_count) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "command parameters not supported\n"); return -EINVAL; } + if (!(channels->combined_count || (channels->rx_count && + channels->tx_count))) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "need to request at least one transmit and one receive channel\n"); + return -EINVAL; + } + + if (count > QEDE_MAX_RSS_CNT(edev)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "requested channels = %d max supported channels = %d\n", + count, QEDE_MAX_RSS_CNT(edev)); + return -EINVAL; + } + /* Check if there was a change in the active parameters */ - if (channels->combined_count == QEDE_RSS_CNT(edev)) { + if ((count == QEDE_QUEUE_CNT(edev)) && + (channels->tx_count == edev->fp_num_tx) && + (channels->rx_count == edev->fp_num_rx)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "No change in active parameters\n"); return 0; } /* We need the number of queues to be divisible between the hwfns */ - if (channels->combined_count % edev->dev_info.common.num_hwfns) { + if ((count % edev->dev_info.common.num_hwfns) || + (channels->tx_count % edev->dev_info.common.num_hwfns) || + (channels->rx_count % edev->dev_info.common.num_hwfns)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), - "Number of channels must be divisable by %04x\n", + "Number of channels must be divisible by %04x\n", edev->dev_info.common.num_hwfns); return -EINVAL; } /* Set number of queues and reload if necessary */ - edev->req_rss = channels->combined_count; + edev->req_queues = count; + edev->req_num_tx = channels->tx_count; + edev->req_num_rx = channels->rx_count; if (netif_running(dev)) 
qede_reload(edev, NULL, NULL); @@ -727,7 +867,7 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, switch (info->cmd) { case ETHTOOL_GRXRINGS: - info->data = edev->num_rss; + info->data = QEDE_RSS_COUNT(edev); return 0; case ETHTOOL_GRXFH: return qede_get_rss_flags(edev, info); @@ -930,7 +1070,7 @@ static void qede_netif_start(struct qede_dev *edev) if (!netif_running(edev->ndev)) return; - for_each_rss(i) { + for_each_queue(i) { /* Update and reenable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); napi_enable(&edev->fp_array[i].napi); @@ -942,7 +1082,7 @@ static void qede_netif_stop(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); /* Disable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); @@ -952,11 +1092,23 @@ static void qede_netif_stop(struct qede_dev *edev) static int qede_selftest_transmit_traffic(struct qede_dev *edev, struct sk_buff *skb) { - struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0]; + struct qede_tx_queue *txq = NULL; struct eth_tx_1st_bd *first_bd; dma_addr_t mapping; int i, idx, val; + for_each_queue(i) { + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + txq = edev->fp_array[i].txqs; + break; + } + } + + if (!txq) { + DP_NOTICE(edev, "Tx path is not available\n"); + return -1; + } + /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; txq->sw_tx_ring[idx].skb = skb; @@ -1020,14 +1172,26 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, static int qede_selftest_receive_traffic(struct qede_dev *edev) { - struct qede_rx_queue *rxq = edev->fp_array[0].rxq; u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; struct eth_fast_path_rx_reg_cqe *fp_cqe; + struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; u8 *data_ptr; int i; + for_each_queue(i) { + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + rxq = edev->fp_array[i].rxq; + break; + } + } + + if (!rxq) { + DP_NOTICE(edev, "Rx path is not available\n"); + return -1; + } + /* The packet is expected to be received on rx-queue 0 even though RSS is * enabled. This is because queue 0 is configured as the default * queue and the loopback traffic is not IP. 
@@ -1228,8 +1392,8 @@ static int qede_get_tunable(struct net_device *dev, } static const struct ethtool_ops qede_ethtool_ops = { - .get_settings = qede_get_settings, - .set_settings = qede_set_settings, + .get_link_ksettings = qede_get_link_ksettings, + .set_link_ksettings = qede_set_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, @@ -1260,7 +1424,7 @@ static const struct ethtool_ops qede_ethtool_ops = { }; static const struct ethtool_ops qede_vf_ethtool_ops = { - .get_settings = qede_get_settings, + .get_link_ksettings = qede_get_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a6eb6af8cbe8..b4a56e61631a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -222,7 +222,7 @@ int __init qede_init(void) { int ret; - pr_notice("qede_init: %s\n", version); + pr_info("qede_init: %s\n", version); qed_ops = qed_get_eth_ops(); if (!qed_ops) { @@ -253,7 +253,8 @@ int __init qede_init(void) static void __exit qede_cleanup(void) { - pr_notice("qede_cleanup called\n"); + if (debug & QED_LOG_INFO_MASK) + pr_info("qede_cleanup called\n"); unregister_netdevice_notifier(&qede_netdev_notifier); pci_unregister_driver(&qede_pci_driver); @@ -270,8 +271,7 @@ module_exit(qede_cleanup); /* Unmap the data and free skb */ static int qede_free_tx_pkt(struct qede_dev *edev, - struct qede_tx_queue *txq, - int *len) + struct qede_tx_queue *txq, int *len) { u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX; struct sk_buff *skb = txq->sw_tx_ring[idx].skb; @@ -329,8 +329,7 @@ static int qede_free_tx_pkt(struct qede_dev *edev, static void qede_free_failed_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, struct eth_tx_1st_bd *first_bd, - int nbd, - bool data_split) + int nbd, bool data_split) { u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; struct sk_buff *skb = txq->sw_tx_ring[idx].skb; @@ -339,8 +338,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Return prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), - first_bd); + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); @@ -366,8 +364,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, /* Return again prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, - le16_to_cpu(txq->tx_db.data.bd_prod), - first_bd); + le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); /* Free skb */ dev_kfree_skb_any(skb); @@ -376,8 +373,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev, } static u32 qede_xmit_type(struct qede_dev *edev, - struct sk_buff *skb, - int *ipv6_ext) + struct sk_buff *skb, int *ipv6_ext) { u32 rc = XMIT_L4_CSUM; __be16 l3_proto; @@ -434,15 +430,13 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, } static int map_frag_to_bd(struct qede_dev *edev, - skb_frag_t *frag, - struct eth_tx_bd *bd) + skb_frag_t *frag, struct eth_tx_bd *bd) { dma_addr_t mapping; /* Map skb non-linear frag data for DMA */ mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { DP_NOTICE(edev, "Unable to map frag - dropping packet\n"); 
return -ENOMEM; @@ -504,9 +498,8 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) } /* Main transmit function */ -static -netdev_tx_t qede_start_xmit(struct sk_buff *skb, - struct net_device *ndev) +static netdev_tx_t qede_start_xmit(struct sk_buff *skb, + struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); struct netdev_queue *netdev_txq; @@ -526,12 +519,11 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); - WARN_ON(txq_index >= QEDE_TSS_CNT(edev)); + WARN_ON(txq_index >= QEDE_TSS_COUNT(edev)); txq = QEDE_TX_QUEUE(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < - (MAX_SKB_FRAGS + 1)); + WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); @@ -606,6 +598,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; } + /* Legacy FW had flipped behavior in regard to this bit - + * I.e., needed to set to prevent FW from touching encapsulated + * packets when it didn't need to. + */ + if (unlikely(txq->is_legacy)) + first_bd->data.bitfields ^= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; + /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. @@ -731,6 +731,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, qede_update_tx_producer(txq); netif_tx_stop_queue(netdev_txq); + txq->stopped_cnt++; DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "Stop queue was called\n"); /* paired memory barrier is in qede_tx_int(), we have to keep @@ -764,8 +765,7 @@ int qede_txq_has_work(struct qede_tx_queue *txq) return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); } -static int qede_tx_int(struct qede_dev *edev, - struct qede_tx_queue *txq) +static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; u16 hw_bd_cons; @@ -791,6 +791,7 @@ static int qede_tx_int(struct qede_dev *edev, bytes_compl += len; pkts_compl++; txq->sw_tx_cons++; + txq->xmit_pkts++; } netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); @@ -963,8 +964,7 @@ static inline void qede_update_rx_prod(struct qede_dev *edev, static u32 qede_get_rxhash(struct qede_dev *edev, u8 bitfields, - __le32 rss_hash, - enum pkt_hash_types *rxhash_type) + __le32 rss_hash, enum pkt_hash_types *rxhash_type) { enum rss_hash_type htype; @@ -993,12 +993,10 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) static inline void qede_skb_receive(struct qede_dev *edev, struct qede_fastpath *fp, - struct sk_buff *skb, - u16 vlan_tag) + struct sk_buff *skb, u16 vlan_tag) { if (vlan_tag) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_tag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); } @@ -1021,8 +1019,7 @@ static void qede_set_gro_params(struct qede_dev *edev, static int qede_fill_frag_skb(struct qede_dev *edev, struct qede_rx_queue *rxq, - u8 tpa_agg_index, - u16 len_on_bd) + u8 tpa_agg_index, u16 len_on_bd) { struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; @@ -1209,7 +1206,7 @@ static void qede_gro_receive(struct qede_dev *edev, #endif send_skb: - skb_record_rx_queue(skb, fp->rss_id); + skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, skb, vlan_tag); } @@ -1413,7 +1410,7 @@ 
static int qede_rx_int(struct qede_fastpath *fp, int budget) if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { edev->ops->eth_cqe_completion( - edev->cdev, fp->rss_id, + edev->cdev, fp->id, (struct eth_slow_path_rx_cqe *)cqe); goto next_cqe; } @@ -1470,7 +1467,7 @@ alloc_skb: skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); if (unlikely(!skb)) { DP_NOTICE(edev, - "Build_skb failed, dropping incoming packet\n"); + "skb allocation failed, dropping incoming packet\n"); qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); rxq->rx_alloc_errors++; goto next_cqe; @@ -1578,14 +1575,13 @@ alloc_skb: skb->protocol = eth_type_trans(skb, edev->ndev); rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields, - fp_cqe->rss_hash, - &rxhash_type); + fp_cqe->rss_hash, &rxhash_type); skb_set_hash(skb, rx_hash, rxhash_type); qede_set_skb_csum(skb, csum_flag); - skb_record_rx_queue(skb, fp->rss_id); + skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); next_rx_only: @@ -1604,6 +1600,8 @@ next_cqe: /* don't consume bd rx buffer */ /* Update producers */ qede_update_rx_prod(edev, rxq); + rxq->rcv_pkts += rx_pkt; + return rx_pkt; } @@ -1616,10 +1614,12 @@ static int qede_poll(struct napi_struct *napi, int budget) u8 tc; for (tc = 0; tc < edev->num_tc; tc++) - if (qede_txq_has_work(&fp->txqs[tc])) + if (likely(fp->type & QEDE_FASTPATH_TX) && + qede_txq_has_work(&fp->txqs[tc])) qede_tx_int(edev, &fp->txqs[tc]); - rx_work_done = qede_has_rx_work(fp->rxq) ? + rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) ? qede_rx_int(fp, budget) : 0; if (rx_work_done < budget) { qed_sb_update_sb_idx(fp->sb_info); @@ -1639,8 +1639,10 @@ static int qede_poll(struct napi_struct *napi, int budget) rmb(); /* Fall out from the NAPI loop if needed */ - if (!(qede_has_rx_work(fp->rxq) || - qede_has_tx_work(fp))) { + if (!((likely(fp->type & QEDE_FASTPATH_RX) && + qede_has_rx_work(fp->rxq)) || + (likely(fp->type & QEDE_FASTPATH_TX) && + qede_has_tx_work(fp)))) { napi_complete(napi); /* Update and reenable interrupts */ @@ -1711,6 +1713,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->ops->get_vport_stats(edev->cdev, &stats); edev->stats.no_buff_discards = stats.no_buff_discards; + edev->stats.packet_too_big_discard = stats.packet_too_big_discard; + edev->stats.ttl0_discard = stats.ttl0_discard; edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; @@ -1790,9 +1794,9 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; } -static struct rtnl_link_stats64 *qede_get_stats64( - struct net_device *dev, - struct rtnl_link_stats64 *stats) +static +struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); @@ -2106,8 +2110,7 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) } DP_VERBOSE(edev, NETIF_MSG_IFDOWN, - "marked vlan %d as non-configured\n", - vlan->vid); + "marked vlan %d as non-configured\n", vlan->vid); } edev->accept_any_vlan = false; @@ -2149,7 +2152,7 @@ static void qede_udp_tunnel_add(struct net_device *dev, edev->vxlan_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", t_port); set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); @@ -2160,7 +2163,7 @@ static void 
qede_udp_tunnel_add(struct net_device *dev, edev->geneve_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", t_port); set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; @@ -2184,7 +2187,7 @@ static void qede_udp_tunnel_del(struct net_device *dev, edev->vxlan_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", t_port); set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); @@ -2195,7 +2198,7 @@ static void qede_udp_tunnel_del(struct net_device *dev, edev->geneve_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", t_port); set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); break; @@ -2240,15 +2243,13 @@ static const struct net_device_ops qede_netdev_ops = { static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, struct pci_dev *pdev, struct qed_dev_eth_info *info, - u32 dp_module, - u8 dp_level) + u32 dp_module, u8 dp_level) { struct net_device *ndev; struct qede_dev *edev; ndev = alloc_etherdev_mqs(sizeof(*edev), - info->num_queues, - info->num_queues); + info->num_queues, info->num_queues); if (!ndev) { pr_err("etherdev allocation failed\n"); return NULL; @@ -2264,6 +2265,9 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->q_num_rx_buffers = NUM_RX_BDS_DEF; edev->q_num_tx_buffers = NUM_TX_BDS_DEF; + DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", + info->num_queues, info->num_queues); + SET_NETDEV_DEV(ndev, &pdev->dev); memset(&edev->stats, 0, sizeof(edev->stats)); @@ -2352,7 +2356,7 @@ static void qede_free_fp_array(struct qede_dev *edev) struct qede_fastpath *fp; int i; - for_each_rss(i) { + for_each_queue(i) { fp = &edev->fp_array[i]; kfree(fp->sb_info); @@ -2361,22 +2365,33 @@ static void qede_free_fp_array(struct qede_dev *edev) } kfree(edev->fp_array); } - edev->num_rss = 0; + + edev->num_queues = 0; + edev->fp_num_tx = 0; + edev->fp_num_rx = 0; } static int qede_alloc_fp_array(struct qede_dev *edev) { + u8 fp_combined, fp_rx = edev->fp_num_rx; struct qede_fastpath *fp; int i; - edev->fp_array = kcalloc(QEDE_RSS_CNT(edev), + edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev), sizeof(*edev->fp_array), GFP_KERNEL); if (!edev->fp_array) { DP_NOTICE(edev, "fp array allocation failed\n"); goto err; } - for_each_rss(i) { + fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx; + + /* Allocate the FP elements for Rx queues followed by combined and then + * the Tx. This ordering should be maintained so that the respective + * queues (Rx or Tx) will be together in the fastpath array and the + * associated ids will be sequential. 
+ */ + for_each_queue(i) { fp = &edev->fp_array[i]; fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); @@ -2385,16 +2400,33 @@ static int qede_alloc_fp_array(struct qede_dev *edev) goto err; } - fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); - if (!fp->rxq) { - DP_NOTICE(edev, "RXQ struct allocation failed\n"); - goto err; + if (fp_rx) { + fp->type = QEDE_FASTPATH_RX; + fp_rx--; + } else if (fp_combined) { + fp->type = QEDE_FASTPATH_COMBINED; + fp_combined--; + } else { + fp->type = QEDE_FASTPATH_TX; } - fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL); - if (!fp->txqs) { - DP_NOTICE(edev, "TXQ array allocation failed\n"); - goto err; + if (fp->type & QEDE_FASTPATH_TX) { + fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), + GFP_KERNEL); + if (!fp->txqs) { + DP_NOTICE(edev, + "TXQ array allocation failed\n"); + goto err; + } + } + + if (fp->type & QEDE_FASTPATH_RX) { + fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL); + if (!fp->rxq) { + DP_NOTICE(edev, + "RXQ struct allocation failed\n"); + goto err; + } } } @@ -2456,7 +2488,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, bool is_vf, enum qede_probe_mode mode) { struct qed_probe_params probe_params; - struct qed_slowpath_params params; + struct qed_slowpath_params sp_params; struct qed_dev_eth_info dev_info; struct qede_dev *edev; struct qed_dev *cdev; @@ -2479,14 +2511,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_update_pf_params(cdev); /* Start the Slowpath-process */ - memset(¶ms, 0, sizeof(struct qed_slowpath_params)); - params.int_mode = QED_INT_MODE_MSIX; - params.drv_major = QEDE_MAJOR_VERSION; - params.drv_minor = QEDE_MINOR_VERSION; - params.drv_rev = QEDE_REVISION_VERSION; - params.drv_eng = QEDE_ENGINEERING_VERSION; - strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE); - rc = qed_ops->common->slowpath_start(cdev, ¶ms); + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.int_mode = QED_INT_MODE_MSIX; + sp_params.drv_major = QEDE_MAJOR_VERSION; + sp_params.drv_minor = QEDE_MINOR_VERSION; + sp_params.drv_rev = QEDE_REVISION_VERSION; + sp_params.drv_eng = QEDE_ENGINEERING_VERSION; + strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); + rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { pr_notice("Cannot start slowpath\n"); goto err1; @@ -2589,7 +2621,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) qed_ops->common->slowpath_stop(cdev); qed_ops->common->remove(cdev); - pr_notice("Ending successfully qede_remove\n"); + dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } static void qede_remove(struct pci_dev *pdev) @@ -2608,8 +2640,8 @@ static int qede_set_num_queues(struct qede_dev *edev) u16 rss_num; /* Setup queues according to possible resources*/ - if (edev->req_rss) - rss_num = edev->req_rss; + if (edev->req_queues) + rss_num = edev->req_queues; else rss_num = netif_get_num_default_rss_queues() * edev->dev_info.common.num_hwfns; @@ -2619,11 +2651,15 @@ static int qede_set_num_queues(struct qede_dev *edev) rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); if (rc > 0) { /* Managed to request interrupts for our queues */ - edev->num_rss = rc; + edev->num_queues = rc; DP_INFO(edev, "Managed %d [of %d] RSS queues\n", - QEDE_RSS_CNT(edev), rss_num); + QEDE_QUEUE_CNT(edev), rss_num); rc = 0; } + + edev->fp_num_tx = edev->req_num_tx; + edev->fp_num_rx = edev->req_num_rx; + return rc; } @@ -2637,16 +2673,14 @@ static void qede_free_mem_sb(struct qede_dev 
*edev, /* This function allocates fast-path status block memory */ static int qede_alloc_mem_sb(struct qede_dev *edev, - struct qed_sb_info *sb_info, - u16 sb_id) + struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int rc; sb_virt = dma_alloc_coherent(&edev->pdev->dev, - sizeof(*sb_virt), - &sb_phys, GFP_KERNEL); + sizeof(*sb_virt), &sb_phys, GFP_KERNEL); if (!sb_virt) { DP_ERR(edev, "Status block allocation failed\n"); return -ENOMEM; @@ -2678,16 +2712,15 @@ static void qede_free_rx_buffers(struct qede_dev *edev, data = rx_buf->data; dma_unmap_page(&edev->pdev->dev, - rx_buf->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); + rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE); rx_buf->data = NULL; __free_page(data); } } -static void qede_free_sge_mem(struct qede_dev *edev, - struct qede_rx_queue *rxq) { +static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ int i; if (edev->gro_disable) @@ -2706,8 +2739,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, } } -static void qede_free_mem_rxq(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { qede_free_sge_mem(edev, rxq); @@ -2729,9 +2761,6 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct eth_rx_bd *rx_bd; dma_addr_t mapping; struct page *data; - u16 rx_buf_size; - - rx_buf_size = rxq->rx_buf_size; data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) { @@ -2766,8 +2795,7 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, return 0; } -static int qede_alloc_sge_mem(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) { dma_addr_t mapping; int i; @@ -2814,15 +2842,14 @@ err: } /* This function allocates all memory needed per Rx queue */ -static int qede_alloc_mem_rxq(struct qede_dev *edev, - struct qede_rx_queue *rxq) +static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; - rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + - edev->ndev->mtu; + rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; + if (rxq->rx_buf_size > PAGE_SIZE) rxq->rx_buf_size = PAGE_SIZE; @@ -2876,8 +2903,7 @@ err: return rc; } -static void qede_free_mem_txq(struct qede_dev *edev, - struct qede_tx_queue *txq) +static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { /* Free the parallel SW ring */ kfree(txq->sw_tx_ring); @@ -2887,8 +2913,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, } /* This function allocates all memory needed per Tx queue */ -static int qede_alloc_mem_txq(struct qede_dev *edev, - struct qede_tx_queue *txq) +static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { int size, rc; union eth_tx_bd_types *p_virt; @@ -2920,41 +2945,45 @@ err: } /* This function frees all memory of a single fp */ -static void qede_free_mem_fp(struct qede_dev *edev, - struct qede_fastpath *fp) +static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int tc; qede_free_mem_sb(edev, fp->sb_info); - qede_free_mem_rxq(edev, fp->rxq); + if (fp->type & QEDE_FASTPATH_RX) + qede_free_mem_rxq(edev, fp->rxq); - for (tc = 0; tc < edev->num_tc; tc++) - qede_free_mem_txq(edev, &fp->txqs[tc]); + if (fp->type & QEDE_FASTPATH_TX) + for (tc = 0; tc < edev->num_tc; tc++) + qede_free_mem_txq(edev, &fp->txqs[tc]); } /* This function allocates all memory needed for a 
single fp (i.e. an entity - * which contains status block, one rx queue and multiple per-TC tx queues. + * which contains status block, one rx queue and/or multiple per-TC tx queues. */ -static int qede_alloc_mem_fp(struct qede_dev *edev, - struct qede_fastpath *fp) +static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int rc, tc; - rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); - if (rc) - goto err; - - rc = qede_alloc_mem_rxq(edev, fp->rxq); + rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); if (rc) goto err; - for (tc = 0; tc < edev->num_tc; tc++) { - rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (fp->type & QEDE_FASTPATH_RX) { + rc = qede_alloc_mem_rxq(edev, fp->rxq); if (rc) goto err; } + if (fp->type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); + if (rc) + goto err; + } + } + return 0; err: return rc; @@ -2964,7 +2993,7 @@ static void qede_free_mem_load(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; qede_free_mem_fp(edev, fp); @@ -2974,16 +3003,16 @@ static void qede_free_mem_load(struct qede_dev *edev) /* This function allocates all qede memory at NIC load. */ static int qede_alloc_mem_load(struct qede_dev *edev) { - int rc = 0, rss_id; + int rc = 0, queue_id; - for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { - struct qede_fastpath *fp = &edev->fp_array[rss_id]; + for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) { + struct qede_fastpath *fp = &edev->fp_array[queue_id]; rc = qede_alloc_mem_fp(edev, fp); if (rc) { DP_ERR(edev, "Failed to allocate memory for fastpath - rss id = %d\n", - rss_id); + queue_id); qede_free_mem_load(edev); return rc; } @@ -2995,30 +3024,38 @@ static int qede_alloc_mem_load(struct qede_dev *edev) /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { - int rss_id, txq_index, tc; + int queue_id, rxq_index = 0, txq_index = 0, tc; struct qede_fastpath *fp; - for_each_rss(rss_id) { - fp = &edev->fp_array[rss_id]; + for_each_queue(queue_id) { + fp = &edev->fp_array[queue_id]; fp->edev = edev; - fp->rss_id = rss_id; + fp->id = queue_id; memset((void *)&fp->napi, 0, sizeof(fp->napi)); memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); - memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); - fp->rxq->rxq_id = rss_id; + if (fp->type & QEDE_FASTPATH_RX) { + memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); + fp->rxq->rxq_id = rxq_index++; + } - memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); - for (tc = 0; tc < edev->num_tc; tc++) { - txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; - fp->txqs[tc].index = txq_index; + if (fp->type & QEDE_FASTPATH_TX) { + memset((void *)fp->txqs, 0, + (edev->num_tc * sizeof(*fp->txqs))); + for (tc = 0; tc < edev->num_tc; tc++) { + fp->txqs[tc].index = txq_index + + tc * QEDE_TSS_COUNT(edev); + if (edev->dev_info.is_legacy) + fp->txqs[tc].is_legacy = true; + } + txq_index++; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", - edev->ndev->name, rss_id); + edev->ndev->name, queue_id); } edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); @@ -3028,12 +3065,13 @@ static int qede_set_real_num_queues(struct qede_dev *edev) { int rc = 0; - rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); + rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); return 
rc; } - rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); + + rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); return rc; @@ -3046,7 +3084,7 @@ static void qede_napi_disable_remove(struct qede_dev *edev) { int i; - for_each_rss(i) { + for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); netif_napi_del(&edev->fp_array[i].napi); @@ -3058,7 +3096,7 @@ static void qede_napi_add_enable(struct qede_dev *edev) int i; /* Add NAPI objects */ - for_each_rss(i) { + for_each_queue(i) { netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll, NAPI_POLL_WEIGHT); napi_enable(&edev->fp_array[i].napi); @@ -3087,14 +3125,14 @@ static int qede_req_msix_irqs(struct qede_dev *edev) int i, rc; /* Sanitize number of interrupts == number of prepared RSS queues */ - if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { + if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) { DP_ERR(edev, "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", - QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); + QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt); return -EINVAL; } - for (i = 0; i < QEDE_RSS_CNT(edev); i++) { + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { rc = request_irq(edev->int_info.msix[i].vector, qede_msix_fp_int, 0, edev->fp_array[i].name, &edev->fp_array[i]); @@ -3139,18 +3177,17 @@ static int qede_setup_irqs(struct qede_dev *edev) /* qed should learn receive the RSS ids and callbacks */ ops = edev->ops->common; - for (i = 0; i < QEDE_RSS_CNT(edev); i++) + for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) ops->simd_handler_config(edev->cdev, &edev->fp_array[i], i, qede_simd_fp_handler); - edev->int_info.used_cnt = QEDE_RSS_CNT(edev); + edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev); } return 0; } static int qede_drain_txq(struct qede_dev *edev, - struct qede_tx_queue *txq, - bool allow_drain) + struct qede_tx_queue *txq, bool allow_drain) { int rc, cnt = 1000; @@ -3202,45 +3239,53 @@ static int qede_stop_queues(struct qede_dev *edev) } /* Flush Tx queues. 
If needed, request drain from MCP */ - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; - for (tc = 0; tc < edev->num_tc; tc++) { - struct qede_tx_queue *txq = &fp->txqs[tc]; + if (fp->type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + struct qede_tx_queue *txq = &fp->txqs[tc]; - rc = qede_drain_txq(edev, txq, true); - if (rc) - return rc; + rc = qede_drain_txq(edev, txq, true); + if (rc) + return rc; + } } } - /* Stop all Queues in reverse order*/ - for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { + /* Stop all Queues in reverse order */ + for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { struct qed_stop_rxq_params rx_params; - /* Stop the Tx Queue(s)*/ - for (tc = 0; tc < edev->num_tc; tc++) { - struct qed_stop_txq_params tx_params; - - tx_params.rss_id = i; - tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; - rc = edev->ops->q_tx_stop(cdev, &tx_params); - if (rc) { - DP_ERR(edev, "Failed to stop TXQ #%d\n", - tx_params.tx_queue_id); - return rc; + /* Stop the Tx Queue(s) */ + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + struct qed_stop_txq_params tx_params; + u8 val; + + tx_params.rss_id = i; + val = edev->fp_array[i].txqs[tc].index; + tx_params.tx_queue_id = val; + rc = edev->ops->q_tx_stop(cdev, &tx_params); + if (rc) { + DP_ERR(edev, "Failed to stop TXQ #%d\n", + tx_params.tx_queue_id); + return rc; + } } } - /* Stop the Rx Queue*/ - memset(&rx_params, 0, sizeof(rx_params)); - rx_params.rss_id = i; - rx_params.rx_queue_id = i; + /* Stop the Rx Queue */ + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + memset(&rx_params, 0, sizeof(rx_params)); + rx_params.rss_id = i; + rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id; - rc = edev->ops->q_rx_stop(cdev, &rx_params); - if (rc) { - DP_ERR(edev, "Failed to stop RXQ #%d\n", i); - return rc; + rc = edev->ops->q_rx_stop(cdev, &rx_params); + if (rc) { + DP_ERR(edev, "Failed to stop RXQ #%d\n", i); + return rc; + } } } @@ -3263,7 +3308,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) struct qed_start_vport_params start = {0}; bool reset_rss_indir = false; - if (!edev->num_rss) { + if (!edev->num_queues) { DP_ERR(edev, "Cannot update V-VPORT as active as there are no Rx queues\n"); return -EINVAL; @@ -3287,50 +3332,66 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); - for_each_rss(i) { + for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; - dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table; - - memset(&q_params, 0, sizeof(q_params)); - q_params.rss_id = i; - q_params.queue_id = i; - q_params.vport_id = 0; - q_params.sb = fp->sb_info->igu_sb_id; - q_params.sb_idx = RX_PI; - - rc = edev->ops->q_rx_start(cdev, &q_params, - fp->rxq->rx_buf_size, - fp->rxq->rx_bd_ring.p_phys_addr, - phys_table, - fp->rxq->rx_comp_ring.page_cnt, - &fp->rxq->hw_rxq_prod_addr); - if (rc) { - DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); - return rc; - } + dma_addr_t p_phys_table; + u32 page_cnt; + + if (fp->type & QEDE_FASTPATH_RX) { + struct qede_rx_queue *rxq = fp->rxq; + __le16 *val; + + memset(&q_params, 0, sizeof(q_params)); + q_params.rss_id = i; + q_params.queue_id = rxq->rxq_id; + q_params.vport_id = 0; + q_params.sb = fp->sb_info->igu_sb_id; + q_params.sb_idx = RX_PI; + + p_phys_table = + qed_chain_get_pbl_phys(&rxq->rx_comp_ring); + 
page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); + + rc = edev->ops->q_rx_start(cdev, &q_params, + rxq->rx_buf_size, + rxq->rx_bd_ring.p_phys_addr, + p_phys_table, + page_cnt, + &rxq->hw_rxq_prod_addr); + if (rc) { + DP_ERR(edev, "Start RXQ #%d failed %d\n", i, + rc); + return rc; + } - fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; + val = &fp->sb_info->sb_virt->pi_array[RX_PI]; + rxq->hw_cons_ptr = val; - qede_update_rx_prod(edev, fp->rxq); + qede_update_rx_prod(edev, rxq); + } + + if (!(fp->type & QEDE_FASTPATH_TX)) + continue; for (tc = 0; tc < edev->num_tc; tc++) { struct qede_tx_queue *txq = &fp->txqs[tc]; - int txq_index = tc * QEDE_RSS_CNT(edev) + i; + + p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); + page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); memset(&q_params, 0, sizeof(q_params)); q_params.rss_id = i; - q_params.queue_id = txq_index; + q_params.queue_id = txq->index; q_params.vport_id = 0; q_params.sb = fp->sb_info->igu_sb_id; q_params.sb_idx = TX_PI(tc); rc = edev->ops->q_tx_start(cdev, &q_params, - txq->tx_pbl.pbl.p_phys_table, - txq->tx_pbl.page_cnt, + p_phys_table, page_cnt, &txq->doorbell_addr); if (rc) { DP_ERR(edev, "Start TXQ #%d failed %d\n", - txq_index, rc); + txq->index, rc); return rc; } @@ -3361,13 +3422,13 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) } /* Fill struct with RSS params */ - if (QEDE_RSS_CNT(edev) > 1) { + if (QEDE_RSS_COUNT(edev) > 1) { vport_update_params.update_rss_flg = 1; /* Need to validate current RSS config uses valid entries */ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { if (edev->rss_params.rss_ind_table[i] >= - edev->num_rss) { + QEDE_RSS_COUNT(edev)) { reset_rss_indir = true; break; } @@ -3380,7 +3441,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { u16 indir_val; - val = QEDE_RSS_CNT(edev); + val = QEDE_RSS_COUNT(edev); indir_val = ethtool_rxfh_indir_default(i, val); edev->rss_params.rss_ind_table[i] = indir_val; } @@ -3509,7 +3570,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) if (rc) goto err1; DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", - QEDE_RSS_CNT(edev), edev->num_tc); + QEDE_QUEUE_CNT(edev), edev->num_tc); rc = qede_set_real_num_queues(edev); if (rc) @@ -3562,7 +3623,9 @@ err2: err1: edev->ops->common->set_fp_int(edev->cdev, 0); qede_free_fp_array(edev); - edev->num_rss = 0; + edev->num_queues = 0; + edev->fp_num_tx = 0; + edev->fp_num_rx = 0; err0: return rc; } diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 4e5d5e953e15..f1109661a533 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1011,7 +1011,6 @@ struct ravb_private { struct work_struct work; /* MII transceiver section. 
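
When a queue-count change invalidates the RSS indirection table, the hunk above reseeds it with ethtool_rxfh_indir_default(), which simply deals table slots out to the RX queues round-robin. A standalone restatement (the loop is shortened for the demo; the real table has QED_RSS_IND_TABLE_SIZE entries):

	#include <stdio.h>

	static unsigned int rxfh_indir_default(unsigned int index,
					       unsigned int n_rx_rings)
	{
		return index % n_rx_rings;	/* round-robin spread */
	}

	int main(void)
	{
		for (unsigned int i = 0; i < 8; i++)
			printf("ind_table[%u] = %u\n", i, rxfh_indir_default(i, 3));
		return 0;	/* prints 0 1 2 0 1 2 0 1 */
	}
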
*/ struct mii_bus *mii_bus; /* MDIO bus control */ - struct phy_device *phydev; /* PHY device control */ int link; phy_interface_t phy_interface; int msg_enable; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 1e1cc0fad17f..cad23ba06904 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -942,7 +942,7 @@ out: static void ravb_adjust_link(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = ndev->phydev; bool new_state = false; if (phydev->link) { @@ -1032,48 +1032,47 @@ static int ravb_phy_init(struct net_device *ndev) phy_attached_info(phydev); - priv->phydev = phydev; - return 0; } /* PHY control start function */ static int ravb_phy_start(struct net_device *ndev) { - struct ravb_private *priv = netdev_priv(ndev); int error; error = ravb_phy_init(ndev); if (error) return error; - phy_start(priv->phydev); + phy_start(ndev->phydev); return 0; } -static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ravb_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct ravb_private *priv = netdev_priv(ndev); int error = -ENODEV; unsigned long flags; - if (priv->phydev) { + if (ndev->phydev) { spin_lock_irqsave(&priv->lock, flags); - error = phy_ethtool_gset(priv->phydev, ecmd); + error = phy_ethtool_ksettings_get(ndev->phydev, cmd); spin_unlock_irqrestore(&priv->lock, flags); } return error; } -static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +static int ravb_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; int error; - if (!priv->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&priv->lock, flags); @@ -1081,11 +1080,11 @@ static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) /* Disable TX and RX */ ravb_rcv_snd_disable(ndev); - error = phy_ethtool_sset(priv->phydev, ecmd); + error = phy_ethtool_ksettings_set(ndev->phydev, cmd); if (error) goto error_exit; - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) priv->duplex = 1; else priv->duplex = 0; @@ -1110,9 +1109,9 @@ static int ravb_nway_reset(struct net_device *ndev) int error = -ENODEV; unsigned long flags; - if (priv->phydev) { + if (ndev->phydev) { spin_lock_irqsave(&priv->lock, flags); - error = phy_start_aneg(priv->phydev); + error = phy_start_aneg(ndev->phydev); spin_unlock_irqrestore(&priv->lock, flags); } @@ -1309,8 +1308,6 @@ static int ravb_get_ts_info(struct net_device *ndev, } static const struct ethtool_ops ravb_ethtool_ops = { - .get_settings = ravb_get_settings, - .set_settings = ravb_set_settings, .nway_reset = ravb_nway_reset, .get_msglevel = ravb_get_msglevel, .set_msglevel = ravb_set_msglevel, @@ -1321,6 +1318,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ringparam = ravb_get_ringparam, .set_ringparam = ravb_set_ringparam, .get_ts_info = ravb_get_ts_info, + .get_link_ksettings = ravb_get_link_ksettings, + .set_link_ksettings = ravb_set_link_ksettings, }; static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, @@ -1661,10 +1660,9 @@ static int ravb_close(struct net_device *ndev) } /* PHY disconnect */ - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (ndev->phydev) { + 
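
The ravb conversion that follows keeps the PHY pointer in struct net_device instead of driver private data, which shrinks the ethtool hooks to thin wrappers around the phylib ksettings helpers. A hedged sketch of the resulting shape (foo_* names and the private spinlock are placeholders mirroring ravb's usage, not a real driver):

	static int foo_get_link_ksettings(struct net_device *ndev,
					  struct ethtool_link_ksettings *cmd)
	{
		struct foo_private *priv = netdev_priv(ndev);
		unsigned long flags;
		int err = -ENODEV;	/* no PHY attached yet */

		if (ndev->phydev) {
			spin_lock_irqsave(&priv->lock, flags);
			err = phy_ethtool_ksettings_get(ndev->phydev, cmd);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
		return err;
	}
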
phy_stop(ndev->phydev);
+		phy_disconnect(ndev->phydev);
 	}
 
 	if (priv->chip_id != RCAR_GEN2) {
@@ -1753,8 +1751,7 @@ static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
 /* ioctl to device function */
 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 {
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct phy_device *phydev = priv->phydev;
+	struct phy_device *phydev = ndev->phydev;
 
 	if (!netif_running(ndev))
 		return -EINVAL;
@@ -1876,6 +1873,20 @@ static int ravb_set_gti(struct net_device *ndev)
 	return 0;
 }
 
+static void ravb_set_config_mode(struct net_device *ndev)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+
+	if (priv->chip_id == RCAR_GEN2) {
+		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
+		/* Set CSEL value */
+		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+	} else {
+		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+			    CCC_GAC | CCC_CSEL_HPB);
+	}
+}
+
 static int ravb_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -1978,14 +1989,7 @@ static int ravb_probe(struct platform_device *pdev)
 	ndev->ethtool_ops = &ravb_ethtool_ops;
 
 	/* Set AVB config mode */
-	if (chip_id == RCAR_GEN2) {
-		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
-		/* Set CSEL value */
-		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
-	} else {
-		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
-			    CCC_GAC | CCC_CSEL_HPB);
-	}
+	ravb_set_config_mode(ndev);
 
 	/* Set GTI value */
 	error = ravb_set_gti(ndev);
@@ -2097,6 +2101,54 @@ static int ravb_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
+static int ravb_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	int ret = 0;
+
+	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
+		ret = ravb_close(ndev);
+	}
+
+	return ret;
+}
+
+static int ravb_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct ravb_private *priv = netdev_priv(ndev);
+	int ret = 0;
+
+	/* All registers have been reset to default values.
+	 * Restore all registers which were set up at probe time and
+	 * reopen the device if it was running before the system suspended.
+ */ + + /* Set AVB config mode */ + ravb_set_config_mode(ndev); + + /* Set GTI value */ + ret = ravb_set_gti(ndev); + if (ret) + return ret; + + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); + + /* Restore descriptor base address table */ + ravb_write(ndev, priv->desc_bat_dma, DBAT); + + if (netif_running(ndev)) { + ret = ravb_open(ndev); + if (ret < 0) + return ret; + netif_device_attach(ndev); + } + + return ret; +} + static int ravb_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() @@ -2110,6 +2162,7 @@ static int ravb_runtime_nop(struct device *dev) } static const struct dev_pm_ops ravb_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL) }; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 799d58d86e6d..1f8240aec086 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1723,7 +1723,7 @@ out: static void sh_eth_adjust_link(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - struct phy_device *phydev = mdp->phydev; + struct phy_device *phydev = ndev->phydev; int new_state = 0; if (phydev->link) { @@ -1800,51 +1800,48 @@ static int sh_eth_phy_init(struct net_device *ndev) phy_attached_info(phydev); - mdp->phydev = phydev; - return 0; } /* PHY control start function */ static int sh_eth_phy_start(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); int ret; ret = sh_eth_phy_init(ndev); if (ret) return ret; - phy_start(mdp->phydev); + phy_start(ndev->phydev); return 0; } -static int sh_eth_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int sh_eth_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) { struct sh_eth_private *mdp = netdev_priv(ndev); unsigned long flags; int ret; - if (!mdp->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&mdp->lock, flags); - ret = phy_ethtool_gset(mdp->phydev, ecmd); + ret = phy_ethtool_ksettings_get(ndev->phydev, cmd); spin_unlock_irqrestore(&mdp->lock, flags); return ret; } -static int sh_eth_set_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) +static int sh_eth_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) { struct sh_eth_private *mdp = netdev_priv(ndev); unsigned long flags; int ret; - if (!mdp->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&mdp->lock, flags); @@ -1852,11 +1849,11 @@ static int sh_eth_set_settings(struct net_device *ndev, /* disable tx and rx */ sh_eth_rcv_snd_disable(ndev); - ret = phy_ethtool_sset(mdp->phydev, ecmd); + ret = phy_ethtool_ksettings_set(ndev->phydev, cmd); if (ret) goto error_exit; - if (ecmd->duplex == DUPLEX_FULL) + if (cmd->base.duplex == DUPLEX_FULL) mdp->duplex = 1; else mdp->duplex = 0; @@ -2067,11 +2064,11 @@ static int sh_eth_nway_reset(struct net_device *ndev) unsigned long flags; int ret; - if (!mdp->phydev) + if (!ndev->phydev) return -ENODEV; spin_lock_irqsave(&mdp->lock, flags); - ret = phy_start_aneg(mdp->phydev); + ret = phy_start_aneg(ndev->phydev); spin_unlock_irqrestore(&mdp->lock, flags); return ret; @@ -2198,8 +2195,6 @@ static int sh_eth_set_ringparam(struct net_device *ndev, } static const struct ethtool_ops sh_eth_ethtool_ops = { - .get_settings = sh_eth_get_settings, - .set_settings = sh_eth_set_settings, .get_regs_len = sh_eth_get_regs_len, .get_regs = sh_eth_get_regs, 
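
The new suspend/resume pair above is the standard netdev system-sleep shape: detach and close on the way down, replay the probe-time register setup and reopen on the way up. Reduced to its skeleton (the foo_* hooks are placeholders for a driver's own close/resume/runtime callbacks):

	static int foo_suspend(struct device *dev)
	{
		struct net_device *ndev = dev_get_drvdata(dev);
		int ret = 0;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);	/* stop traffic before closing */
			ret = foo_close(ndev);
		}
		return ret;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
		SET_RUNTIME_PM_OPS(foo_runtime_nop, foo_runtime_nop, NULL)
	};
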
.nway_reset = sh_eth_nway_reset, @@ -2211,6 +2206,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { .get_sset_count = sh_eth_get_sset_count, .get_ringparam = sh_eth_get_ringparam, .set_ringparam = sh_eth_set_ringparam, + .get_link_ksettings = sh_eth_get_link_ksettings, + .set_link_ksettings = sh_eth_set_link_ksettings, }; /* network device open function */ @@ -2408,10 +2405,9 @@ static int sh_eth_close(struct net_device *ndev) sh_eth_dev_exit(ndev); /* PHY Disconnect */ - if (mdp->phydev) { - phy_stop(mdp->phydev); - phy_disconnect(mdp->phydev); - mdp->phydev = NULL; + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); } free_irq(ndev->irq, ndev); @@ -2429,8 +2425,7 @@ static int sh_eth_close(struct net_device *ndev) /* ioctl to device function */ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { - struct sh_eth_private *mdp = netdev_priv(ndev); - struct phy_device *phydev = mdp->phydev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index c62380e34a1d..d050f37f3e0f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -518,7 +518,6 @@ struct sh_eth_private { /* MII transceiver section. */ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ - struct phy_device *phydev; /* PHY device control */ int link; phy_interface_t phy_interface; int msg_enable; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index f0b09b05ed3f..1f0c08602eba 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2412,7 +2412,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, skb->protocol = eth_type_trans(skb, rocker_port->dev); if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) - skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + skb->offload_fwd_mark = 1; rocker_port->dev->stats.rx_packets++; rocker_port->dev->stats.rx_bytes += skb->len; diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 1ca796316173..fcad907baecf 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2558,7 +2558,6 @@ static int ofdpa_port_init(struct rocker_port *rocker_port) struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err; - switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false); rocker_port_set_learning(rocker_port, !!(ofdpa_port->brport_flags & BR_LEARNING)); @@ -2817,7 +2816,6 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex); ofdpa_port->bridge_dev = bridge; - switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true); return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); } @@ -2836,8 +2834,6 @@ static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) ofdpa_port_internal_vlan_id_get(ofdpa_port, ofdpa_port->dev->ifindex); - switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev, - false); ofdpa_port->bridge_dev = NULL; err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e00a669e9e09..00279da6a1e8 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -177,7 +177,7 @@ static int 
efx_ef10_get_vf_index(struct efx_nic *efx) static int efx_ef10_init_datapath_caps(struct efx_nic *efx) { - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); struct efx_ef10_nic_data *nic_data = efx->nic_data; size_t outlen; int rc; @@ -188,7 +188,7 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) outbuf, sizeof(outbuf), &outlen); if (rc) return rc; - if (outlen < sizeof(outbuf)) { + if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { netif_err(efx, drv, efx->net_dev, "unable to read datapath firmware capabilities\n"); return -EIO; @@ -197,6 +197,12 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); + if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) + nic_data->datapath_caps2 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V2_OUT_FLAGS2); + else + nic_data->datapath_caps2 = 0; + /* record the DPCPU firmware IDs to determine VEB vswitching support. */ nic_data->rx_dpcpu_fw_id = @@ -227,6 +233,116 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) return rc > 0 ? rc : -ERANGE; } +static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int implemented; + unsigned int enabled; + int rc; + + nic_data->workaround_35388 = false; + nic_data->workaround_61265 = false; + + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + + if (rc == -ENOSYS) { + /* Firmware without GET_WORKAROUNDS - not a problem. */ + rc = 0; + } else if (rc == 0) { + /* Bug61265 workaround is always enabled if implemented. */ + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) + nic_data->workaround_61265 = true; + + if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { + nic_data->workaround_35388 = true; + } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { + /* Workaround is implemented but not enabled. + * Try to enable it. + */ + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG35388, + true, NULL); + if (rc == 0) + nic_data->workaround_35388 = true; + /* If we failed to set the workaround just carry on. */ + rc = 0; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "workaround for bug 35388 is %sabled\n", + nic_data->workaround_35388 ? "en" : "dis"); + netif_dbg(efx, probe, efx->net_dev, + "workaround for bug 61265 is %sabled\n", + nic_data->workaround_61265 ? 
"en" : "dis"); + + return rc; +} + +static void efx_ef10_process_timer_config(struct efx_nic *efx, + const efx_dword_t *data) +{ + unsigned int max_count; + + if (EFX_EF10_WORKAROUND_61265(efx)) { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); + efx->timer_max_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); + } else if (EFX_EF10_WORKAROUND_35388(efx)) { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); + max_count = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); + efx->timer_max_ns = max_count * efx->timer_quantum_ns; + } else { + efx->timer_quantum_ns = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); + max_count = MCDI_DWORD(data, + GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); + efx->timer_max_ns = max_count * efx->timer_quantum_ns; + } + + netif_dbg(efx, probe, efx->net_dev, + "got timer properties from MC: quantum %u ns; max %u ns\n", + efx->timer_quantum_ns, efx->timer_max_ns); +} + +static int efx_ef10_get_timer_config(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); + int rc; + + rc = efx_ef10_get_timer_workarounds(efx); + if (rc) + return rc; + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, + outbuf, sizeof(outbuf), NULL); + + if (rc == 0) { + efx_ef10_process_timer_config(efx, outbuf); + } else if (rc == -ENOSYS || rc == -EPERM) { + /* Not available - fall back to Huntington defaults. */ + unsigned int quantum; + + rc = efx_ef10_get_sysclk_freq(efx); + if (rc < 0) + return rc; + + quantum = 1536000 / rc; /* 1536 cycles */ + efx->timer_quantum_ns = quantum; + efx->timer_max_ns = efx->type->timer_period_max * quantum; + rc = 0; + } else { + efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, + MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, + NULL, 0, rc); + } + + return rc; +} + static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); @@ -527,32 +643,9 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail5; - rc = efx_ef10_get_sysclk_freq(efx); + rc = efx_ef10_get_timer_config(efx); if (rc < 0) goto fail5; - efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ - - /* Check whether firmware supports bug 35388 workaround. - * First try to enable it, then if we get EPERM, just - * ask if it's already enabled - */ - rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); - if (rc == 0) { - nic_data->workaround_35388 = true; - } else if (rc == -EPERM) { - unsigned int enabled; - - rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); - if (rc) - goto fail3; - nic_data->workaround_35388 = enabled & - MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; - } else if (rc != -ENOSYS && rc != -ENOENT) { - goto fail5; - } - netif_dbg(efx, probe, efx->net_dev, - "workaround for bug 35388 is %sabled\n", - nic_data->workaround_35388 ? "en" : "dis"); rc = efx_mcdi_mon_probe(efx); if (rc && rc != -EPERM) @@ -1440,9 +1533,10 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ (1ULL << GENERIC_STAT_rx_noskb_drops)) -/* These statistics are only provided by the 10G MAC. For a 10G/40G - * switchable port we do not expose these because they might not - * include all the packets they should. +/* On 7000 series NICs, these statistics are only provided by the 10G MAC. 
+ * For a 10G/40G switchable port we do not expose these because they might + * not include all the packets they should. + * On 8000 series NICs these statistics are always provided. */ #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ (1ULL << EF10_STAT_port_tx_lt64) | \ @@ -1488,10 +1582,15 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) return 0; - if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { raw_mask |= HUNT_40G_EXTRA_STAT_MASK; - else + /* 8000 series have everything even at 40G */ + if (nic_data->datapath_caps2 & + (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) + raw_mask |= HUNT_10G_ONLY_STAT_MASK; + } else { raw_mask |= HUNT_10G_ONLY_STAT_MASK; + } if (nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) @@ -1617,7 +1716,6 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) efx_ef10_get_stat_mask(efx, mask); dma_stats = efx->stats_buffer.addr; - nic_data = efx->nic_data; generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) @@ -1744,27 +1842,43 @@ static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, static void efx_ef10_push_irq_moderation(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - unsigned int mode, value; + unsigned int mode, usecs; efx_dword_t timer_cmd; - if (channel->irq_moderation) { + if (channel->irq_moderation_us) { mode = 3; - value = channel->irq_moderation - 1; + usecs = channel->irq_moderation_us; } else { mode = 0; - value = 0; + usecs = 0; } - if (EFX_EF10_WORKAROUND_35388(efx)) { + if (EFX_EF10_WORKAROUND_61265(efx)) { + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); + unsigned int ns = usecs * 1000; + + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, + channel->channel); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); + MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); + + efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, + inbuf, sizeof(inbuf), 0, NULL, 0); + } else if (EFX_EF10_WORKAROUND_35388(efx)) { + unsigned int ticks = efx_usecs_to_ticks(efx, usecs); + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, EFE_DD_EVQ_IND_TIMER_FLAGS, ERF_DD_EVQ_IND_TIMER_MODE, mode, - ERF_DD_EVQ_IND_TIMER_VAL, value); + ERF_DD_EVQ_IND_TIMER_VAL, ticks); efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, channel->channel); } else { + unsigned int ticks = efx_usecs_to_ticks(efx, usecs); + EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, - ERF_DZ_TC_TIMER_VAL, value); + ERF_DZ_TC_TIMER_VAL, ticks); efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, channel->channel); } @@ -1935,14 +2049,18 @@ static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void efx_ef10_irq_test_generate(struct efx_nic *efx) +static int efx_ef10_irq_test_generate(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); + if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, + NULL) == 0) + return -ENOTSUPP; + BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); - (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, + return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, inbuf, sizeof(inbuf), NULL, 0, NULL); } @@ -2536,13 +2654,12 @@ fail: static int 
efx_ef10_ev_init(struct efx_channel *channel) { MCDI_DECLARE_BUF(inbuf, - MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / - EFX_BUF_SIZE)); - MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); + MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; struct efx_nic *efx = channel->efx; struct efx_ef10_nic_data *nic_data; - bool supports_rx_merge; size_t inlen, outlen; unsigned int enabled, implemented; dma_addr_t dma_addr; @@ -2550,9 +2667,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel) int i; nic_data = efx->nic_data; - supports_rx_merge = - !!(nic_data->datapath_caps & - 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); /* Fill event queue with all ones (i.e. empty events) */ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); @@ -2561,11 +2675,6 @@ static int efx_ef10_ev_init(struct efx_channel *channel) MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); /* INIT_EVQ expects index in vector table, not absolute */ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); - MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, - INIT_EVQ_IN_FLAG_INTERRUPTING, 1, - INIT_EVQ_IN_FLAG_RX_MERGE, 1, - INIT_EVQ_IN_FLAG_TX_MERGE, 1, - INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); @@ -2574,6 +2683,27 @@ static int efx_ef10_ev_init(struct efx_channel *channel) MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); + if (nic_data->datapath_caps2 & + 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) { + /* Use the new generic approach to specifying event queue + * configuration, requesting lower latency or higher throughput. + * The options that actually get used appear in the output. + */ + MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS, + INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_V2_IN_FLAG_TYPE, + MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); + } else { + bool cut_thru = !(nic_data->datapath_caps & + 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); + + MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, + INIT_EVQ_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_IN_FLAG_RX_MERGE, 1, + INIT_EVQ_IN_FLAG_TX_MERGE, 1, + INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru); + } + dma_addr = channel->eventq.buf.dma_addr; for (i = 0; i < entries; ++i) { MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); @@ -2584,6 +2714,13 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); + + if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN) + netif_dbg(efx, drv, efx->net_dev, + "Channel %d using event queue flags %08x\n", + channel->channel, + MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS)); + /* IRQ return is ignored */ if (channel->channel || rc) return rc; @@ -2591,8 +2728,8 @@ static int efx_ef10_ev_init(struct efx_channel *channel) /* Successfully created event queue on channel 0 */ rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); if (rc == -ENOSYS) { - /* GET_WORKAROUNDS was implemented before the bug26807 - * workaround, thus the latter must be unavailable in this fw + /* GET_WORKAROUNDS was implemented before this workaround, + * thus it must be unavailable in this firmware. 
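
All of the INIT_EVQ flag handling above rests on the MCDI LBN/WIDTH convention: each field occupies `width` bits starting at bit `lbn` of a little-endian dword. A generic setter in that spirit (illustrative, not the kernel's MCDI_POPULATE_DWORD macro):

	#include <stdint.h>

	static uint32_t mcdi_set_field(uint32_t word, unsigned int lbn,
				       unsigned int width, uint32_t val)
	{
		uint32_t mask = (width >= 32 ? ~0U : ((1U << width) - 1U)) << lbn;

		return (word & ~mask) | ((val << lbn) & mask);
	}

	/* e.g. the V2 request above: INTERRUPTING (lbn 0, width 1) = 1 and
	 * TYPE (lbn 7, width 4) = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO (0x3):
	 *	flags = mcdi_set_field(mcdi_set_field(0, 0, 1, 1), 7, 4, 0x3);
	 */
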
*/ nic_data->workaround_26807 = false; rc = 0; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 14b821b1c880..f3826ae28bac 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -281,6 +281,27 @@ static int efx_process_channel(struct efx_channel *channel, int budget) * NAPI guarantees serialisation of polls of the same device, which * provides the guarantee required by efx_process_channel(). */ +static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel) +{ + int step = efx->irq_mod_step_us; + + if (channel->irq_mod_score < irq_adapt_low_thresh) { + if (channel->irq_moderation_us > step) { + channel->irq_moderation_us -= step; + efx->type->push_irq_moderation(channel); + } + } else if (channel->irq_mod_score > irq_adapt_high_thresh) { + if (channel->irq_moderation_us < + efx->irq_rx_moderation_us) { + channel->irq_moderation_us += step; + efx->type->push_irq_moderation(channel); + } + } + + channel->irq_count = 0; + channel->irq_mod_score = 0; +} + static int efx_poll(struct napi_struct *napi, int budget) { struct efx_channel *channel = @@ -301,22 +322,7 @@ static int efx_poll(struct napi_struct *napi, int budget) if (efx_channel_has_rx_queue(channel) && efx->irq_rx_adaptive && unlikely(++channel->irq_count == 1000)) { - if (unlikely(channel->irq_mod_score < - irq_adapt_low_thresh)) { - if (channel->irq_moderation > 1) { - channel->irq_moderation -= 1; - efx->type->push_irq_moderation(channel); - } - } else if (unlikely(channel->irq_mod_score > - irq_adapt_high_thresh)) { - if (channel->irq_moderation < - efx->irq_rx_moderation) { - channel->irq_moderation += 1; - efx->type->push_irq_moderation(channel); - } - } - channel->irq_count = 0; - channel->irq_mod_score = 0; + efx_update_irq_mod(efx, channel); } efx_filter_rfs_expire(channel); @@ -1703,6 +1709,7 @@ static int efx_probe_nic(struct efx_nic *efx) netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); /* Initialise the interrupt moderation settings */ + efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, true); @@ -1949,14 +1956,21 @@ static void efx_remove_all(struct efx_nic *efx) * Interrupt moderation * **************************************************************************/ - -static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns) +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) { if (usecs == 0) return 0; - if (usecs * 1000 < quantum_ns) + if (usecs * 1000 < efx->timer_quantum_ns) return 1; /* never round down to 0 */ - return usecs * 1000 / quantum_ns; + return usecs * 1000 / efx->timer_quantum_ns; +} + +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks) +{ + /* We must round up when converting ticks to microseconds + * because we round down when converting the other way. 
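
The factored-out efx_update_irq_mod() is a small additive-increase/additive-decrease controller: it runs once per 1000 interrupts, nudges the delay down one step on quiet channels, up one step on busy ones (capped at the configured RX moderation), then clears the counters. A standalone model (the thresholds here are illustrative, not the driver's irq_adapt_* module parameters):

	static unsigned int update_irq_mod(unsigned int cur_us, unsigned int score,
					   unsigned int step_us, unsigned int max_us)
	{
		const unsigned int low_thresh = 800, high_thresh = 1200;

		if (score < low_thresh && cur_us > step_us)
			return cur_us - step_us;	/* quiet: less batching delay */
		if (score > high_thresh && cur_us < max_us)
			return cur_us + step_us;	/* busy: batch harder */
		return cur_us;
	}
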
+ */ + return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000); } /* Set interrupt moderation parameters */ @@ -1965,21 +1979,16 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, bool rx_may_override_tx) { struct efx_channel *channel; - unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max * - efx->timer_quantum_ns, - 1000); - unsigned int tx_ticks; - unsigned int rx_ticks; + unsigned int timer_max_us; EFX_ASSERT_RESET_SERIALISED(efx); - if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max) - return -EINVAL; + timer_max_us = efx->timer_max_ns / 1000; - tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns); - rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns); + if (tx_usecs > timer_max_us || rx_usecs > timer_max_us) + return -EINVAL; - if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && + if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 && !rx_may_override_tx) { netif_err(efx, drv, efx->net_dev, "Channels are shared. " "RX and TX IRQ moderation must be equal\n"); @@ -1987,12 +1996,12 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, } efx->irq_rx_adaptive = rx_adaptive; - efx->irq_rx_moderation = rx_ticks; + efx->irq_rx_moderation_us = rx_usecs; efx_for_each_channel(channel, efx) { if (efx_channel_has_rx_queue(channel)) - channel->irq_moderation = rx_ticks; + channel->irq_moderation_us = rx_usecs; else if (efx_channel_has_tx_queues(channel)) - channel->irq_moderation = tx_ticks; + channel->irq_moderation_us = tx_usecs; } return 0; @@ -2001,26 +2010,21 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, unsigned int *rx_usecs, bool *rx_adaptive) { - /* We must round up when converting ticks to microseconds - * because we round down when converting the other way. - */ - *rx_adaptive = efx->irq_rx_adaptive; - *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation * - efx->timer_quantum_ns, - 1000); + *rx_usecs = efx->irq_rx_moderation_us; /* If channels are shared between RX and TX, so is IRQ * moderation. Otherwise, IRQ moderation is the same for all * TX channels and is not adaptive. 
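
The two conversion helpers above deliberately round in opposite directions, so converting microseconds to ticks and back never reports less than was programmed. A runnable restatement with an assumed 6144 ns quantum:

	#include <stdio.h>

	#define QUANTUM_NS 6144U	/* assumed example quantum */

	static unsigned int usecs_to_ticks(unsigned int usecs)
	{
		if (usecs == 0)
			return 0;
		if (usecs * 1000 < QUANTUM_NS)
			return 1;			/* never round down to 0 */
		return usecs * 1000 / QUANTUM_NS;	/* rounds down */
	}

	static unsigned int ticks_to_usecs(unsigned int ticks)
	{
		return (ticks * QUANTUM_NS + 999) / 1000;	/* rounds up */
	}

	int main(void)
	{
		for (unsigned int us = 1; us <= 5; us++)
			printf("%u us -> %u ticks -> %u us\n", us,
			       usecs_to_ticks(us),
			       ticks_to_usecs(usecs_to_ticks(us)));
		return 0;
	}
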
*/ - if (efx->tx_channel_offset == 0) + if (efx->tx_channel_offset == 0) { *tx_usecs = *rx_usecs; - else - *tx_usecs = DIV_ROUND_UP( - efx->channel[efx->tx_channel_offset]->irq_moderation * - efx->timer_quantum_ns, - 1000); + } else { + struct efx_channel *tx_channel; + + tx_channel = efx->channel[efx->tx_channel_offset]; + *tx_usecs = tx_channel->irq_moderation_us; + } } /************************************************************************** diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index c3ae739e9c7a..342ae16e1f2d 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -204,6 +204,8 @@ int efx_try_recovery(struct efx_nic *efx); /* Global */ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); +unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); +unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, unsigned int rx_usecs, bool rx_adaptive, bool rx_may_override_tx); diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index d790cb8d9db3..1a7092602aec 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -378,12 +378,15 @@ static void falcon_push_irq_moderation(struct efx_channel *channel) struct efx_nic *efx = channel->efx; /* Set timer register */ - if (channel->irq_moderation) { + if (channel->irq_moderation_us) { + unsigned int ticks; + + ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, FFE_BB_TIMER_MODE_INT_HLDOFF, FRF_AB_TC_TIMER_VAL, - channel->irq_moderation - 1); + ticks - 1); } else { EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, @@ -2373,6 +2376,8 @@ static int falcon_probe_nic(struct efx_nic *efx) EFX_MAX_CHANNELS); efx->max_tx_channels = efx->max_channels; efx->timer_quantum_ns = 4968; /* 621 cycles */ + efx->timer_max_ns = efx->type->timer_period_max * + efx->timer_quantum_ns; /* Initialise I2C adapter */ board = falcon_board(efx); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 4c83739d158f..4762ec444cb8 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -1477,9 +1477,10 @@ void efx_farch_irq_disable_master(struct efx_nic *efx) * Interrupt must already have been enabled, otherwise nasty things * may happen. */ -void efx_farch_irq_test_generate(struct efx_nic *efx) +int efx_farch_irq_test_generate(struct efx_nic *efx) { efx_farch_interrupts(efx, true, true); + return 0; } /* Process a fatal interrupt diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d28e7dd8fa3c..9fbc12a8f80c 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -548,7 +548,10 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, err_len, rc); } - async->complete(efx, async->cookie, rc, outbuf, data_len); + + if (async->complete) + async->complete(efx, async->cookie, rc, outbuf, + min(async->outlen, data_len)); kfree(async); efx_mcdi_release(mcdi); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index c9a5b003caaf..ccceafc15896 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2645,16 +2645,20 @@ #define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 /* enum: CSR IREG bus. 
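
The mcdi.c hunk above hardens async completion in two ways: a NULL completer is now tolerated, and the reported length is clamped to the caller's buffer so an oversized firmware reply cannot imply reading past it. The same logic, standalone:

	#include <stddef.h>

	typedef void (*complete_fn)(void *ctx, int rc,
				    const void *buf, size_t len);

	/* Never report more bytes than the completer's buffer can hold,
	 * and tolerate a missing completion hook. */
	static void finish(complete_fn fn, void *ctx, int rc,
			   const void *buf, size_t buf_cap, size_t data_len)
	{
		size_t len = data_len < buf_cap ? data_len : buf_cap;

		if (fn)
			fn(ctx, rc, buf, len);
	}
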
*/ #define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 -/* enum: RX DPCPU bus. */ +/* enum: RX0 DPCPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 /* enum: TX0 DPCPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 /* enum: TX1 DPCPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 -/* enum: RX DICPU bus. */ +/* enum: RX0 DICPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 /* enum: TX DICPU bus. */ #define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* enum: RX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7 +/* enum: RX1 DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8 /* Pattern written to RAM / register */ #define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 /* Actual value read from RAM / register */ @@ -3612,6 +3616,8 @@ #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 @@ -4389,6 +4395,8 @@ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. */ #define MC_CMD_WORKAROUND_BUG26807 0x6 +/* enum: Bug 61265 work around (broken EVQ TMR writes). */ +#define MC_CMD_WORKAROUND_BUG61265 0x7 /* 0 = disable the workaround indicated by TYPE; any non-zero value = enable * the workaround */ @@ -4413,7 +4421,6 @@ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. - * Anything else: currently undefined. Locks required: None. Return code: 0. */ #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b @@ -5479,6 +5486,8 @@ #define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 #define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 #define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 #define LICENSED_V3_FEATURES_MASK_LBN 0 #define LICENSED_V3_FEATURES_MASK_WIDTH 64 @@ -5634,6 +5643,109 @@ /* Only valid if INTRFLAG was true */ #define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 +/* MC_CMD_INIT_EVQ_V2_IN msgrequest */ +#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12 +/* tbd */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 +/* enum: All initialisation flags specified by host. */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the lowest latency achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1 +/* enum: MEDFORD only. Certain initialisation flags specified by host may be + * over-ridden by firmware based on licenses and firmware variant in order to + * provide the best throughput achievable. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2 +/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by + * firmware based on licenses and firmware variant. See + * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. + */ +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. 
*/ +#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 + +/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0 +/* Actual configuration applied on the card */ +#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 + /* QUEUE_CRC_MODE structuredef */ #define QUEUE_CRC_MODE_LEN 1 #define QUEUE_CRC_MODE_MODE_LBN 0 @@ -5697,8 +5809,8 @@ #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10 -#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 +#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -7854,6 +7966,20 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present * on older firmware (check the length). */ @@ -7910,6 +8036,288 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 +/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73 +/* First word of flags. 
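
Several response fields in these layouts carry the note "Not present on older firmware (check the length)"; the driver-side pattern, as in efx_ef10_init_datapath_caps earlier in this diff, is to read optional words only when the reply is long enough. A standalone model with illustrative offsets (not the real MCDI layout):

	#include <stdint.h>
	#include <string.h>

	struct caps { uint32_t flags1, flags2; };

	static void parse_caps(const uint8_t *buf, size_t len, struct caps *out)
	{
		memcpy(&out->flags1, buf, 4);	/* always present */
		out->flags2 = 0;		/* default for old firmware */
		if (len >= 24)			/* V2-sized reply or longer */
			memcpy(&out->flags2, buf + 20, 4);
	}
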
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* TxDPCPU firmware id. 
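
Per the LBN/WIDTH definitions above, the RXPD/TXPD version words pack a 12-bit revision (LBN 0) and a 4-bit type (LBN 12) into one 16-bit field, so decoding is a pair of shifts and masks:

	#include <stdint.h>

	static unsigned int pd_fw_rev(uint16_t ver)  { return ver & 0x0fff; }
	static unsigned int pd_fw_type(uint16_t ver) { return (ver >> 12) & 0xf; }

	/* e.g. pd_fw_type() == 0x5 identifies the low latency PD firmware */
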
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +/* Second word of flags. Not present on older firmware (check the length). */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present + * on older firmware (check the length). + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. 
Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm. The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 + /***********************************/ /* MC_CMD_V2_EXTN @@ -9026,7 +9434,7 @@ */ #define MC_CMD_GET_RXDP_CONFIG 0xc2 -#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0 @@ -10125,7 +10533,9 @@ * that this operation returns a zero-length response */ #define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 -/* enum: report counts of installed licenses */ +/* enum: report counts of installed licenses. Returns EAGAIN if license + * processing (updating) has been started but not yet completed. + */ #define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 /* MC_CMD_LICENSING_V3_OUT msgresponse */ @@ -10763,6 +11173,8 @@ #define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 /* enum: Bug 26807 features present in firmware (multicast filter chaining) */ #define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 +/* enum: Bug 61265 workaround (broken EVQ TMR writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80 /***********************************/ @@ -11280,22 +11692,110 @@ #define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_RX_BALANCING_IN msgrequest */ -#define MC_CMD_RX_BALANCING_IN_LEN 4 +#define MC_CMD_RX_BALANCING_IN_LEN 16 /* The RX port whose upconverter table will be modified */ #define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 -#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1 /* The VLAN priority associated with the table index and vFIFO */ -#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1 -#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 /* The resulting bit of SRC^DST for indexing the table */ -#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2 -#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 /* The RX engine to which the vFIFO in the table entry will point */ -#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3 -#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1 +#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 /* MC_CMD_RX_BALANCING_OUT msgresponse */ #define MC_CMD_RX_BALANCING_OUT_LEN 0 +/***********************************/ +/* MC_CMD_SET_EVQ_TMR + * Update the timer load, timer reload and timer mode values for a given EVQ. + * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will + * be rounded up to the granularity supported by the hardware, then truncated + * to the range supported by the hardware. The resulting value after the + * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS + * and TMR_RELOAD_ACT_NS). + */ +#define MC_CMD_SET_EVQ_TMR 0x120 + +#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_EVQ_TMR_IN msgrequest */ +#define MC_CMD_SET_EVQ_TMR_IN_LEN 16 +/* Function-relative queue instance */ +#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0 +/* Requested value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4 +/* Requested value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8 +/* Timer mode. 
Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */ +#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12 +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */ +#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */ + +/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */ +#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8 +/* Actual value for timer load (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0 +/* Actual value for timer reload (in nanoseconds) */ +#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4 + + +/***********************************/ +/* MC_CMD_GET_EVQ_TMR_PROPERTIES + * Query properties about the event queue timers. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122 + +#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0 + +/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36 +/* Reserved for future use. */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0 +/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in + * nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4 +/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value + * allowed for timer load/reload counts. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8 +/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a + * multiple of this step size will be rounded in an implementation defined + * manner. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12 +/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only + * meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16 +/* Timer durations requested via MCDI that are not a multiple of this step size + * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20 +/* For timers updated using the bug35388 workaround, this is the time interval + * (in nanoseconds) for each increment of the timer load/reload count. The + * requested duration of a timer is this value multiplied by the timer + * load/reload count. This field is only meaningful if the bug35388 workaround + * is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +/* For timers updated using the bug35388 workaround, this is the maximum value + * allowed for timer load/reload counts. This field is only meaningful if the + * bug35388 workaround is enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +/* For timers updated using the bug35388 workaround, timer load/reload counts + * not a multiple of this step size will be rounded in an implementation + * defined manner. This field is only meaningful if the bug35388 workaround is + * enabled. 
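Taken together, these properties tell a driver how to convert a requested moderation period into a value the hardware will accept. The helper below is a minimal illustrative sketch (the function and its parameter names are invented here, not taken from the sfc driver); it applies the round-up-then-clamp behaviour that the MC_CMD_SET_EVQ_TMR description above specifies, and a caller would feed it either the TMR_REG_* or the BUG35388_TMR_* triple as appropriate:

static unsigned int evq_tmr_ns_to_count(unsigned int req_ns,
					unsigned int ns_per_count,
					unsigned int max_count,
					unsigned int step)
{
	unsigned int count;

	/* Round the requested period up to whole hardware counts... */
	count = DIV_ROUND_UP(req_ns, ns_per_count);
	/* ...round up to the advertised step size... */
	count = roundup(count, step);
	/* ...then truncate to the range the hardware supports. */
	return min(count, max_count);
}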
+ */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9ff062a36ea8..0a2504b5dad5 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -392,7 +392,7 @@ enum efx_sync_events_state { * @eventq_init: Event queue initialised flag * @enabled: Channel enabled indicator * @irq: IRQ number (MSI and MSI-X only) - * @irq_moderation: IRQ moderation value (in hardware ticks) + * @irq_moderation_us: IRQ moderation value (in microseconds) * @napi_dev: Net device used with NAPI * @napi_str: NAPI control structure * @state: state for NAPI vs busy polling @@ -433,7 +433,7 @@ struct efx_channel { bool eventq_init; bool enabled; int irq; - unsigned int irq_moderation; + unsigned int irq_moderation_us; struct net_device *napi_dev; struct napi_struct napi_str; #ifdef CONFIG_NET_RX_BUSY_POLL @@ -810,8 +810,10 @@ struct vfdi_status; * @membase: Memory BAR value * @interrupt_mode: Interrupt mode * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds + * @timer_max_ns: Interrupt timer maximum value, in nanoseconds * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues - * @irq_rx_moderation: IRQ moderation time for RX event queues + * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues + * @irq_rx_moderation_us: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. * @reset_pending: Bitmask for pending resets @@ -940,8 +942,10 @@ struct efx_nic { enum efx_int_mode interrupt_mode; unsigned int timer_quantum_ns; + unsigned int timer_max_ns; bool irq_rx_adaptive; - unsigned int irq_rx_moderation; + unsigned int irq_mod_step_us; + unsigned int irq_rx_moderation_us; u32 msg_enable; enum nic_state state; @@ -1271,7 +1275,7 @@ struct efx_nic_type { int (*mcdi_poll_reboot)(struct efx_nic *efx); void (*mcdi_reboot_detected)(struct efx_nic *efx); void (*irq_enable_master)(struct efx_nic *efx); - void (*irq_test_generate)(struct efx_nic *efx); + int (*irq_test_generate)(struct efx_nic *efx); void (*irq_disable_non_ev)(struct efx_nic *efx); irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 89b83e59e1dc..aa1945a858d5 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -66,11 +66,11 @@ void efx_nic_event_test_start(struct efx_channel *channel) channel->efx->type->ev_test_generate(channel); } -void efx_nic_irq_test_start(struct efx_nic *efx) +int efx_nic_irq_test_start(struct efx_nic *efx) { efx->last_irq_cpu = -1; smp_wmb(); - efx->type->irq_test_generate(efx); + return efx->type->irq_test_generate(efx); } /* Hook interrupt handler(s) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 96944c3c9d14..73bee7ea332a 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -507,10 +507,13 @@ enum { * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 * @workaround_26807: Flag: firmware supports workaround for bug 26807 + * @workaround_61265: Flag: firmware supports workaround for bug 61265 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated * after MC reboot * @datapath_caps: Capabilities of datapath firmware (FLAGS1 
field of * %MC_CMD_GET_CAPABILITIES response) + * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of + * %MC_CMD_GET_CAPABILITIES response) * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU * @vport_id: The function's vport ID, only relevant for PFs @@ -540,8 +543,10 @@ struct efx_ef10_nic_data { u64 stats[EF10_STAT_COUNT]; bool workaround_35388; bool workaround_26807; + bool workaround_61265; bool must_check_datapath_caps; u32 datapath_caps; + u32 datapath_caps2; unsigned int rx_dpcpu_fw_id; unsigned int tx_dpcpu_fw_id; unsigned int vport_id; @@ -741,12 +746,12 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) /* Interrupts */ int efx_nic_init_interrupt(struct efx_nic *efx); -void efx_nic_irq_test_start(struct efx_nic *efx); +int efx_nic_irq_test_start(struct efx_nic *efx); void efx_nic_fini_interrupt(struct efx_nic *efx); /* Falcon/Siena interrupts */ void efx_farch_irq_enable_master(struct efx_nic *efx); -void efx_farch_irq_test_generate(struct efx_nic *efx); +int efx_farch_irq_test_generate(struct efx_nic *efx); void efx_farch_irq_disable_master(struct efx_nic *efx); irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index c771e0af4e06..dd204d9704c6 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1306,7 +1306,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) { struct efx_nic *efx = channel->efx; - channel->irq_moderation = 0; + channel->irq_moderation_us = 0; channel->rx_queue.core_index = 0; return efx_ptp_probe(efx, channel); diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 9d78830da609..cd38b44ae23a 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -135,11 +135,19 @@ static int efx_test_interrupts(struct efx_nic *efx, { unsigned long timeout, wait; int cpu; + int rc; netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); tests->interrupt = -1; - efx_nic_irq_test_start(efx); + rc = efx_nic_irq_test_start(efx); + if (rc == -ENOTSUPP) { + netif_dbg(efx, drv, efx->net_dev, + "direct interrupt testing not supported\n"); + tests->interrupt = 0; + return 0; + } + timeout = jiffies + IRQ_TIMEOUT; wait = 1; diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h index 009dbe88f3be..32a427253a03 100644 --- a/drivers/net/ethernet/sfc/selftest.h +++ b/drivers/net/ethernet/sfc/selftest.h @@ -28,7 +28,7 @@ struct efx_loopback_self_tests { /* Efx self test results * For fields which are not counters, 1 indicates success and -1 - * indicates failure. + * indicates failure; 0 indicates test could not be run. 
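The comment above establishes a three-way convention for non-counter fields. As a purely hypothetical illustration (no such helper exists in the driver), a results printer might decode it like this:

static const char *efx_self_test_result_str(int result)
{
	switch (result) {
	case 1:
		return "PASS";
	case -1:
		return "FAIL";
	case 0:
		return "NOT RUN";	/* e.g. irq_test_generate returned -ENOTSUPP */
	default:
		return "?";		/* counter-valued fields carry other values */
	}
}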
*/ struct efx_self_tests { /* online tests */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 2219b5424d2b..04ed1b4c7cd9 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -34,19 +34,24 @@ static void siena_init_wol(struct efx_nic *efx); static void siena_push_irq_moderation(struct efx_channel *channel) { + struct efx_nic *efx = channel->efx; efx_dword_t timer_cmd; - if (channel->irq_moderation) + if (channel->irq_moderation_us) { + unsigned int ticks; + + ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, - channel->irq_moderation - 1); - else + ticks - 1); + } else { EFX_POPULATE_DWORD_2(timer_cmd, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); + } efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, channel->channel); } @@ -222,6 +227,9 @@ static int siena_probe_nvconfig(struct efx_nic *efx) efx->timer_quantum_ns = (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? 3072 : 6144; /* 768 cycles */ + efx->timer_max_ns = efx->type->timer_period_max * + efx->timer_quantum_ns; + return rc; } diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h index 2310b75d4ec2..351cd14cb9f9 100644 --- a/drivers/net/ethernet/sfc/workarounds.h +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -50,4 +50,8 @@ #define EFX_WORKAROUND_35388(efx) \ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx)) +/* Moderation timer access must go through MCDI */ +#define EFX_EF10_WORKAROUND_61265(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265) + #endif /* EFX_WORKAROUNDS_H */ diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f85d605e4560..c6cff3d2ff05 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -124,7 +124,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x76543210 +#define CPDMA_TX_PRIORITY_MAP 0x01234567 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 @@ -140,9 +140,11 @@ do { \ #define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) #define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) -#define cpsw_slave_index(priv) \ - ((priv->data.dual_emac) ? priv->emac_port : \ - priv->data.active_slave) +#define cpsw_slave_index(cpsw, priv) \ + ((cpsw->data.dual_emac) ? 
priv->emac_port : \ + cpsw->data.active_slave) +#define IRQ_NUM 2 +#define CPSW_MAX_QUEUES 8 static int debug_level; module_param(debug_level, int, 0); @@ -363,38 +365,41 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) __raw_writel(val, slave->regs + offset); } -struct cpsw_priv { - struct platform_device *pdev; - struct net_device *ndev; - struct napi_struct napi_rx; - struct napi_struct napi_tx; +struct cpsw_common { struct device *dev; struct cpsw_platform_data data; + struct napi_struct napi_rx; + struct napi_struct napi_tx; struct cpsw_ss_regs __iomem *regs; struct cpsw_wr_regs __iomem *wr_regs; u8 __iomem *hw_stats; struct cpsw_host_regs __iomem *host_port_regs; - u32 msg_enable; u32 version; u32 coal_intvl; u32 bus_freq_mhz; int rx_packet_max; - struct clk *clk; - u8 mac_addr[ETH_ALEN]; struct cpsw_slave *slaves; struct cpdma_ctlr *dma; - struct cpdma_chan *txch, *rxch; + struct cpdma_chan *txch[CPSW_MAX_QUEUES]; + struct cpdma_chan *rxch[CPSW_MAX_QUEUES]; struct cpsw_ale *ale; - bool rx_pause; - bool tx_pause; bool quirk_irq; bool rx_irq_disabled; bool tx_irq_disabled; - /* snapshot of IRQ numbers */ - u32 irqs_table[4]; - u32 num_irqs; - struct cpts *cpts; + u32 irqs_table[IRQ_NUM]; + struct cpts *cpts; + int rx_ch_num, tx_ch_num; +}; + +struct cpsw_priv { + struct net_device *ndev; + struct device *dev; + u32 msg_enable; + u8 mac_addr[ETH_ALEN]; + bool rx_pause; + bool tx_pause; u32 emac_port; + struct cpsw_common *cpsw; }; struct cpsw_stats { @@ -455,108 +460,92 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, - { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, - { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, - { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, - { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, - { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, - { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, - { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, - { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, - { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, - { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, - { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, - { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, - { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, - { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, - { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, - { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, - { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, - { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, - { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, - { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, - { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, - { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, - { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, - { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, - { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) }, - { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, }; -#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) +static const struct 
cpsw_stats cpsw_gstrings_ch_stats[] = { + { "head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "misqueued", CPDMA_RX_STAT(misqueued) }, + { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, + { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, + { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "requeue", CPDMA_RX_STAT(requeue) }, + { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, +}; -#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) +#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats) +#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats) + +#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw) +#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi) #define for_each_slave(priv, func, arg...) \ do { \ struct cpsw_slave *slave; \ + struct cpsw_common *cpsw = (priv)->cpsw; \ int n; \ - if (priv->data.dual_emac) \ - (func)((priv)->slaves + priv->emac_port, ##arg);\ + if (cpsw->data.dual_emac) \ + (func)((cpsw)->slaves + priv->emac_port, ##arg);\ else \ - for (n = (priv)->data.slaves, \ - slave = (priv)->slaves; \ + for (n = cpsw->data.slaves, \ + slave = cpsw->slaves; \ n; n--) \ (func)(slave++, ##arg); \ } while (0) -#define cpsw_get_slave_ndev(priv, __slave_no__) \ - ((__slave_no__ < priv->data.slaves) ? \ - priv->slaves[__slave_no__].ndev : NULL) -#define cpsw_get_slave_priv(priv, __slave_no__) \ - (((__slave_no__ < priv->data.slaves) && \ - (priv->slaves[__slave_no__].ndev)) ? 
\ - netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ - -#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ + +#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb) \ do { \ - if (!priv->data.dual_emac) \ + if (!cpsw->data.dual_emac) \ break; \ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ - ndev = cpsw_get_slave_ndev(priv, 0); \ - priv = netdev_priv(ndev); \ + ndev = cpsw->slaves[0].ndev; \ skb->dev = ndev; \ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ - ndev = cpsw_get_slave_ndev(priv, 1); \ - priv = netdev_priv(ndev); \ + ndev = cpsw->slaves[1].ndev; \ skb->dev = ndev; \ } \ } while (0) -#define cpsw_add_mcast(priv, addr) \ +#define cpsw_add_mcast(cpsw, priv, addr) \ do { \ - if (priv->data.dual_emac) { \ - struct cpsw_slave *slave = priv->slaves + \ + if (cpsw->data.dual_emac) { \ + struct cpsw_slave *slave = cpsw->slaves + \ priv->emac_port; \ - int slave_port = cpsw_get_slave_port(priv, \ + int slave_port = cpsw_get_slave_port( \ slave->slave_num); \ - cpsw_ale_add_mcast(priv->ale, addr, \ + cpsw_ale_add_mcast(cpsw->ale, addr, \ 1 << slave_port | ALE_PORT_HOST, \ ALE_VLAN, slave->port_vlan, 0); \ } else { \ - cpsw_ale_add_mcast(priv->ale, addr, \ + cpsw_ale_add_mcast(cpsw->ale, addr, \ ALE_ALL_PORTS, \ 0, 0, 0); \ } \ } while (0) -static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) +static inline int cpsw_get_slave_port(u32 slave_num) { return slave_num + 1; } static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_ale *ale = priv->ale; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_ale *ale = cpsw->ale; int i; - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { bool flag = false; /* Enabling promiscuous mode for one interface will be * common for both the interfaces as they share * the same hardware resource. 
*/ - for (i = 0; i < priv->data.slaves; i++) - if (priv->slaves[i].ndev->flags & IFF_PROMISC) + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev->flags & IFF_PROMISC) flag = true; if (!enable && flag) { @@ -579,7 +568,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) unsigned long timeout = jiffies + HZ; /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */ - for (i = 0; i <= priv->data.slaves; i++) { + for (i = 0; i <= cpsw->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 1); cpsw_ale_control_set(ale, i, @@ -606,7 +595,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */ - for (i = 0; i <= priv->data.slaves; i++) { + for (i = 0; i <= cpsw->data.slaves; i++) { cpsw_ale_control_set(ale, i, ALE_PORT_NOLEARN, 0); cpsw_ale_control_set(ale, i, @@ -620,17 +609,18 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int vid; - if (priv->data.dual_emac) - vid = priv->slaves[priv->emac_port].port_vlan; + if (cpsw->data.dual_emac) + vid = cpsw->slaves[priv->emac_port].port_vlan; else - vid = priv->data.default_vlan; + vid = cpsw->data.default_vlan; if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ cpsw_set_promiscious(ndev, true); - cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); + cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI); return; } else { /* Disable promiscuous mode */ @@ -638,51 +628,54 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) } /* Restore allmulti on vlans if necessary */ - cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); + cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid); + cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; /* program multicast address list into ALE register */ netdev_for_each_mc_addr(ha, ndev) { - cpsw_add_mcast(priv, (u8 *)ha->addr); + cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr); } } } -static void cpsw_intr_enable(struct cpsw_priv *priv) +static void cpsw_intr_enable(struct cpsw_common *cpsw) { - __raw_writel(0xFF, &priv->wr_regs->tx_en); - __raw_writel(0xFF, &priv->wr_regs->rx_en); + __raw_writel(0xFF, &cpsw->wr_regs->tx_en); + __raw_writel(0xFF, &cpsw->wr_regs->rx_en); - cpdma_ctlr_int_ctrl(priv->dma, true); + cpdma_ctlr_int_ctrl(cpsw->dma, true); return; } -static void cpsw_intr_disable(struct cpsw_priv *priv) +static void cpsw_intr_disable(struct cpsw_common *cpsw) { - __raw_writel(0, &priv->wr_regs->tx_en); - __raw_writel(0, &priv->wr_regs->rx_en); + __raw_writel(0, &cpsw->wr_regs->tx_en); + __raw_writel(0, &cpsw->wr_regs->rx_en); - cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_ctlr_int_ctrl(cpsw->dma, false); return; } static void cpsw_tx_handler(void *token, int len, int status) { + struct netdev_queue *txq; struct sk_buff *skb = token; struct net_device *ndev = skb->dev; - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); /* Check whether the queue is stopped due to stalled tx dma; if the * queue is stopped then start the queue as we have free desc for tx */ - if (unlikely(netif_queue_stopped(ndev))) - 
netif_wake_queue(ndev); - cpts_tx_timestamp(priv->cpts, skb); + txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); + if (unlikely(netif_tx_queue_stopped(txq))) + netif_tx_wake_queue(txq); + + cpts_tx_timestamp(cpsw->cpts, skb); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); @@ -690,22 +683,23 @@ static void cpsw_tx_handler(void *token, int len, int status) static void cpsw_rx_handler(void *token, int len, int status) { + struct cpdma_chan *ch; struct sk_buff *skb = token; struct sk_buff *new_skb; struct net_device *ndev = skb->dev; - struct cpsw_priv *priv = netdev_priv(ndev); int ret = 0; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); + cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { bool ndev_status = false; - struct cpsw_slave *slave = priv->slaves; + struct cpsw_slave *slave = cpsw->slaves; int n; - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { /* In dual emac mode check for all interfaces */ - for (n = priv->data.slaves; n; n--, slave++) + for (n = cpsw->data.slaves; n; n--, slave++) if (netif_running(slave->ndev)) ndev_status = true; } @@ -726,10 +720,11 @@ static void cpsw_rx_handler(void *token, int len, int status) return; } - new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); + new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max); if (new_skb) { + skb_copy_queue_mapping(new_skb, skb); skb_put(skb, len); - cpts_rx_timestamp(priv->cpts, skb); + cpts_rx_timestamp(cpsw->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -741,83 +736,117 @@ static void cpsw_rx_handler(void *token, int len, int status) } requeue: - ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, - skb_tailroom(new_skb), 0); + if (netif_dormant(ndev)) { + dev_kfree_skb_any(new_skb); + return; + } + + ch = cpsw->rxch[skb_get_queue_mapping(new_skb)]; + ret = cpdma_chan_submit(ch, new_skb, new_skb->data, + skb_tailroom(new_skb), 0); if (WARN_ON(ret < 0)) dev_kfree_skb_any(new_skb); } static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) { - struct cpsw_priv *priv = dev_id; + struct cpsw_common *cpsw = dev_id; - writel(0, &priv->wr_regs->tx_en); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + writel(0, &cpsw->wr_regs->tx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX); - if (priv->quirk_irq) { - disable_irq_nosync(priv->irqs_table[1]); - priv->tx_irq_disabled = true; + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[1]); + cpsw->tx_irq_disabled = true; } - napi_schedule(&priv->napi_tx); + napi_schedule(&cpsw->napi_tx); return IRQ_HANDLED; } static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) { - struct cpsw_priv *priv = dev_id; + struct cpsw_common *cpsw = dev_id; - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - writel(0, &priv->wr_regs->rx_en); + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX); + writel(0, &cpsw->wr_regs->rx_en); - if (priv->quirk_irq) { - disable_irq_nosync(priv->irqs_table[0]); - priv->rx_irq_disabled = true; + if (cpsw->quirk_irq) { + disable_irq_nosync(cpsw->irqs_table[0]); + cpsw->rx_irq_disabled = true; } - napi_schedule(&priv->napi_rx); + napi_schedule(&cpsw->napi_rx); return IRQ_HANDLED; } static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) { - struct cpsw_priv *priv = napi_to_priv(napi_tx); - int num_tx; + u32 ch_map; + int num_tx, ch; + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + + /* 
process every unprocessed channel */ + ch_map = cpdma_ctrl_txchs_state(cpsw->dma); + for (ch = 0, num_tx = 0; num_tx < budget; ch_map >>= 1, ch++) { + if (!ch_map) { + ch_map = cpdma_ctrl_txchs_state(cpsw->dma); + if (!ch_map) + break; + + ch = 0; + } + + if (!(ch_map & 0x01)) + continue; + + num_tx += cpdma_chan_process(cpsw->txch[ch], budget - num_tx); + } - num_tx = cpdma_chan_process(priv->txch, budget); if (num_tx < budget) { napi_complete(napi_tx); - writel(0xff, &priv->wr_regs->tx_en); - if (priv->quirk_irq && priv->tx_irq_disabled) { - priv->tx_irq_disabled = false; - enable_irq(priv->irqs_table[1]); + writel(0xff, &cpsw->wr_regs->tx_en); + if (cpsw->quirk_irq && cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); } } - if (num_tx) - cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx); - return num_tx; } static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) { - struct cpsw_priv *priv = napi_to_priv(napi_rx); - int num_rx; + u32 ch_map; + int num_rx, ch; + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + + /* process every unprocessed channel */ + ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); + for (ch = 0, num_rx = 0; num_rx < budget; ch_map >>= 1, ch++) { + if (!ch_map) { + ch_map = cpdma_ctrl_rxchs_state(cpsw->dma); + if (!ch_map) + break; + + ch = 0; + } + + if (!(ch_map & 0x01)) + continue; + + num_rx += cpdma_chan_process(cpsw->rxch[ch], budget - num_rx); + } - num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { napi_complete(napi_rx); - writel(0xff, &priv->wr_regs->rx_en); - if (priv->quirk_irq && priv->rx_irq_disabled) { - priv->rx_irq_disabled = false; - enable_irq(priv->irqs_table[0]); + writel(0xff, &cpsw->wr_regs->rx_en); + if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); } } - if (num_rx) - cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); - return num_rx; } @@ -850,17 +879,18 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, struct phy_device *phy = slave->phy; u32 mac_control = 0; u32 slave_port; + struct cpsw_common *cpsw = priv->cpsw; if (!phy) return; - slave_port = cpsw_get_slave_port(priv, slave->slave_num); + slave_port = cpsw_get_slave_port(slave->slave_num); if (phy->link) { - mac_control = priv->data.mac_control; + mac_control = cpsw->data.mac_control; /* enable forwarding */ - cpsw_ale_control_set(priv->ale, slave_port, + cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); if (phy->speed == 1000) @@ -884,7 +914,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, } else { mac_control = 0; /* disable forwarding */ - cpsw_ale_control_set(priv->ale, slave_port, + cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); } @@ -906,19 +936,19 @@ static void cpsw_adjust_link(struct net_device *ndev) if (link) { netif_carrier_on(ndev); if (netif_running(ndev)) - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); } else { netif_carrier_off(ndev); - netif_stop_queue(ndev); + netif_tx_stop_all_queues(ndev); } } static int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - coal->rx_coalesce_usecs = priv->coal_intvl; + coal->rx_coalesce_usecs = cpsw->coal_intvl; return 0; } @@ -931,11 +961,12 @@ static int cpsw_set_coalesce(struct net_device *ndev, u32 prescale = 0; u32 addnl_dvdr = 1; u32 coal_intvl = 0; + struct cpsw_common *cpsw 
= priv->cpsw; coal_intvl = coal->rx_coalesce_usecs; - int_ctrl = readl(&priv->wr_regs->int_control); - prescale = priv->bus_freq_mhz * 4; + int_ctrl = readl(&cpsw->wr_regs->int_control); + prescale = cpsw->bus_freq_mhz * 4; if (!coal->rx_coalesce_usecs) { int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); @@ -963,53 +994,69 @@ static int cpsw_set_coalesce(struct net_device *ndev, } num_interrupts = (1000 * addnl_dvdr) / coal_intvl; - writel(num_interrupts, &priv->wr_regs->rx_imax); - writel(num_interrupts, &priv->wr_regs->tx_imax); + writel(num_interrupts, &cpsw->wr_regs->rx_imax); + writel(num_interrupts, &cpsw->wr_regs->tx_imax); int_ctrl |= CPSW_INTPACEEN; int_ctrl &= (~CPSW_INTPRESCALE_MASK); int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); update_return: - writel(int_ctrl, &priv->wr_regs->int_control); + writel(int_ctrl, &cpsw->wr_regs->int_control); cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); - if (priv->data.dual_emac) { - int i; - - for (i = 0; i < priv->data.slaves; i++) { - priv = netdev_priv(priv->slaves[i].ndev); - priv->coal_intvl = coal_intvl; - } - } else { - priv->coal_intvl = coal_intvl; - } + cpsw->coal_intvl = coal_intvl; return 0; } static int cpsw_get_sset_count(struct net_device *ndev, int sset) { + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + switch (sset) { case ETH_SS_STATS: - return CPSW_STATS_LEN; + return (CPSW_STATS_COMMON_LEN + + (cpsw->rx_ch_num + cpsw->tx_ch_num) * + CPSW_STATS_CH_LEN); default: return -EOPNOTSUPP; } } +static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) +{ + int ch_stats_len; + int line; + int i; + + ch_stats_len = CPSW_STATS_CH_LEN * ch_num; + for (i = 0; i < ch_stats_len; i++) { + line = i % CPSW_STATS_CH_LEN; + snprintf(*p, ETH_GSTRING_LEN, + "%s DMA chan %d: %s", rx_dir ? 
"Rx" : "Tx", + i / CPSW_STATS_CH_LEN, + cpsw_gstrings_ch_stats[line].stat_string); + *p += ETH_GSTRING_LEN; + } +} + static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < CPSW_STATS_LEN; i++) { + for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) { memcpy(p, cpsw_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1); + cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0); break; } } @@ -1017,86 +1064,78 @@ static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) static void cpsw_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *stats, u64 *data) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpdma_chan_stats rx_stats; - struct cpdma_chan_stats tx_stats; - u32 val; u8 *p; - int i; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpdma_chan_stats ch_stats; + int i, l, ch; /* Collect Davinci CPDMA stats for Rx and Tx Channel */ - cpdma_chan_get_stats(priv->rxch, &rx_stats); - cpdma_chan_get_stats(priv->txch, &tx_stats); - - for (i = 0; i < CPSW_STATS_LEN; i++) { - switch (cpsw_gstrings_stats[i].type) { - case CPSW_STATS: - val = readl(priv->hw_stats + - cpsw_gstrings_stats[i].stat_offset); - data[i] = val; - break; - - case CPDMA_RX_STATS: - p = (u8 *)&rx_stats + - cpsw_gstrings_stats[i].stat_offset; - data[i] = *(u32 *)p; - break; + for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) + data[l] = readl(cpsw->hw_stats + + cpsw_gstrings_stats[l].stat_offset); + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + cpdma_chan_get_stats(cpsw->rxch[ch], &ch_stats); + for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { + p = (u8 *)&ch_stats + + cpsw_gstrings_ch_stats[i].stat_offset; + data[l] = *(u32 *)p; + } + } - case CPDMA_TX_STATS: - p = (u8 *)&tx_stats + - cpsw_gstrings_stats[i].stat_offset; - data[i] = *(u32 *)p; - break; + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_get_stats(cpsw->txch[ch], &ch_stats); + for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { + p = (u8 *)&ch_stats + + cpsw_gstrings_ch_stats[i].stat_offset; + data[l] = *(u32 *)p; } } } -static int cpsw_common_res_usage_state(struct cpsw_priv *priv) +static int cpsw_common_res_usage_state(struct cpsw_common *cpsw) { u32 i; u32 usage_count = 0; - if (!priv->data.dual_emac) + if (!cpsw->data.dual_emac) return 0; - for (i = 0; i < priv->data.slaves; i++) - if (priv->slaves[i].open_stat) + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].open_stat) usage_count++; return usage_count; } -static inline int cpsw_tx_packet_submit(struct net_device *ndev, - struct cpsw_priv *priv, struct sk_buff *skb) +static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv, + struct sk_buff *skb, + struct cpdma_chan *txch) { - if (!priv->data.dual_emac) - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 0); + struct cpsw_common *cpsw = priv->cpsw; - if (ndev == cpsw_get_slave_ndev(priv, 0)) - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 1); - else - return cpdma_chan_submit(priv->txch, skb, skb->data, - skb->len, 2); + return cpdma_chan_submit(txch, skb, skb->data, skb->len, + priv->emac_port + cpsw->data.dual_emac); } static inline void cpsw_add_dual_emac_def_ale_entries( struct cpsw_priv *priv, struct cpsw_slave *slave, u32 slave_port) { + struct cpsw_common *cpsw = priv->cpsw; u32 port_mask = 1 << slave_port | ALE_PORT_HOST; - if (priv->version == 
CPSW_VERSION_1) + if (cpsw->version == CPSW_VERSION_1) slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); else slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); - cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, + cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask, port_mask, port_mask, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, + HOST_PORT_NUM, ALE_VLAN | + ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave *slave) @@ -1110,13 +1149,14 @@ static void soft_reset_slave(struct cpsw_slave *slave) static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { u32 slave_port; + struct cpsw_common *cpsw = priv->cpsw; soft_reset_slave(slave); /* setup priority mapping */ __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_1: slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); break; @@ -1128,17 +1168,17 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } /* setup max packet size, and mac address */ - __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen); + __raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen); cpsw_set_slave_mac(slave, priv); slave->mac_control = 0; /* no link yet */ - slave_port = cpsw_get_slave_port(priv, slave->slave_num); + slave_port = cpsw_get_slave_port(slave->slave_num); - if (priv->data.dual_emac) + if (cpsw->data.dual_emac) cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); else - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); if (slave->data->phy_node) { @@ -1168,81 +1208,121 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) phy_start(slave->phy); /* Configure GMII_SEL register */ - cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num); + cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num); } static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) { - const int vlan = priv->data.default_vlan; + struct cpsw_common *cpsw = priv->cpsw; + const int vlan = cpsw->data.default_vlan; u32 reg; int i; int unreg_mcast_mask; - reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : + reg = (cpsw->version == CPSW_VERSION_1) ? 
CPSW1_PORT_VLAN : CPSW2_PORT_VLAN; - writel(vlan, &priv->host_port_regs->port_vlan); + writel(vlan, &cpsw->host_port_regs->port_vlan); - for (i = 0; i < priv->data.slaves; i++) - slave_write(priv->slaves + i, vlan, reg); + for (i = 0; i < cpsw->data.slaves; i++) + slave_write(cpsw->slaves + i, vlan, reg); if (priv->ndev->flags & IFF_ALLMULTI) unreg_mcast_mask = ALE_ALL_PORTS; else unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; - cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS, + cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, ALE_ALL_PORTS, unreg_mcast_mask); } static void cpsw_init_host_port(struct cpsw_priv *priv) { - u32 control_reg; u32 fifo_mode; + u32 control_reg; + struct cpsw_common *cpsw = priv->cpsw; /* soft reset the controller and initialize ale */ - soft_reset("cpsw", &priv->regs->soft_reset); - cpsw_ale_start(priv->ale); + soft_reset("cpsw", &cpsw->regs->soft_reset); + cpsw_ale_start(cpsw->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); - control_reg = readl(&priv->regs->control); + control_reg = readl(&cpsw->regs->control); control_reg |= CPSW_VLAN_AWARE; - writel(control_reg, &priv->regs->control); - fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : + writel(control_reg, &cpsw->regs->control); + fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE : CPSW_FIFO_NORMAL_MODE; - writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); + writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl); /* setup host port priority mapping */ __raw_writel(CPDMA_TX_PRIORITY_MAP, - &priv->host_port_regs->cpdma_tx_pri_map); - __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map); + &cpsw->host_port_regs->cpdma_tx_pri_map); + __raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map); - cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, + cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); - if (!priv->data.dual_emac) { - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, + if (!cpsw->data.dual_emac) { + cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, 0, 0); - cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2); } } -static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) +static int cpsw_fill_rx_channels(struct cpsw_priv *priv) +{ + struct cpsw_common *cpsw = priv->cpsw; + struct sk_buff *skb; + int ch_buf_num; + int ch, i, ret; + + for (ch = 0; ch < cpsw->rx_ch_num; ch++) { + ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]); + for (i = 0; i < ch_buf_num; i++) { + skb = __netdev_alloc_skb_ip_align(priv->ndev, + cpsw->rx_packet_max, + GFP_KERNEL); + if (!skb) { + cpsw_err(priv, ifup, "cannot allocate skb\n"); + return -ENOMEM; + } + + skb_set_queue_mapping(skb, ch); + ret = cpdma_chan_submit(cpsw->rxch[ch], skb, skb->data, + skb_tailroom(skb), 0); + if (ret < 0) { + cpsw_err(priv, ifup, + "cannot submit skb to channel %d rx, error %d\n", + ch, ret); + kfree_skb(skb); + return ret; + } + kmemleak_not_leak(skb); + } + + cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n", + ch, ch_buf_num); + } + + return 0; +} + +static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw) { u32 slave_port; - slave_port = cpsw_get_slave_port(priv, slave->slave_num); + slave_port = cpsw_get_slave_port(slave->slave_num); if (!slave->phy) 
return; phy_stop(slave->phy); phy_disconnect(slave->phy); slave->phy = NULL; - cpsw_ale_control_set(priv->ale, slave_port, + cpsw_ale_control_set(cpsw->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); soft_reset_slave(slave); } @@ -1250,115 +1330,111 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - int i, ret; + struct cpsw_common *cpsw = priv->cpsw; + int ret; u32 reg; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (!cpsw_common_res_usage_state(priv)) - cpsw_intr_disable(priv); + if (!cpsw_common_res_usage_state(cpsw)) + cpsw_intr_disable(cpsw); netif_carrier_off(ndev); - reg = priv->version; + /* Notify the stack of the actual queue counts. */ + ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto err_cleanup; + } + + ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto err_cleanup; + } + + reg = cpsw->version; dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg)); /* initialize host and slave ports */ - if (!cpsw_common_res_usage_state(priv)) + if (!cpsw_common_res_usage_state(cpsw)) cpsw_init_host_port(priv); for_each_slave(priv, cpsw_slave_open, priv); /* Add default VLAN */ - if (!priv->data.dual_emac) + if (!cpsw->data.dual_emac) cpsw_add_default_vlan(priv); else - cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, + cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); - if (!cpsw_common_res_usage_state(priv)) { - struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); - int buf_num; - + if (!cpsw_common_res_usage_state(cpsw)) { /* setup tx dma to fixed prio and zero offset */ - cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); - cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); + cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1); + cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0); /* disable priority elevation */ - __raw_writel(0, &priv->regs->ptype); + __raw_writel(0, &cpsw->regs->ptype); /* enable statistics collection only on all ports */ - __raw_writel(0x7, &priv->regs->stat_port_en); + __raw_writel(0x7, &cpsw->regs->stat_port_en); /* Enable internal fifo flow control */ - writel(0x7, &priv->regs->flow_control); + writel(0x7, &cpsw->regs->flow_control); - napi_enable(&priv_sl0->napi_rx); - napi_enable(&priv_sl0->napi_tx); + napi_enable(&cpsw->napi_rx); + napi_enable(&cpsw->napi_tx); - if (priv_sl0->tx_irq_disabled) { - priv_sl0->tx_irq_disabled = false; - enable_irq(priv->irqs_table[1]); + if (cpsw->tx_irq_disabled) { + cpsw->tx_irq_disabled = false; + enable_irq(cpsw->irqs_table[1]); } - if (priv_sl0->rx_irq_disabled) { - priv_sl0->rx_irq_disabled = false; - enable_irq(priv->irqs_table[0]); + if (cpsw->rx_irq_disabled) { + cpsw->rx_irq_disabled = false; + enable_irq(cpsw->irqs_table[0]); } - buf_num = cpdma_chan_get_rx_buf_num(priv->dma); - for (i = 0; i < buf_num; i++) { - struct sk_buff *skb; + ret = cpsw_fill_rx_channels(priv); + if (ret < 0) + goto err_cleanup; - ret = -ENOMEM; - skb = __netdev_alloc_skb_ip_align(priv->ndev, - priv->rx_packet_max, GFP_KERNEL); - if 
(!skb) - goto err_cleanup; - ret = cpdma_chan_submit(priv->rxch, skb, skb->data, - skb_tailroom(skb), 0); - if (ret < 0) { - kfree_skb(skb); - goto err_cleanup; - } - kmemleak_not_leak(skb); - } - /* continue even if we didn't manage to submit all - * receive descs - */ - cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); - - if (cpts_register(&priv->pdev->dev, priv->cpts, - priv->data.cpts_clock_mult, - priv->data.cpts_clock_shift)) + if (cpts_register(cpsw->dev, cpsw->cpts, + cpsw->data.cpts_clock_mult, + cpsw->data.cpts_clock_shift)) dev_err(priv->dev, "error registering cpts device\n"); } /* Enable Interrupt pacing if configured */ - if (priv->coal_intvl != 0) { + if (cpsw->coal_intvl != 0) { struct ethtool_coalesce coal; - coal.rx_coalesce_usecs = priv->coal_intvl; + coal.rx_coalesce_usecs = cpsw->coal_intvl; cpsw_set_coalesce(ndev, &coal); } - cpdma_ctlr_start(priv->dma); - cpsw_intr_enable(priv); + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + + if (cpsw->data.dual_emac) + cpsw->slaves[priv->emac_port].open_stat = true; + + netif_tx_start_all_queues(ndev); - if (priv->data.dual_emac) - priv->slaves[priv->emac_port].open_stat = true; return 0; err_cleanup: - cpdma_ctlr_stop(priv->dma); - for_each_slave(priv, cpsw_slave_stop, priv); - pm_runtime_put_sync(&priv->pdev->dev); + cpdma_ctlr_stop(cpsw->dma); + for_each_slave(priv, cpsw_slave_stop, cpsw); + pm_runtime_put_sync(cpsw->dev); netif_carrier_off(priv->ndev); return ret; } @@ -1366,25 +1442,24 @@ err_cleanup: static int cpsw_ndo_stop(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; cpsw_info(priv, ifdown, "shutting down cpsw device\n"); - netif_stop_queue(priv->ndev); + netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); - if (cpsw_common_res_usage_state(priv) <= 1) { - struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); - - napi_disable(&priv_sl0->napi_rx); - napi_disable(&priv_sl0->napi_tx); - cpts_unregister(priv->cpts); - cpsw_intr_disable(priv); - cpdma_ctlr_stop(priv->dma); - cpsw_ale_stop(priv->ale); - } - for_each_slave(priv, cpsw_slave_stop, priv); - pm_runtime_put_sync(&priv->pdev->dev); - if (priv->data.dual_emac) - priv->slaves[priv->emac_port].open_stat = false; + if (cpsw_common_res_usage_state(cpsw) <= 1) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); + cpts_unregister(cpsw->cpts); + cpsw_intr_disable(cpsw); + cpdma_ctlr_stop(cpsw->dma); + cpsw_ale_stop(cpsw->ale); + } + for_each_slave(priv, cpsw_slave_stop, cpsw); + pm_runtime_put_sync(cpsw->dev); + if (cpsw->data.dual_emac) + cpsw->slaves[priv->emac_port].open_stat = false; return 0; } @@ -1392,7 +1467,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - int ret; + struct cpsw_common *cpsw = priv->cpsw; + struct netdev_queue *txq; + struct cpdma_chan *txch; + int ret, q_idx; netif_trans_update(ndev); @@ -1403,12 +1481,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, } if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - priv->cpts->tx_enable) + cpsw->cpts->tx_enable) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; skb_tx_timestamp(skb); - ret = cpsw_tx_packet_submit(ndev, priv, skb); + q_idx = skb_get_queue_mapping(skb); + if (q_idx >= cpsw->tx_ch_num) + q_idx = q_idx % cpsw->tx_ch_num; + + txch = cpsw->txch[q_idx]; + ret = cpsw_tx_packet_submit(priv, skb, txch); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); goto 
fail; @@ -1417,24 +1500,27 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, /* If there is no more tx desc left free then we need to * tell the kernel to stop sending us tx frames. */ - if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) - netif_stop_queue(ndev); + if (unlikely(!cpdma_check_free_tx_desc(txch))) { + txq = netdev_get_tx_queue(ndev, q_idx); + netif_tx_stop_queue(txq); + } return NETDEV_TX_OK; fail: ndev->stats.tx_dropped++; - netif_stop_queue(ndev); + txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); + netif_tx_stop_queue(txq); return NETDEV_TX_BUSY; } #ifdef CONFIG_TI_CPTS -static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) +static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw) { - struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; + struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave]; u32 ts_en, seq_id; - if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { + if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) { slave_write(slave, 0, CPSW1_TS_CTL); return; } @@ -1442,10 +1528,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - if (priv->cpts->tx_enable) + if (cpsw->cpts->tx_enable) ts_en |= CPSW_V1_TS_TX_EN; - if (priv->cpts->rx_enable) + if (cpsw->cpts->rx_enable) ts_en |= CPSW_V1_TS_RX_EN; slave_write(slave, ts_en, CPSW1_TS_CTL); @@ -1455,32 +1541,33 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) { struct cpsw_slave *slave; + struct cpsw_common *cpsw = priv->cpsw; u32 ctrl, mtype; - if (priv->data.dual_emac) - slave = &priv->slaves[priv->emac_port]; + if (cpsw->data.dual_emac) + slave = &cpsw->slaves[priv->emac_port]; else - slave = &priv->slaves[priv->data.active_slave]; + slave = &cpsw->slaves[cpsw->data.active_slave]; ctrl = slave_read(slave, CPSW2_CONTROL); - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_2: ctrl &= ~CTRL_V2_ALL_TS_MASK; - if (priv->cpts->tx_enable) + if (cpsw->cpts->tx_enable) ctrl |= CTRL_V2_TX_TS_BITS; - if (priv->cpts->rx_enable) + if (cpsw->cpts->rx_enable) ctrl |= CTRL_V2_RX_TS_BITS; break; case CPSW_VERSION_3: default: ctrl &= ~CTRL_V3_ALL_TS_MASK; - if (priv->cpts->tx_enable) + if (cpsw->cpts->tx_enable) ctrl |= CTRL_V3_TX_TS_BITS; - if (priv->cpts->rx_enable) + if (cpsw->cpts->rx_enable) ctrl |= CTRL_V3_RX_TS_BITS; break; } @@ -1489,18 +1576,19 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); slave_write(slave, ctrl, CPSW2_CONTROL); - __raw_writel(ETH_P_1588, &priv->regs->ts_ltype); + __raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype); } static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = priv->cpts; struct hwtstamp_config cfg; + struct cpsw_common *cpsw = priv->cpsw; + struct cpts *cpts = cpsw->cpts; - if (priv->version != CPSW_VERSION_1 && - priv->version != CPSW_VERSION_2 && - priv->version != CPSW_VERSION_3) + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) return -EOPNOTSUPP; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -1540,9 +1628,9 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_1: - cpsw_hwtstamp_v1(priv); + 
cpsw_hwtstamp_v1(cpsw); break; case CPSW_VERSION_2: case CPSW_VERSION_3: @@ -1557,13 +1645,13 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) { - struct cpsw_priv *priv = netdev_priv(dev); - struct cpts *cpts = priv->cpts; + struct cpsw_common *cpsw = ndev_to_cpsw(dev); + struct cpts *cpts = cpsw->cpts; struct hwtstamp_config cfg; - if (priv->version != CPSW_VERSION_1 && - priv->version != CPSW_VERSION_2 && - priv->version != CPSW_VERSION_3) + if (cpsw->version != CPSW_VERSION_1 && + cpsw->version != CPSW_VERSION_2 && + cpsw->version != CPSW_VERSION_3) return -EOPNOTSUPP; cfg.flags = 0; @@ -1579,7 +1667,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct cpsw_priv *priv = netdev_priv(dev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); if (!netif_running(dev)) return -EINVAL; @@ -1593,27 +1682,33 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) #endif } - if (!priv->slaves[slave_no].phy) + if (!cpsw->slaves[slave_no].phy) return -EOPNOTSUPP; - return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd); + return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd); } static void cpsw_ndo_tx_timeout(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int ch; cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); ndev->stats.tx_errors++; - cpsw_intr_disable(priv); - cpdma_chan_stop(priv->txch); - cpdma_chan_start(priv->txch); - cpsw_intr_enable(priv); + cpsw_intr_disable(cpsw); + for (ch = 0; ch < cpsw->tx_ch_num; ch++) { + cpdma_chan_stop(cpsw->txch[ch]); + cpdma_chan_start(cpsw->txch[ch]); + } + + cpsw_intr_enable(cpsw); } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) { struct cpsw_priv *priv = netdev_priv(ndev); struct sockaddr *addr = (struct sockaddr *)p; + struct cpsw_common *cpsw = priv->cpsw; int flags = 0; u16 vid = 0; int ret; @@ -1621,27 +1716,27 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (priv->data.dual_emac) { - vid = priv->slaves[priv->emac_port].port_vlan; + if (cpsw->data.dual_emac) { + vid = cpsw->slaves[priv->emac_port].port_vlan; flags = ALE_VLAN; } - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, flags, vid); - cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM, + cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM, flags, vid); memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); for_each_slave(priv, cpsw_set_slave_mac, priv); - pm_runtime_put(&priv->pdev->dev); + pm_runtime_put(cpsw->dev); return 0; } @@ -1649,12 +1744,12 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) #ifdef CONFIG_NET_POLL_CONTROLLER static void cpsw_ndo_poll_controller(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - cpsw_intr_disable(priv); - 
cpsw_rx_interrupt(priv->irqs_table[0], priv); - cpsw_tx_interrupt(priv->irqs_table[1], priv); - cpsw_intr_enable(priv); + cpsw_intr_disable(cpsw); + cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw); + cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw); + cpsw_intr_enable(cpsw); } #endif @@ -1664,8 +1759,9 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, int ret; int unreg_mcast_mask = 0; u32 port_mask; + struct cpsw_common *cpsw = priv->cpsw; - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; if (priv->ndev->flags & IFF_ALLMULTI) @@ -1679,27 +1775,27 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; } - ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, + ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask, unreg_mcast_mask); if (ret != 0) return ret; - ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) goto clean_vid; - ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, vid, 0); if (ret != 0) goto clean_vlan_ucast; return 0; clean_vlan_ucast: - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN, vid); clean_vid: - cpsw_ale_del_vlan(priv->ale, vid, 0); + cpsw_ale_del_vlan(cpsw->ale, vid, 0); return ret; } @@ -1707,26 +1803,27 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int ret; - if (vid == priv->data.default_vlan) + if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual * EMAC port separation */ int i; - for (i = 0; i < priv->data.slaves; i++) { - if (vid == priv->slaves[i].port_vlan) + for (i = 0; i < cpsw->data.slaves; i++) { + if (vid == cpsw->slaves[i].port_vlan) return -EINVAL; } } @@ -1734,7 +1831,7 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); ret = cpsw_add_vlan_ale_entry(priv, vid); - pm_runtime_put(&priv->pdev->dev); + pm_runtime_put(cpsw->dev); return ret; } @@ -1742,39 +1839,40 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int ret; - if (vid == priv->data.default_vlan) + if (vid == cpsw->data.default_vlan) return 0; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); return ret; } - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { int i; - for (i = 0; i < priv->data.slaves; i++) { - if (vid == priv->slaves[i].port_vlan) + for (i = 0; i < cpsw->data.slaves; i++) { + if (vid == cpsw->slaves[i].port_vlan) return -EINVAL; } } dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); - ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + ret = 
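The dual-EMAC comment above is the constraint that both the add-vid and kill-vid handlers enforce. A self-contained sketch of that check (illustrative signature, not the driver's):

    /* Reject VLAN ids that collide with a slave port's reserved VLAN;
     * reusing one would merge traffic the port VLANs keep separated.
     */
    static int vid_is_reserved(const unsigned short *port_vlan, int nslaves,
                               unsigned short vid)
    {
            int i;

            for (i = 0; i < nslaves; i++)
                    if (vid == port_vlan[i])
                            return 1;
            return 0;
    }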
cpsw_ale_del_vlan(cpsw->ale, vid, 0); if (ret != 0) return ret; - ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) return ret; - ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); - pm_runtime_put(&priv->pdev->dev); + pm_runtime_put(cpsw->dev); return ret; } @@ -1797,31 +1895,32 @@ static const struct net_device_ops cpsw_netdev_ops = { static int cpsw_get_regs_len(struct net_device *ndev) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); + return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); } static void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) { - struct cpsw_priv *priv = netdev_priv(ndev); u32 *reg = p; + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); /* update CPSW IP version */ - regs->version = priv->version; + regs->version = cpsw->version; - cpsw_ale_dump(priv->ale, reg); + cpsw_ale_dump(cpsw->ale, reg); } static void cpsw_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct platform_device *pdev = to_platform_device(cpsw->dev); strlcpy(info->driver, "cpsw", sizeof(info->driver)); strlcpy(info->version, "1.0", sizeof(info->version)); - strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); + strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); } static u32 cpsw_get_msglevel(struct net_device *ndev) @@ -1840,7 +1939,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { #ifdef CONFIG_TI_CPTS - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | @@ -1849,7 +1948,7 @@ static int cpsw_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = priv->cpts->phc_index; + info->phc_index = cpsw->cpts->phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1872,10 +1971,11 @@ static int cpsw_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); - if (priv->slaves[slave_no].phy) - return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd); else return -EOPNOTSUPP; } @@ -1883,10 +1983,11 @@ static int cpsw_get_settings(struct net_device *ndev, static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) { struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); - if (priv->slaves[slave_no].phy) - return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd); else return -EOPNOTSUPP; } @@ -1894,22 +1995,24 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct cpsw_priv 
*priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); wol->supported = 0; wol->wolopts = 0; - if (priv->slaves[slave_no].phy) - phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol); + if (cpsw->slaves[slave_no].phy) + phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol); } static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct cpsw_priv *priv = netdev_priv(ndev); - int slave_no = cpsw_slave_index(priv); + struct cpsw_common *cpsw = priv->cpsw; + int slave_no = cpsw_slave_index(cpsw, priv); - if (priv->slaves[slave_no].phy) - return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol); + if (cpsw->slaves[slave_no].phy) + return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol); else return -EOPNOTSUPP; } @@ -1940,12 +2043,13 @@ static int cpsw_set_pauseparam(struct net_device *ndev, static int cpsw_ethtool_op_begin(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; int ret; - ret = pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(cpsw->dev); if (ret < 0) { cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); - pm_runtime_put_noidle(&priv->pdev->dev); + pm_runtime_put_noidle(cpsw->dev); } return ret; @@ -1956,11 +2060,185 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev) struct cpsw_priv *priv = netdev_priv(ndev); int ret; - ret = pm_runtime_put(&priv->pdev->dev); + ret = pm_runtime_put(priv->cpsw->dev); if (ret < 0) cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); } +static void cpsw_get_channels(struct net_device *ndev, + struct ethtool_channels *ch) +{ + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + + ch->max_combined = 0; + ch->max_rx = CPSW_MAX_QUEUES; + ch->max_tx = CPSW_MAX_QUEUES; + ch->max_other = 0; + ch->other_count = 0; + ch->rx_count = cpsw->rx_ch_num; + ch->tx_count = cpsw->tx_ch_num; + ch->combined_count = 0; +} + +static int cpsw_check_ch_settings(struct cpsw_common *cpsw, + struct ethtool_channels *ch) +{ + if (ch->combined_count) + return -EINVAL; + + /* verify we have at least one channel in each direction */ + if (!ch->rx_count || !ch->tx_count) + return -EINVAL; + + if (ch->rx_count > cpsw->data.channels || + ch->tx_count > cpsw->data.channels) + return -EINVAL; + + return 0; +} + +static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx) +{ + int (*poll)(struct napi_struct *, int); + struct cpsw_common *cpsw = priv->cpsw; + void (*handler)(void *, int, int); + struct cpdma_chan **chan; + int ret, *ch; + + if (rx) { + ch = &cpsw->rx_ch_num; + chan = cpsw->rxch; + handler = cpsw_rx_handler; + poll = cpsw_rx_poll; + } else { + ch = &cpsw->tx_ch_num; + chan = cpsw->txch; + handler = cpsw_tx_handler; + poll = cpsw_tx_poll; + } + + while (*ch < ch_num) { + chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx); + + if (IS_ERR(chan[*ch])) + return PTR_ERR(chan[*ch]); + + if (!chan[*ch]) + return -EINVAL; + + cpsw_info(priv, ifup, "created new %d %s channel\n", *ch, + (rx ? "rx" : "tx")); + (*ch)++; + } + + while (*ch > ch_num) { + (*ch)--; + + ret = cpdma_chan_destroy(chan[*ch]); + if (ret) + return ret; + + cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch, + (rx ? 
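cpsw_update_channels_res() above grows or shrinks the channel array toward the requested count; a request such as ethtool -L eth0 rx 2 tx 4 lands here via cpsw_set_channels(). A condensed sketch of the resize pattern, with make_chan()/free_chan() standing in for cpdma_chan_create()/cpdma_chan_destroy():

    #include <errno.h>

    void *make_chan(int idx);   /* stand-in for cpdma_chan_create() */
    void free_chan(void *ch);   /* stand-in for cpdma_chan_destroy() */

    /* Grow or shrink toward 'want' channels, destroying newest-first. */
    static int resize_channels(void **chan, int *cur, int want)
    {
            while (*cur < want) {
                    chan[*cur] = make_chan(*cur);
                    if (!chan[*cur])
                            return -ENOMEM;
                    (*cur)++;
            }
            while (*cur > want) {
                    (*cur)--;
                    free_chan(chan[*cur]);
            }
            return 0;
    }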
"rx" : "tx")); + } + + return 0; +} + +static int cpsw_update_channels(struct cpsw_priv *priv, + struct ethtool_channels *ch) +{ + int ret; + + ret = cpsw_update_channels_res(priv, ch->rx_count, 1); + if (ret) + return ret; + + ret = cpsw_update_channels_res(priv, ch->tx_count, 0); + if (ret) + return ret; + + return 0; +} + +static int cpsw_set_channels(struct net_device *ndev, + struct ethtool_channels *chs) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_slave *slave; + int i, ret; + + ret = cpsw_check_ch_settings(cpsw, chs); + if (ret < 0) + return ret; + + /* Disable NAPI scheduling */ + cpsw_intr_disable(cpsw); + + /* Stop all transmit queues for every network device. + * Disable re-using rx descriptors with dormant_on. + */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + netif_tx_stop_all_queues(slave->ndev); + netif_dormant_on(slave->ndev); + } + + /* Handle rest of tx packets and stop cpdma channels */ + cpdma_ctlr_stop(cpsw->dma); + ret = cpsw_update_channels(priv, chs); + if (ret) + goto err; + + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + + /* Inform stack about new count of queues */ + ret = netif_set_real_num_tx_queues(slave->ndev, + cpsw->tx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of tx queues\n"); + goto err; + } + + ret = netif_set_real_num_rx_queues(slave->ndev, + cpsw->rx_ch_num); + if (ret) { + dev_err(priv->dev, "cannot set real number of rx queues\n"); + goto err; + } + + /* Enable rx packets handling */ + netif_dormant_off(slave->ndev); + } + + if (cpsw_common_res_usage_state(cpsw)) { + ret = cpsw_fill_rx_channels(priv); + if (ret) + goto err; + + /* After this receive is started */ + cpdma_ctlr_start(cpsw->dma); + cpsw_intr_enable(cpsw); + } + + /* Resume transmit for every affected interface */ + for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) { + if (!(slave->ndev && netif_running(slave->ndev))) + continue; + netif_tx_start_all_queues(slave->ndev); + } + return 0; +err: + dev_err(priv->dev, "cannot update channels number, closing device\n"); + dev_close(ndev); + return ret; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1982,14 +2260,16 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_regs = cpsw_get_regs, .begin = cpsw_ethtool_op_begin, .complete = cpsw_ethtool_op_complete, + .get_channels = cpsw_get_channels, + .set_channels = cpsw_set_channels, }; -static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, +static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw, u32 slave_reg_ofs, u32 sliver_reg_ofs) { - void __iomem *regs = priv->regs; + void __iomem *regs = cpsw->regs; int slave_num = slave->slave_num; - struct cpsw_slave_data *data = priv->data.slave_data + slave_num; + struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num; slave->data = data; slave->regs = regs + slave_reg_ofs; @@ -2160,71 +2440,50 @@ no_phy_slave: return 0; } -static int cpsw_probe_dual_emac(struct platform_device *pdev, - struct cpsw_priv *priv) +static int cpsw_probe_dual_emac(struct cpsw_priv *priv) { - struct cpsw_platform_data *data = &priv->data; + struct cpsw_common *cpsw = priv->cpsw; + struct cpsw_platform_data *data = &cpsw->data; struct net_device *ndev; 
struct cpsw_priv *priv_sl2; - int ret = 0, i; + int ret = 0; - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); if (!ndev) { - dev_err(&pdev->dev, "cpsw: error allocating net_device\n"); + dev_err(cpsw->dev, "cpsw: error allocating net_device\n"); return -ENOMEM; } priv_sl2 = netdev_priv(ndev); - priv_sl2->data = *data; - priv_sl2->pdev = pdev; + priv_sl2->cpsw = cpsw; priv_sl2->ndev = ndev; priv_sl2->dev = &ndev->dev; priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); - priv_sl2->rx_packet_max = max(rx_packet_max, 128); if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, ETH_ALEN); - dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); + dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n", + priv_sl2->mac_addr); } else { random_ether_addr(priv_sl2->mac_addr); - dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); + dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n", + priv_sl2->mac_addr); } memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); - priv_sl2->slaves = priv->slaves; - priv_sl2->clk = priv->clk; - - priv_sl2->coal_intvl = 0; - priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; - - priv_sl2->regs = priv->regs; - priv_sl2->host_port_regs = priv->host_port_regs; - priv_sl2->wr_regs = priv->wr_regs; - priv_sl2->hw_stats = priv->hw_stats; - priv_sl2->dma = priv->dma; - priv_sl2->txch = priv->txch; - priv_sl2->rxch = priv->rxch; - priv_sl2->ale = priv->ale; priv_sl2->emac_port = 1; - priv->slaves[1].ndev = ndev; - priv_sl2->cpts = priv->cpts; - priv_sl2->version = priv->version; - - for (i = 0; i < priv->num_irqs; i++) { - priv_sl2->irqs_table[i] = priv->irqs_table[i]; - priv_sl2->num_irqs = priv->num_irqs; - } + cpsw->slaves[1].ndev = ndev; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; /* register the network device */ - SET_NETDEV_DEV(ndev, &pdev->dev); + SET_NETDEV_DEV(ndev, cpsw->dev); ret = register_netdev(ndev); if (ret) { - dev_err(&pdev->dev, "cpsw: error registering net device\n"); + dev_err(cpsw->dev, "cpsw: error registering net device\n"); free_netdev(ndev); ret = -ENODEV; } @@ -2272,6 +2531,7 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static int cpsw_probe(struct platform_device *pdev) { + struct clk *clk; struct cpsw_platform_data *data; struct net_device *ndev; struct cpsw_priv *priv; @@ -2282,10 +2542,14 @@ static int cpsw_probe(struct platform_device *pdev) const struct of_device_id *of_id; struct gpio_descs *mode; u32 slave_offset, sliver_offset, slave_size; + struct cpsw_common *cpsw; int ret = 0, i; int irq; - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); + cpsw->dev = &pdev->dev; + + ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); if (!ndev) { dev_err(&pdev->dev, "error allocating net_device\n"); return -ENOMEM; @@ -2293,13 +2557,13 @@ static int cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); - priv->pdev = pdev; + priv->cpsw = cpsw; priv->ndev = ndev; priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); - priv->rx_packet_max = max(rx_packet_max, 128); - priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); - if (!priv->cpts) { + cpsw->rx_packet_max = max(rx_packet_max, 128); + cpsw->cpts = devm_kzalloc(&pdev->dev, 
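The structural point of the refactoring visible above: instead of mirroring a dozen fields into the second port's cpsw_priv, dual-EMAC now keeps shared state once in cpsw_common, and each netdev holds only a back-pointer plus its per-port data. A trimmed, illustrative layout (the real structs carry many more members):

    struct device;
    struct cpdma_ctlr;      /* shared DMA controller and channels */
    struct cpsw_ale;        /* shared address lookup engine */
    struct cpts;            /* shared PTP clock */

    struct cpsw_common_sketch {              /* one per switch instance */
            struct device *dev;
            struct cpdma_ctlr *dma;
            struct cpsw_ale *ale;
            struct cpts *cpts;
    };

    struct cpsw_priv_sketch {                /* one per net_device */
            struct cpsw_common_sketch *cpsw; /* set once at probe time */
            unsigned char mac_addr[6];
            int emac_port;
    };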
sizeof(struct cpts), GFP_KERNEL); + if (!cpsw->cpts) { dev_err(&pdev->dev, "error allocating cpts\n"); ret = -ENOMEM; goto clean_ndev_ret; @@ -2320,12 +2584,14 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(&priv->data, pdev)) { + if (cpsw_probe_dt(&cpsw->data, pdev)) { dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; goto clean_runtime_disable_ret; } - data = &priv->data; + data = &cpsw->data; + cpsw->rx_ch_num = 1; + cpsw->tx_ch_num = 1; if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); @@ -2337,27 +2603,26 @@ static int cpsw_probe(struct platform_device *pdev) memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); - priv->slaves = devm_kzalloc(&pdev->dev, + cpsw->slaves = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_slave) * data->slaves, GFP_KERNEL); - if (!priv->slaves) { + if (!cpsw->slaves) { ret = -ENOMEM; goto clean_runtime_disable_ret; } for (i = 0; i < data->slaves; i++) - priv->slaves[i].slave_num = i; + cpsw->slaves[i].slave_num = i; - priv->slaves[0].ndev = ndev; + cpsw->slaves[0].ndev = ndev; priv->emac_port = 0; - priv->clk = devm_clk_get(&pdev->dev, "fck"); - if (IS_ERR(priv->clk)) { + clk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(clk)) { dev_err(priv->dev, "fck is not found\n"); ret = -ENODEV; goto clean_runtime_disable_ret; } - priv->coal_intvl = 0; - priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; + cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); @@ -2365,7 +2630,7 @@ static int cpsw_probe(struct platform_device *pdev) ret = PTR_ERR(ss_regs); goto clean_runtime_disable_ret; } - priv->regs = ss_regs; + cpsw->regs = ss_regs; /* Need to enable clocks with runtime PM api to access module * registers @@ -2375,24 +2640,24 @@ static int cpsw_probe(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); goto clean_runtime_disable_ret; } - priv->version = readl(&priv->regs->id_ver); + cpsw->version = readl(&cpsw->regs->id_ver); pm_runtime_put_sync(&pdev->dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->wr_regs)) { - ret = PTR_ERR(priv->wr_regs); + cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cpsw->wr_regs)) { + ret = PTR_ERR(cpsw->wr_regs); goto clean_runtime_disable_ret; } memset(&dma_params, 0, sizeof(dma_params)); memset(&ale_params, 0, sizeof(ale_params)); - switch (priv->version) { + switch (cpsw->version) { case CPSW_VERSION_1: - priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; - priv->hw_stats = ss_regs + CPSW1_HW_STATS; + cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; + cpsw->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + cpsw->hw_stats = ss_regs + CPSW1_HW_STATS; dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; @@ -2404,9 +2669,9 @@ static int cpsw_probe(struct platform_device *pdev) case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4: - priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; - priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; - priv->hw_stats = ss_regs + CPSW2_HW_STATS; + cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; + cpsw->cpts->reg = 
ss_regs + CPSW2_CPTS_OFFSET; + cpsw->hw_stats = ss_regs + CPSW2_HW_STATS; dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; @@ -2417,13 +2682,14 @@ static int cpsw_probe(struct platform_device *pdev) (u32 __force) ss_res->start + CPSW2_BD_OFFSET; break; default: - dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); + dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); ret = -ENODEV; goto clean_runtime_disable_ret; } - for (i = 0; i < priv->data.slaves; i++) { - struct cpsw_slave *slave = &priv->slaves[i]; - cpsw_slave_init(slave, priv, slave_offset, sliver_offset); + for (i = 0; i < cpsw->data.slaves; i++) { + struct cpsw_slave *slave = &cpsw->slaves[i]; + + cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset); slave_offset += slave_size; sliver_offset += SLIVER_SIZE; } @@ -2443,19 +2709,16 @@ static int cpsw_probe(struct platform_device *pdev) dma_params.has_ext_regs = true; dma_params.desc_hw_addr = dma_params.desc_mem_phys; - priv->dma = cpdma_ctlr_create(&dma_params); - if (!priv->dma) { + cpsw->dma = cpdma_ctlr_create(&dma_params); + if (!cpsw->dma) { dev_err(priv->dev, "error initializing dma\n"); ret = -ENOMEM; goto clean_runtime_disable_ret; } - priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), - cpsw_tx_handler); - priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0), - cpsw_rx_handler); - - if (WARN_ON(!priv->txch || !priv->rxch)) { + cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); + cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1); + if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) { dev_err(priv->dev, "error initializing dma channels\n"); ret = -ENOMEM; goto clean_dma_ret; @@ -2466,8 +2729,8 @@ static int cpsw_probe(struct platform_device *pdev) ale_params.ale_entries = data->ale_entries; ale_params.ale_ports = data->slaves; - priv->ale = cpsw_ale_create(&ale_params); - if (!priv->ale) { + cpsw->ale = cpsw_ale_create(&ale_params); + if (!cpsw->ale) { dev_err(priv->dev, "error initializing ale engine\n"); ret = -ENODEV; goto clean_dma_ret; @@ -2484,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev) if (of_id) { pdev->id_entry = of_id->data; if (pdev->id_entry->driver_data) - priv->quirk_irq = true; + cpsw->quirk_irq = true; } /* Grab RX and TX IRQs. 
Note that we also have RX_THRESHOLD and @@ -2502,9 +2765,9 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - priv->irqs_table[0] = irq; + cpsw->irqs_table[0] = irq; ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, - 0, dev_name(&pdev->dev), priv); + 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { dev_err(priv->dev, "error attaching irq (%d)\n", ret); goto clean_ale_ret; @@ -2517,21 +2780,20 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - priv->irqs_table[1] = irq; + cpsw->irqs_table[1] = irq; ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt, - 0, dev_name(&pdev->dev), priv); + 0, dev_name(&pdev->dev), cpsw); if (ret < 0) { dev_err(priv->dev, "error attaching irq (%d)\n", ret); goto clean_ale_ret; } - priv->num_irqs = 2; ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); @@ -2545,8 +2807,8 @@ static int cpsw_probe(struct platform_device *pdev) cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", &ss_res->start, ndev->irq); - if (priv->data.dual_emac) { - ret = cpsw_probe_dual_emac(pdev, priv); + if (cpsw->data.dual_emac) { + ret = cpsw_probe_dual_emac(priv); if (ret) { cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); goto clean_ale_ret; @@ -2556,9 +2818,9 @@ static int cpsw_probe(struct platform_device *pdev) return 0; clean_ale_ret: - cpsw_ale_destroy(priv->ale); + cpsw_ale_destroy(cpsw->ale); clean_dma_ret: - cpdma_ctlr_destroy(priv->dma); + cpdma_ctlr_destroy(cpsw->dma); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); clean_ndev_ret: @@ -2569,7 +2831,7 @@ clean_ndev_ret: static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); int ret; ret = pm_runtime_get_sync(&pdev->dev); @@ -2578,17 +2840,17 @@ static int cpsw_remove(struct platform_device *pdev) return ret; } - if (priv->data.dual_emac) - unregister_netdev(cpsw_get_slave_ndev(priv, 1)); + if (cpsw->data.dual_emac) + unregister_netdev(cpsw->slaves[1].ndev); unregister_netdev(ndev); - cpsw_ale_destroy(priv->ale); - cpdma_ctlr_destroy(priv->dma); + cpsw_ale_destroy(cpsw->ale); + cpdma_ctlr_destroy(cpsw->dma); of_platform_depopulate(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - if (priv->data.dual_emac) - free_netdev(cpsw_get_slave_ndev(priv, 1)); + if (cpsw->data.dual_emac) + free_netdev(cpsw->slaves[1].ndev); free_netdev(ndev); return 0; } @@ -2598,14 +2860,14 @@ static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { int i; - for (i = 0; i < priv->data.slaves; i++) { - if (netif_running(priv->slaves[i].ndev)) - cpsw_ndo_stop(priv->slaves[i].ndev); + for (i = 0; i < cpsw->data.slaves; i++) { + if (netif_running(cpsw->slaves[i].ndev)) + 
cpsw_ndo_stop(cpsw->slaves[i].ndev); } } else { if (netif_running(ndev)) @@ -2613,7 +2875,7 @@ static int cpsw_suspend(struct device *dev) } /* Select sleep pin state */ - pinctrl_pm_select_sleep_state(&pdev->dev); + pinctrl_pm_select_sleep_state(dev); return 0; } @@ -2622,17 +2884,17 @@ static int cpsw_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = netdev_priv(ndev); /* Select default pin state */ - pinctrl_pm_select_default_state(&pdev->dev); + pinctrl_pm_select_default_state(dev); - if (priv->data.dual_emac) { + if (cpsw->data.dual_emac) { int i; - for (i = 0; i < priv->data.slaves; i++) { - if (netif_running(priv->slaves[i].ndev)) - cpsw_ndo_open(priv->slaves[i].ndev); + for (i = 0; i < cpsw->data.slaves; i++) { + if (netif_running(cpsw->slaves[i].ndev)) + cpsw_ndo_open(cpsw->slaves[i].ndev); } } else { if (netif_running(ndev)) diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 19e5f32a8a64..c3f35f11a8fd 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -86,7 +86,7 @@ struct cpdma_desc_pool { void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ int desc_size, mem_size; - int num_desc, used_desc; + int num_desc; struct device *dev; struct gen_pool *gen_pool; }; @@ -104,6 +104,7 @@ struct cpdma_ctlr { struct cpdma_desc_pool *pool; spinlock_t lock; struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; + int chan_num; }; struct cpdma_chan { @@ -123,6 +124,13 @@ struct cpdma_chan { int int_set, int_clear, td; }; +#define tx_chan_num(chan) (chan) +#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS) +#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS) +#define is_tx_chan(chan) (!is_rx_chan(chan)) +#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) +#define chan_linear(chan) __chan_linear((chan)->chan_num) + /* The following make access to common cpdma_ctlr params more readable */ #define dmaregs params.dmaregs #define num_chan params.num_chan @@ -148,7 +156,10 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) if (!pool) return; - WARN_ON(pool->used_desc); + WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), + "cpdma_desc_pool size %d != avail %d", + gen_pool_size(pool->gen_pool), + gen_pool_avail(pool->gen_pool)); if (pool->cpumap) dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, pool->phys); @@ -232,21 +243,14 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) static struct cpdma_desc __iomem * cpdma_desc_alloc(struct cpdma_desc_pool *pool) { - struct cpdma_desc __iomem *desc = NULL; - - desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool, - pool->desc_size); - if (desc) - pool->used_desc++; - - return desc; + return (struct cpdma_desc __iomem *) + gen_pool_alloc(pool->gen_pool, pool->desc_size); } static void cpdma_desc_free(struct cpdma_desc_pool *pool, struct cpdma_desc __iomem *desc, int num_desc) { gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size); - pool->used_desc--; } struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) @@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) ctlr->state = CPDMA_STATE_IDLE; ctlr->params = *params; ctlr->dev = params->dev; + ctlr->chan_num = 0; spin_lock_init(&ctlr->lock); ctlr->pool = 
cpdma_desc_pool_create(ctlr->dev, @@ -336,12 +341,14 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) } ctlr->state = CPDMA_STATE_TEARDOWN; + spin_unlock_irqrestore(&ctlr->lock, flags); for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { if (ctlr->channels[i]) cpdma_chan_stop(ctlr->channels[i]); } + spin_lock_irqsave(&ctlr->lock, flags); dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); @@ -403,13 +410,52 @@ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) } EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); +u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr) +{ + return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED); +} +EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state); + +u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr) +{ + return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED); +} +EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state); + +/** + * cpdma_chan_split_pool - Splits ctrl pool between all channels. + * Has to be called under ctlr lock + */ +static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) +{ + struct cpdma_desc_pool *pool = ctlr->pool; + struct cpdma_chan *chan; + int ch_desc_num; + int i; + + if (!ctlr->chan_num) + return; + + /* calculate average size of pool slice */ + ch_desc_num = pool->num_desc / ctlr->chan_num; + + /* split ctlr pool */ + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { + chan = ctlr->channels[i]; + if (chan) + chan->desc_num = ch_desc_num; + } +} + struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, - cpdma_handler_fn handler) + cpdma_handler_fn handler, int rx_type) { + int offset = chan_num * 4; struct cpdma_chan *chan; - int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; unsigned long flags; + chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num); + if (__chan_linear(chan_num) >= ctlr->num_chan) return NULL; @@ -451,14 +497,25 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, spin_lock_init(&chan->lock); ctlr->channels[chan_num] = chan; + ctlr->chan_num++; + + cpdma_chan_split_pool(ctlr); + spin_unlock_irqrestore(&ctlr->lock, flags); return chan; } EXPORT_SYMBOL_GPL(cpdma_chan_create); -int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr) +int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan) { - return ctlr->pool->num_desc / 2; + unsigned long flags; + int desc_num; + + spin_lock_irqsave(&chan->lock, flags); + desc_num = chan->desc_num; + spin_unlock_irqrestore(&chan->lock, flags); + + return desc_num; } EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num); @@ -475,6 +532,10 @@ int cpdma_chan_destroy(struct cpdma_chan *chan) if (chan->state != CPDMA_STATE_IDLE) cpdma_chan_stop(chan); ctlr->channels[chan->chan_num] = NULL; + ctlr->chan_num--; + + cpdma_chan_split_pool(ctlr); + spin_unlock_irqrestore(&ctlr->lock, flags); return 0; } diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 4b46cd6e9a3f..a07b22b12bc1 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -17,13 +17,6 @@ #define CPDMA_MAX_CHANNELS BITS_PER_LONG -#define tx_chan_num(chan) (chan) -#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS) -#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS) -#define is_tx_chan(chan) (!is_rx_chan(chan)) -#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) -#define chan_linear(chan) __chan_linear((chan)->chan_num) - #define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) #define CPDMA_EOI_RX_THRESH 0x0 @@ -79,8 
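The pool split above is plain integer division: every live channel is budgeted num_desc / chan_num descriptors, so creating or destroying a channel rebalances all the others, and any remainder simply stays unused. A standalone demo of the arithmetic with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            const int num_desc = 256;            /* whole descriptor pool */
            const int counts[] = { 1, 2, 3, 8 }; /* channel counts to try */

            for (int i = 0; i < 4; i++)
                    printf("%d channel(s) -> %d descriptors each\n",
                           counts[i], num_desc / counts[i]);
            return 0;   /* 256, 128, 85, 32 */
    }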
+72,8 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, - cpdma_handler_fn handler); -int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr); + cpdma_handler_fn handler, int rx_type); +int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan); int cpdma_chan_destroy(struct cpdma_chan *chan); int cpdma_chan_start(struct cpdma_chan *chan); int cpdma_chan_stop(struct cpdma_chan *chan); @@ -94,6 +87,8 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota); int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); +u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr); +u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr); bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); enum cpdma_control { diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 727a79f3c7dd..2fd94a5bc1f3 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -597,14 +597,14 @@ static u32 hash_get(u8 *addr) } /** - * hash_add - Hash function to add mac addr from hash table + * emac_hash_add - Hash function to add mac addr to hash table * @priv: The DaVinci EMAC private adapter structure * @mac_addr: mac address to add to hash table * * Adds mac address to the internal hash table * */ -static int hash_add(struct emac_priv *priv, u8 *mac_addr) +static int emac_hash_add(struct emac_priv *priv, u8 *mac_addr) { struct device *emac_dev = &priv->ndev->dev; u32 rc = 0; @@ -613,7 +613,7 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr) if (hash_value >= EMAC_NUM_MULTICAST_BITS) { if (netif_msg_drv(priv)) { - dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\ + dev_err(emac_dev, "DaVinci EMAC: emac_hash_add(): Invalid "\ "Hash %08x, should not be greater than %08x", hash_value, (EMAC_NUM_MULTICAST_BITS - 1)); } @@ -639,14 +639,14 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr) } /** - * hash_del - Hash function to delete mac addr from hash table + * emac_hash_del - Hash function to delete mac addr from hash table * @priv: The DaVinci EMAC private adapter structure * @mac_addr: mac address to delete from hash table * * Removes mac address from the internal hash table * */ -static int hash_del(struct emac_priv *priv, u8 *mac_addr) +static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr) { u32 hash_value; u32 hash_bit; @@ -696,10 +696,10 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr) switch (action) { case EMAC_MULTICAST_ADD: - update = hash_add(priv, mac_addr); + update = emac_hash_add(priv, mac_addr); break; case EMAC_MULTICAST_DEL: - update = hash_del(priv, mac_addr); + update = emac_hash_del(priv, mac_addr); break; case EMAC_ALL_MULTI_SET: update = 1; @@ -1870,10 +1870,10 @@ static int davinci_emac_probe(struct platform_device *pdev) goto no_pdata; } - priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), - emac_tx_handler); - priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH), - emac_rx_handler); + priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH, + emac_tx_handler, 0); + priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, + emac_rx_handler, 1); if (WARN_ON(!priv->txchan || !priv->rxchan)) { rc = -ENOMEM; goto no_cpdma_chan; diff --git a/drivers/net/fjes/fjes_main.c
b/drivers/net/fjes/fjes_main.c index 9006877c53f2..e46b1ebbbff4 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -97,7 +97,6 @@ static struct acpi_driver fjes_acpi_driver = { static struct platform_driver fjes_driver = { .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, }, .probe = fjes_probe, .remove = fjes_remove, diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 591af71eae56..284b97b6b258 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -84,8 +84,6 @@ struct ndis_recv_scale_cap { /* NDIS_RECEIVE_SCALE_CAPABILITIES */ #define NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 40 #define ITAB_NUM 128 -#define HASH_KEYLEN NDIS_RSS_HASH_SECRET_KEY_MAX_SIZE_REVISION_2 -extern u8 netvsc_hash_key[]; struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ struct ndis_obj_header hdr; @@ -175,7 +173,7 @@ struct rndis_device { struct rndis_message; struct netvsc_device; int netvsc_device_add(struct hv_device *device, void *additional_info); -int netvsc_device_remove(struct hv_device *device); +void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, @@ -490,6 +488,7 @@ struct nvsp_2_vsc_capability { u64 sriov:1; u64 ieee8021q:1; u64 correlation_id:1; + u64 teaming:1; }; }; } __packed; @@ -633,12 +632,34 @@ struct multi_send_data { u32 count; /* counter of batched packets */ }; +struct recv_comp_data { + u64 tid; /* transaction id */ + u32 status; +}; + +/* Netvsc Receive Slots Max */ +#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1) + +struct multi_recv_comp { + void *buf; /* queued receive completions */ + u32 first; /* first data entry */ + u32 next; /* next entry for writing */ +}; + struct netvsc_stats { u64 packets; u64 bytes; struct u64_stats_sync syncp; }; +struct netvsc_ethtool_stats { + unsigned long tx_scattered; + unsigned long tx_no_memory; + unsigned long tx_no_space; + unsigned long tx_too_big; + unsigned long tx_busy; +}; + struct netvsc_reconfig { struct list_head list; u32 event; @@ -668,6 +689,7 @@ struct net_device_context { /* Ethtool settings */ u8 duplex; u32 speed; + struct netvsc_ethtool_stats eth_stats; /* the device is going away */ bool start_remove; @@ -735,6 +757,9 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ + struct multi_recv_comp mrc[VRSS_CHANNEL_MAX]; + atomic_t num_outstanding_recvs; + atomic_t open_cnt; }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 410fb8e81376..2a9ccc4d9e3c 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -34,6 +34,89 @@ #include "hyperv_net.h" /* + * An API to support in-place processing of incoming VMBUS packets. 
+ */ +#define VMBUS_PKT_TRAILER 8 + +static struct vmpacket_descriptor * +get_next_pkt_raw(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + void *ring_buffer = hv_get_ring_buffer(ring_info); + struct vmpacket_descriptor *cur_desc; + u32 packetlen; + u32 dsize = ring_info->ring_datasize; + u32 delta = read_loc - ring_info->ring_buffer->read_index; + u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); + + if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) + return NULL; + + if ((read_loc + sizeof(*cur_desc)) > dsize) + return NULL; + + cur_desc = ring_buffer + read_loc; + packetlen = cur_desc->len8 << 3; + + /* + * If the packet under consideration is wrapping around, + * return failure. + */ + if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) + return NULL; + + return cur_desc; +} + +/* + * A helper function to step through packets "in-place" + * This API is to be called after each successful call + * get_next_pkt_raw(). + */ +static void put_pkt_raw(struct vmbus_channel *channel, + struct vmpacket_descriptor *desc) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + u32 read_loc = ring_info->priv_read_index; + u32 packetlen = desc->len8 << 3; + u32 dsize = ring_info->ring_datasize; + + BUG_ON((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize); + + /* + * Include the packet trailer. + */ + ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; +} + +/* + * This call commits the read index and potentially signals the host. + * Here is the pattern for using the "in-place" consumption APIs: + * + * while (get_next_pkt_raw() { + * process the packet "in-place"; + * put_pkt_raw(); + * } + * if (packets processed in place) + * commit_rd_index(); + */ +static void commit_rd_index(struct vmbus_channel *channel) +{ + struct hv_ring_buffer_info *ring_info = &channel->inbound; + /* + * Make sure all reads are done before we update the read index since + * the writer may start writing to the read area once the read index + * is updated. + */ + virt_rmb(); + ring_info->ring_buffer->read_index = ring_info->priv_read_index; + + if (hv_need_to_signal_on_read(ring_info)) + vmbus_set_event(channel); +} + +/* * Switch the data path from the synthetic interface to the VF * interface. 
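Putting the three in-place helpers above into the loop their comment describes; process_packet() is a hypothetical consumer, the other calls are the static helpers defined in this hunk:

    void process_packet(struct vmpacket_descriptor *desc); /* hypothetical */

    static void drain_ring(struct vmbus_channel *channel)
    {
            struct vmpacket_descriptor *desc;
            bool did_work = false;

            while ((desc = get_next_pkt_raw(channel)) != NULL) {
                    process_packet(desc);       /* handle data in the ring */
                    put_pkt_raw(channel, desc); /* step the private index */
                    did_work = true;
            }
            if (did_work)
                    commit_rd_index(channel);   /* publish, maybe signal host */
    }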
*/ @@ -59,7 +142,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) VM_PKT_DATA_INBAND, 0); } - static struct netvsc_device *alloc_net_device(void) { struct netvsc_device *net_device; @@ -74,17 +156,26 @@ static struct netvsc_device *alloc_net_device(void) return NULL; } + net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX * + sizeof(struct recv_comp_data)); + init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; + init_completion(&net_device->channel_init_wait); return net_device; } static void free_netvsc_device(struct netvsc_device *nvdev) { + int i; + + for (i = 0; i < VRSS_CHANNEL_MAX; i++) + vfree(nvdev->mrc[i].buf); + kfree(nvdev->cb_buffer); kfree(nvdev); } @@ -107,20 +198,20 @@ static struct netvsc_device *get_inbound_net_device(struct hv_device *device) goto get_in_err; if (net_device->destroy && - atomic_read(&net_device->num_outstanding_sends) == 0) + atomic_read(&net_device->num_outstanding_sends) == 0 && + atomic_read(&net_device->num_outstanding_recvs) == 0) net_device = NULL; get_in_err: return net_device; } - -static int netvsc_destroy_buf(struct hv_device *device) +static void netvsc_destroy_buf(struct hv_device *device) { struct nvsp_message *revoke_packet; - int ret = 0; struct net_device *ndev = hv_get_drvdata(device); struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); + int ret; /* * If we got a section count, it means we received a @@ -150,7 +241,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to send " "revoke receive buffer to netvsp\n"); - return ret; + return; } } @@ -165,7 +256,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to teardown receive buffer's gpadl\n"); - return ret; + return; } net_device->recv_buf_gpadl_handle = 0; } @@ -209,7 +300,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to send " "revoke send buffer to netvsp\n"); - return ret; + return; } } /* Teardown the gpadl on the vsp end */ @@ -223,7 +314,7 @@ static int netvsc_destroy_buf(struct hv_device *device) if (ret != 0) { netdev_err(ndev, "unable to teardown send buffer's gpadl\n"); - return ret; + return; } net_device->send_buf_gpadl_handle = 0; } @@ -233,8 +324,6 @@ static int netvsc_destroy_buf(struct hv_device *device) net_device->send_buf = NULL; } kfree(net_device->send_section_map); - - return ret; } static int netvsc_init_buf(struct hv_device *device) @@ -276,7 +365,6 @@ static int netvsc_init_buf(struct hv_device *device) goto cleanup; } - /* Notify the NetVsp of the gpadl handle */ init_packet = &net_device->channel_init_pkt; @@ -403,7 +491,7 @@ static int netvsc_init_buf(struct hv_device *device) /* Section count is simply the size divided by the section size. 
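The send-buffer bookkeeping just above reduces to two divisions: the buffer splits into fixed-size sections, and the free-section bitmap is sized in BITS_PER_LONG words. A runnable demo with illustrative sizes (the real values come from host negotiation):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long buf_size = 1024 * 1024; /* illustrative */
            unsigned long section_size = 6144;    /* illustrative */
            unsigned long sections = buf_size / section_size;

            printf("sections=%lu map_words=%lu\n",
                   sections, DIV_ROUND_UP(sections, 64UL));
            return 0;   /* sections=170 map_words=3 on 64-bit */
    }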
*/ net_device->send_section_cnt = - net_device->send_buf_size/net_device->send_section_size; + net_device->send_buf_size / net_device->send_section_size; dev_info(&device->device, "Send section size: %d, Section count:%d\n", net_device->send_section_size, net_device->send_section_cnt); @@ -412,8 +500,8 @@ static int netvsc_init_buf(struct hv_device *device) net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG); - net_device->send_section_map = - kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL); + net_device->send_section_map = kcalloc(net_device->map_words, + sizeof(ulong), GFP_KERNEL); if (net_device->send_section_map == NULL) { ret = -ENOMEM; goto cleanup; @@ -428,7 +516,6 @@ exit: return ret; } - /* Negotiate NVSP protocol version */ static int negotiate_nvsp_ver(struct hv_device *device, struct netvsc_device *net_device, @@ -468,9 +555,13 @@ static int negotiate_nvsp_ver(struct hv_device *device, init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; - if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) + if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) { init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1; + /* Teaming bit is needed to receive link speed updates */ + init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1; + } + ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@ -485,9 +576,10 @@ static int netvsc_connect_vsp(struct hv_device *device) struct netvsc_device *net_device; struct nvsp_message *init_packet; int ndis_version; - u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, + const u32 ver_list[] = { + NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 }; - int i, num_ver = 4; /* number of different NVSP versions */ + int i; net_device = get_outbound_net_device(device); if (!net_device) @@ -496,7 +588,7 @@ static int netvsc_connect_vsp(struct hv_device *device) init_packet = &net_device->channel_init_pkt; /* Negotiate the latest NVSP protocol supported */ - for (i = num_ver - 1; i >= 0; i--) + for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--) if (negotiate_nvsp_ver(device, net_device, init_packet, ver_list[i]) == 0) { net_device->nvsp_version = ver_list[i]; @@ -555,7 +647,7 @@ static void netvsc_disconnect_vsp(struct hv_device *device) /* * netvsc_device_remove - Callback when the root bus device is removed */ -int netvsc_device_remove(struct hv_device *device) +void netvsc_device_remove(struct hv_device *device) { struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); @@ -577,10 +669,8 @@ int netvsc_device_remove(struct hv_device *device) /* Release all resources */ vfree(net_device->sub_cb_buf); free_netvsc_device(net_device); - return 0; } - #define RING_AVAIL_PERCENT_HIWATER 20 #define RING_AVAIL_PERCENT_LOWATER 10 @@ -604,72 +694,79 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device, sync_change_bit(index, net_device->send_section_map); } +static void netvsc_send_tx_complete(struct netvsc_device *net_device, + struct vmbus_channel *incoming_channel, + struct hv_device *device, + struct vmpacket_descriptor *packet) +{ + struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id; + struct net_device *ndev = hv_get_drvdata(device); + struct net_device_context *net_device_ctx = netdev_priv(ndev); + struct vmbus_channel *channel = 
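negotiate_nvsp_ver() and the connect loop above always offer the newest protocol first and walk down the table, which the ARRAY_SIZE() change makes self-maintaining. A self-contained sketch of the policy; try_version() stands in for negotiate_nvsp_ver():

    int try_version(unsigned int ver);  /* returns 0 on mutual support */

    static int negotiate_highest(const unsigned int *vers, int n)
    {
            int i;

            for (i = n - 1; i >= 0; i--)    /* newest entry sits last */
                    if (try_version(vers[i]) == 0)
                            return (int)vers[i];
            return -1;                      /* no common version */
    }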
device->channel; + int num_outstanding_sends; + u16 q_idx = 0; + int queue_sends; + + /* Notify the layer above us */ + if (likely(skb)) { + struct hv_netvsc_packet *nvsc_packet + = (struct hv_netvsc_packet *)skb->cb; + u32 send_index = nvsc_packet->send_buf_index; + + if (send_index != NETVSC_INVALID_INDEX) + netvsc_free_send_slot(net_device, send_index); + q_idx = nvsc_packet->q_idx; + channel = incoming_channel; + + dev_kfree_skb_any(skb); + } + + num_outstanding_sends = + atomic_dec_return(&net_device->num_outstanding_sends); + queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]); + + if (net_device->destroy && num_outstanding_sends == 0) + wake_up(&net_device->wait_drain); + + if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && + !net_device_ctx->start_remove && + (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || + queue_sends < 1)) + netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); +} + static void netvsc_send_completion(struct netvsc_device *net_device, struct vmbus_channel *incoming_channel, struct hv_device *device, struct vmpacket_descriptor *packet) { struct nvsp_message *nvsp_packet; - struct hv_netvsc_packet *nvsc_packet; struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - u32 send_index; - struct sk_buff *skb; nvsp_packet = (struct nvsp_message *)((unsigned long)packet + - (packet->offset8 << 3)); + (packet->offset8 << 3)); - if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) || - (nvsp_packet->hdr.msg_type == - NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) || - (nvsp_packet->hdr.msg_type == - NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) || - (nvsp_packet->hdr.msg_type == - NVSP_MSG5_TYPE_SUBCHANNEL)) { + switch (nvsp_packet->hdr.msg_type) { + case NVSP_MSG_TYPE_INIT_COMPLETE: + case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: + case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE: + case NVSP_MSG5_TYPE_SUBCHANNEL: /* Copy the response back */ memcpy(&net_device->channel_init_pkt, nvsp_packet, sizeof(struct nvsp_message)); complete(&net_device->channel_init_wait); - } else if (nvsp_packet->hdr.msg_type == - NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { - int num_outstanding_sends; - u16 q_idx = 0; - struct vmbus_channel *channel = device->channel; - int queue_sends; - - /* Get the send context */ - skb = (struct sk_buff *)(unsigned long)packet->trans_id; - - /* Notify the layer above us */ - if (skb) { - nvsc_packet = (struct hv_netvsc_packet *) skb->cb; - send_index = nvsc_packet->send_buf_index; - if (send_index != NETVSC_INVALID_INDEX) - netvsc_free_send_slot(net_device, send_index); - q_idx = nvsc_packet->q_idx; - channel = incoming_channel; - dev_kfree_skb_any(skb); - } - - num_outstanding_sends = - atomic_dec_return(&net_device->num_outstanding_sends); - queue_sends = atomic_dec_return(&net_device-> - queue_sends[q_idx]); + break; - if (net_device->destroy && num_outstanding_sends == 0) - wake_up(&net_device->wait_drain); + case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: + netvsc_send_tx_complete(net_device, incoming_channel, + device, packet); + break; - if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && - !net_device_ctx->start_remove && - (hv_ringbuf_avail_percent(&channel->outbound) > - RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) - netif_tx_wake_queue(netdev_get_tx_queue( - ndev, q_idx)); - } else { - netdev_err(ndev, "Unknown send completion packet type- " - "%d received!!\n", nvsp_packet->hdr.msg_type); + default: + netdev_err(ndev, + "Unknown send 
completion type %d received!!\n", + nvsp_packet->hdr.msg_type); } - } static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) @@ -743,7 +840,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, return msg_size; } -static inline int netvsc_send_pkt( +static int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, struct netvsc_device *net_device, @@ -859,7 +956,7 @@ int netvsc_send(struct hv_device *device, struct sk_buff *skb) { struct netvsc_device *net_device; - int ret = 0, m_ret = 0; + int ret = 0; struct vmbus_channel *out_channel; u16 q_idx = packet->q_idx; u32 pktlen = packet->total_data_buflen, msd_len = 0; @@ -948,8 +1045,8 @@ int netvsc_send(struct hv_device *device, } if (msd_send) { - m_ret = netvsc_send_pkt(device, msd_send, net_device, - NULL, msd_skb); + int m_ret = netvsc_send_pkt(device, msd_send, net_device, + NULL, msd_skb); if (m_ret != 0) { netvsc_free_send_slot(net_device, @@ -968,49 +1065,121 @@ send_now: return ret; } -static void netvsc_send_recv_completion(struct hv_device *device, - struct vmbus_channel *channel, - struct netvsc_device *net_device, - u64 transaction_id, u32 status) +static int netvsc_send_recv_completion(struct vmbus_channel *channel, + u64 transaction_id, u32 status) { struct nvsp_message recvcompMessage; - int retries = 0; int ret; - struct net_device *ndev = hv_get_drvdata(device); recvcompMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; -retry_send_cmplt: /* Send the completion */ ret = vmbus_sendpacket(channel, &recvcompMessage, - sizeof(struct nvsp_message), transaction_id, - VM_PKT_COMP, 0); - if (ret == 0) { - /* success */ - /* no-op */ - } else if (ret == -EAGAIN) { - /* no more room...wait a bit and attempt to retry 3 times */ - retries++; - netdev_err(ndev, "unable to send receive completion pkt" - " (tid %llx)...retrying %d\n", transaction_id, retries); - - if (retries < 4) { - udelay(100); - goto retry_send_cmplt; - } else { - netdev_err(ndev, "unable to send receive " - "completion pkt (tid %llx)...give up retrying\n", - transaction_id); - } - } else { - netdev_err(ndev, "unable to send receive " - "completion pkt - %llx\n", transaction_id); + sizeof(struct nvsp_message_header) + sizeof(u32), + transaction_id, VM_PKT_COMP, 0); + + return ret; +} + +static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, + u32 *filled, u32 *avail) +{ + u32 first = nvdev->mrc[q_idx].first; + u32 next = nvdev->mrc[q_idx].next; + + *filled = (first > next) ? 
NETVSC_RECVSLOT_MAX - first + next : + next - first; + + *avail = NETVSC_RECVSLOT_MAX - *filled - 1; +} + +/* Read the first filled slot, no change to index */ +static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device + *nvdev, u16 q_idx) +{ + u32 filled, avail; + + if (!nvdev->mrc[q_idx].buf) + return NULL; + + count_recv_comp_slot(nvdev, q_idx, &filled, &avail); + if (!filled) + return NULL; + + return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first * + sizeof(struct recv_comp_data); +} + +/* Put the first filled slot back to available pool */ +static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) +{ + int num_recv; + + nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) % + NETVSC_RECVSLOT_MAX; + + num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); + + if (nvdev->destroy && num_recv == 0) + wake_up(&nvdev->wait_drain); +} + +/* Check and send pending recv completions */ +static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, + struct vmbus_channel *channel, u16 q_idx) +{ + struct recv_comp_data *rcd; + int ret; + + while (true) { + rcd = read_recv_comp_slot(nvdev, q_idx); + if (!rcd) + break; + + ret = netvsc_send_recv_completion(channel, rcd->tid, + rcd->status); + if (ret) + break; + + put_recv_comp_slot(nvdev, q_idx); } } +#define NETVSC_RCD_WATERMARK 80 + +/* Get next available slot */ +static inline struct recv_comp_data *get_recv_comp_slot( + struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) +{ + u32 filled, avail, next; + struct recv_comp_data *rcd; + + if (!nvdev->recv_section) + return NULL; + + if (!nvdev->mrc[q_idx].buf) + return NULL; + + if (atomic_read(&nvdev->num_outstanding_recvs) > + nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100) + netvsc_chk_recv_comp(nvdev, channel, q_idx); + + count_recv_comp_slot(nvdev, q_idx, &filled, &avail); + if (!avail) + return NULL; + + next = nvdev->mrc[q_idx].next; + rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data); + nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX; + + atomic_inc(&nvdev->num_outstanding_recvs); + + return rcd; +} + static void netvsc_receive(struct netvsc_device *net_device, struct vmbus_channel *channel, struct hv_device *device, @@ -1025,6 +1194,9 @@ static void netvsc_receive(struct netvsc_device *net_device, int count = 0; struct net_device *ndev = hv_get_drvdata(device); void *data; + int ret; + struct recv_comp_data *rcd; + u16 q_idx = channel->offermsg.offer.sub_channel_index; /* * All inbound packets other than send completion should be xfer page @@ -1069,13 +1241,29 @@ static void netvsc_receive(struct netvsc_device *net_device, /* Pass it to the upper layer */ status = rndis_filter_receive(device, netvsc_packet, &data, channel); + } + if (!net_device->mrc[q_idx].buf) { + ret = netvsc_send_recv_completion(channel, + vmxferpage_packet->d.trans_id, + status); + if (ret) + netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", + q_idx, vmxferpage_packet->d.trans_id, ret); + return; } - netvsc_send_recv_completion(device, channel, net_device, - vmxferpage_packet->d.trans_id, status); -} + rcd = get_recv_comp_slot(net_device, channel, q_idx); + if (!rcd) { + netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", + q_idx, vmxferpage_packet->d.trans_id); + return; + } + + rcd->tid = vmxferpage_packet->d.trans_id; + rcd->status = status; +} static void netvsc_send_table(struct hv_device *hdev, struct nvsp_message *nvmsg) @@ -1157,11 +1345,11 @@ static void netvsc_process_raw_pkt(struct 
hv_device *device, } } - void netvsc_channel_cb(void *context) { int ret; struct vmbus_channel *channel = (struct vmbus_channel *)context; + u16 q_idx = channel->offermsg.offer.sub_channel_index; struct hv_device *device; struct netvsc_device *net_device; u32 bytes_recvd; @@ -1213,8 +1401,6 @@ void netvsc_channel_cb(void *context) ndev, request_id, desc); - - } else { /* * We are done for this pass. @@ -1241,7 +1427,8 @@ void netvsc_channel_cb(void *context) if (bufferlen > NETVSC_PACKET_SIZE) kfree(buffer); - return; + + netvsc_chk_recv_comp(net_device, channel, q_idx); } /* @@ -1263,9 +1450,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) net_device->ring_size = ring_size; - /* Initialize the NetVSC channel extension */ - init_completion(&net_device->channel_init_wait); - set_per_channel_state(device->channel, net_device->cb_buffer); /* Open the channel */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3ba29fc80d05..2360e704e271 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -40,7 +40,6 @@ #include "hyperv_net.h" - #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) #define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ @@ -358,18 +357,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) struct rndis_message *rndis_msg; struct rndis_packet *rndis_pkt; u32 rndis_msg_size; - bool isvlan; - bool linear = false; struct rndis_per_packet_info *ppi; struct ndis_tcp_ip_checksum_info *csum_info; - struct ndis_tcp_lso_info *lso_info; int hdr_offset; u32 net_trans_info; u32 hash; u32 skb_length; struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf; - struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats); /* We will atmost need two pages to describe the rndis * header. We can only transmit MAX_PAGE_BUFFER_COUNT number @@ -377,22 +372,20 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) * more pages we try linearizing it. */ -check_size: skb_length = skb->len; num_data_pgs = netvsc_get_slots(skb) + 2; - if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { - net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", - num_data_pgs, skb->len); - ret = -EFAULT; - goto drop; - } else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { - if (skb_linearize(skb)) { - net_alert_ratelimited("failed to linearize skb\n"); - ret = -ENOMEM; + + if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { + ++net_device_ctx->eth_stats.tx_scattered; + + if (skb_linearize(skb)) + goto no_memory; + + num_data_pgs = netvsc_get_slots(skb) + 2; + if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { + ++net_device_ctx->eth_stats.tx_too_big; goto drop; } - linear = true; - goto check_size; } /* @@ -401,17 +394,14 @@ check_size: * structure. 
*/ ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE); - if (ret) { - netdev_err(net, "unable to alloc hv_netvsc_packet\n"); - ret = -ENOMEM; - goto drop; - } + if (ret) + goto no_memory; + /* Use the skb control buffer for building up the packet */ BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > FIELD_SIZEOF(struct sk_buff, cb)); packet = (struct hv_netvsc_packet *)skb->cb; - packet->q_idx = skb_get_queue_mapping(skb); packet->total_data_buflen = skb->len; @@ -420,8 +410,6 @@ check_size: memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE); - isvlan = skb->vlan_tci & VLAN_TAG_PRESENT; - /* Add the rndis header */ rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET; rndis_msg->msg_len = packet->total_data_buflen; @@ -440,7 +428,7 @@ check_size: *(u32 *)((void *)ppi + ppi->ppi_offset) = hash; } - if (isvlan) { + if (skb_vlan_tag_present(skb)) { struct ndis_pkt_8021q_info *vlan; rndis_msg_size += NDIS_VLAN_PPI_SIZE; @@ -461,8 +449,37 @@ check_size: * Setup the sendside checksum offload only if this is not a * GSO packet. */ - if (skb_is_gso(skb)) - goto do_lso; + if (skb_is_gso(skb)) { + struct ndis_tcp_lso_info *lso_info; + + rndis_msg_size += NDIS_LSO_PPI_SIZE; + ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, + TCP_LARGESEND_PKTINFO); + + lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + + ppi->ppi_offset); + + lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; + if (net_trans_info & (INFO_IPV4 << 16)) { + lso_info->lso_v2_transmit.ip_version = + NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; + ip_hdr(skb)->tot_len = 0; + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else { + lso_info->lso_v2_transmit.ip_version = + NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } + lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; + lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; + goto do_send; + } if ((skb->ip_summed == CHECKSUM_NONE) || (skb->ip_summed == CHECKSUM_UNNECESSARY)) @@ -495,7 +512,7 @@ check_size: ret = skb_cow_head(skb, 0); if (ret) - goto drop; + goto no_memory; uh = udp_hdr(skb); udp_len = ntohs(uh->len); @@ -509,35 +526,6 @@ check_size: csum_info->transmit.udp_checksum = 0; } - goto do_send; - -do_lso: - rndis_msg_size += NDIS_LSO_PPI_SIZE; - ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, - TCP_LARGESEND_PKTINFO); - - lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + - ppi->ppi_offset); - - lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; - if (net_trans_info & (INFO_IPV4 << 16)) { - lso_info->lso_v2_transmit.ip_version = - NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4; - ip_hdr(skb)->tot_len = 0; - ip_hdr(skb)->check = 0; - tcp_hdr(skb)->check = - ~csum_tcpudp_magic(ip_hdr(skb)->saddr, - ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - } else { - lso_info->lso_v2_transmit.ip_version = - NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6; - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); - } - lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset; - lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size; do_send: /* Start filling in the page buffers with the rndis hdr */ @@ -550,21 +538,33 @@ do_send: skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); + if (likely(ret == 0)) { + struct netvsc_stats *tx_stats = 
this_cpu_ptr(net_device_ctx->tx_stats); -drop: - if (ret == 0) { u64_stats_update_begin(&tx_stats->syncp); tx_stats->packets++; tx_stats->bytes += skb_length; u64_stats_update_end(&tx_stats->syncp); - } else { - if (ret != -EAGAIN) { - dev_kfree_skb_any(skb); - net->stats.tx_dropped++; - } + return NETDEV_TX_OK; + } + + if (ret == -EAGAIN) { + ++net_device_ctx->eth_stats.tx_busy; + return NETDEV_TX_BUSY; } - return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK; + if (ret == -ENOSPC) + ++net_device_ctx->eth_stats.tx_no_space; + +drop: + dev_kfree_skb_any(skb); + net->stats.tx_dropped++; + + return NETDEV_TX_OK; + +no_memory: + ++net_device_ctx->eth_stats.tx_no_memory; + goto drop; } /* @@ -579,19 +579,32 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, struct netvsc_reconfig *event; unsigned long flags; - /* Handle link change statuses only */ + net = hv_get_drvdata(device_obj); + + if (!net) + return; + + ndev_ctx = netdev_priv(net); + + /* Update the physical link speed when changing to another vSwitch */ + if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { + u32 speed; + + speed = *(u32 *)((void *)indicate + indicate-> + status_buf_offset) / 10000; + ndev_ctx->speed = speed; + return; + } + + /* Handle these link change statuses below */ if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE && indicate->status != RNDIS_STATUS_MEDIA_CONNECT && indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT) return; - net = hv_get_drvdata(device_obj); - - if (!net || net->reg_state != NETREG_REGISTERED) + if (net->reg_state != NETREG_REGISTERED) return; - ndev_ctx = netdev_priv(net); - event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) return; @@ -604,7 +617,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, schedule_delayed_work(&ndev_ctx->dwork, 0); } - static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, struct hv_netvsc_packet *packet, struct ndis_tcp_ip_checksum_info *csum_info, @@ -728,8 +740,12 @@ vf_injection_done: static void netvsc_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { + struct net_device_context *net_device_ctx = netdev_priv(net); + struct hv_device *dev = net_device_ctx->device_ctx; + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info)); } static void netvsc_get_channels(struct net_device *net, @@ -1005,6 +1021,51 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) return err; } +static const struct { + char name[ETH_GSTRING_LEN]; + u16 offset; +} netvsc_stats[] = { + { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) }, + { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) }, + { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, + { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, + { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, +}; + +static int netvsc_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return ARRAY_SIZE(netvsc_stats); + default: + return -EINVAL; + } +} + +static void netvsc_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct net_device_context *ndc = netdev_priv(dev); + const void *nds = &ndc->eth_stats; + int i; + + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); +} + +static void 
netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + netvsc_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void netvsc_poll_controller(struct net_device *net) { @@ -1017,6 +1078,9 @@ static void netvsc_poll_controller(struct net_device *net) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ethtool_stats = netvsc_get_ethtool_stats, + .get_sset_count = netvsc_get_sset_count, + .get_strings = netvsc_get_strings, .get_channels = netvsc_get_channels, .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, @@ -1154,9 +1218,8 @@ static void netvsc_free_netdev(struct net_device *netdev) static struct net_device *get_netvsc_net_device(char *mac) { struct net_device *dev, *found = NULL; - int rtnl_locked; - rtnl_locked = rtnl_trylock(); + ASSERT_RTNL(); for_each_netdev(&init_net, dev) { if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) { @@ -1166,8 +1229,6 @@ static struct net_device *get_netvsc_net_device(char *mac) break; } } - if (rtnl_locked) - rtnl_unlock(); return found; } @@ -1261,7 +1322,6 @@ static int netvsc_vf_up(struct net_device *vf_netdev) return NOTIFY_OK; } - static int netvsc_vf_down(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1295,7 +1355,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev) return NOTIFY_OK; } - static int netvsc_unregister_vf(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1337,6 +1396,8 @@ static int netvsc_probe(struct hv_device *dev, netif_carrier_off(net); + netvsc_init_settings(net); + net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); @@ -1398,8 +1459,6 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - netvsc_init_settings(net); - ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); @@ -1423,7 +1482,6 @@ static int netvsc_remove(struct hv_device *dev) return 0; } - ndev_ctx = netdev_priv(net); net_device = ndev_ctx->nvdev; @@ -1470,7 +1528,6 @@ static struct hv_driver netvsc_drv = { .remove = netvsc_remove, }; - /* * On Hyper-V, every VF interface is matched with a corresponding * synthetic interface. 
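
The ethtool statistics support added above rests on a single lookup table: each counter name is paired with its byte offset inside struct netvsc_ethtool_stats, so the get_ethtool_stats hook can copy every counter with one generic loop instead of naming each field. A minimal userspace sketch of the same offsetof pattern; the struct and counter names here are illustrative stand-ins, not the driver's own:

#include <stddef.h>
#include <stdio.h>

struct demo_stats {			/* hypothetical stats block */
	unsigned long tx_busy;
	unsigned long tx_too_big;
};

static const struct {
	const char *name;
	size_t offset;
} demo_tbl[] = {
	{ "tx_busy",	offsetof(struct demo_stats, tx_busy) },
	{ "tx_too_big",	offsetof(struct demo_stats, tx_too_big) },
};

int main(void)
{
	struct demo_stats s = { .tx_busy = 3, .tx_too_big = 1 };
	const char *base = (const char *)&s;
	size_t i;

	/* Same walk as the driver's get_ethtool_stats: base + offset. */
	for (i = 0; i < sizeof(demo_tbl) / sizeof(demo_tbl[0]); i++)
		printf("%s = %lu\n", demo_tbl[i].name,
		       *(const unsigned long *)(base + demo_tbl[i].offset));
	return 0;
}
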
The synthetic interface is presented first diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 8e830f741d47..9195d5da8485 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -663,13 +663,14 @@ cleanup: return ret; } -u8 netvsc_hash_key[HASH_KEYLEN] = { +static const u8 netvsc_hash_key[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; +#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key) static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) { @@ -720,7 +721,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) for (i = 0; i < HASH_KEYLEN; i++) keyp[i] = netvsc_hash_key[i]; - ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; @@ -738,7 +738,6 @@ cleanup: return ret; } - static int rndis_filter_query_device_link_status(struct rndis_device *dev) { u32 size = sizeof(u32); @@ -752,6 +751,28 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev) return ret; } +static int rndis_filter_query_link_speed(struct rndis_device *dev) +{ + u32 size = sizeof(u32); + u32 link_speed; + struct net_device_context *ndc; + int ret; + + ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED, + &link_speed, &size); + + if (!ret) { + ndc = netdev_priv(dev->ndev); + + /* The link speed reported from host is in 100bps unit, so + * we convert it to Mbps here. + */ + ndc->speed = link_speed / 10000; + } + + return ret; +} + int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) { struct rndis_request *request; @@ -792,7 +813,6 @@ cleanup: return ret; } - static int rndis_filter_init_device(struct rndis_device *dev) { struct rndis_request *request; @@ -875,11 +895,11 @@ cleanup: /* Wait for all send completions */ wait_event(nvdev->wait_drain, - atomic_read(&nvdev->num_outstanding_sends) == 0); + atomic_read(&nvdev->num_outstanding_sends) == 0 && + atomic_read(&nvdev->num_outstanding_recvs) == 0); if (request) put_rndis_request(dev, request); - return; } static int rndis_filter_open_device(struct rndis_device *dev) @@ -931,6 +951,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) * NETVSC_PACKET_SIZE); + nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX * + sizeof(struct recv_comp_data)); + ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE, nvscdev->ring_size * PAGE_SIZE, NULL, 0, netvsc_channel_cb, new_sc); @@ -946,7 +969,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) } int rndis_filter_device_add(struct hv_device *dev, - void *additional_info) + void *additional_info) { int ret; struct net_device *net = hv_get_drvdata(dev); @@ -1028,7 +1051,6 @@ int rndis_filter_device_add(struct hv_device *dev, offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; - ret = rndis_filter_set_offload_params(net, &offloads); if (ret) goto err_dev_remv; @@ -1044,6 +1066,8 @@ int rndis_filter_device_add(struct hv_device *dev, if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) return 0; + rndis_filter_query_link_speed(rndis_device); + /* vRSS setup */ memset(&rsscap, 0, rsscap_size); ret = rndis_filter_query_device(rndis_device, @@ -1152,7 +1176,6 @@ void 
rndis_filter_device_remove(struct hv_device *dev)
 	netvsc_device_remove(dev);
 }
 
-
 int rndis_filter_open(struct netvsc_device *nvdev)
 {
 	if (!nvdev)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 47a64342cc16..1c3e07c3d0b8 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -15,152 +15,218 @@ if PHYLIB
 config SWPHY
 	bool
 
-comment "MII PHY device drivers"
-
-config AQUANTIA_PHY
-	tristate "Drivers for the Aquantia PHYs"
-	---help---
-	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+comment "MDIO bus device drivers"
 
-config AT803X_PHY
-	tristate "Drivers for Atheros AT803X PHYs"
-	---help---
-	  Currently supports the AT8030 and AT8035 model
+config MDIO_BCM_IPROC
+	tristate "Broadcom iProc MDIO bus controller"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM && OF_MDIO
+	help
+	  This module provides a driver for the MDIO busses found in the
+	  Broadcom iProc SoCs.
 
-config AMD_PHY
-	tristate "Drivers for the AMD PHYs"
-	---help---
-	  Currently supports the am79c874
+config MDIO_BCM_UNIMAC
+	tristate "Broadcom UniMAC MDIO bus controller"
+	depends on HAS_IOMEM
+	help
+	  This module provides a driver for the Broadcom UniMAC MDIO busses.
+	  This hardware can be found in the Broadcom GENET Ethernet MAC
+	  controllers as well as some Broadcom Ethernet switches such as the
+	  Starfighter 2 switches.
 
-config MARVELL_PHY
-	tristate "Drivers for Marvell PHYs"
-	---help---
-	  Currently has a driver for the 88E1011S
-
-config DAVICOM_PHY
-	tristate "Drivers for Davicom PHYs"
-	---help---
-	  Currently supports dm9161e and dm9131
+config MDIO_BITBANG
+	tristate "Bitbanged MDIO buses"
+	help
+	  This module implements the MDIO bus protocol in software,
+	  for use by low level drivers that export the ability to
+	  drive the relevant pins.
 
-config QSEMI_PHY
-	tristate "Drivers for Quality Semiconductor PHYs"
-	---help---
-	  Currently supports the qs6612
+	  If in doubt, say N.
 
-config LXT_PHY
-	tristate "Drivers for the Intel LXT PHYs"
-	---help---
-	  Currently supports the lxt970, lxt971
+config MDIO_BUS_MUX
+	tristate
+	depends on OF_MDIO
+	help
+	  This module provides a driver framework for MDIO bus
+	  multiplexers which connect one of several child MDIO busses
+	  to a parent bus. Switching between child busses is done by
+	  device specific drivers.
 
-config CICADA_PHY
-	tristate "Drivers for the Cicada PHYs"
-	---help---
-	  Currently supports the cis8204
+config MDIO_BUS_MUX_BCM_IPROC
+	tristate "Broadcom iProc based MDIO bus multiplexers"
+	depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+	select MDIO_BUS_MUX
+	default ARCH_BCM_IPROC
+	help
+	  This module provides a driver for MDIO bus multiplexers found in
+	  iProc based Broadcom SoCs. This multiplexer connects one of several
+	  child MDIO buses to a parent bus. Buses could be internal as well as
+	  external and selection logic lies inside the same multiplexer.
 
-config VITESSE_PHY
-	tristate "Drivers for the Vitesse PHYs"
-	---help---
-	  Currently supports the vsc8244
+config MDIO_BUS_MUX_GPIO
+	tristate "GPIO controlled MDIO bus multiplexers"
+	depends on OF_GPIO && OF_MDIO
+	select MDIO_BUS_MUX
+	help
+	  This module provides a driver for MDIO bus multiplexers that
+	  are controlled via GPIO lines. The multiplexer connects one of
+	  several child MDIO busses to a parent bus. Child bus
+	  selection is under the control of GPIO lines.
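
As background for the MDIO_BITBANG entry above: a Clause 22 MDIO read is simple enough to bit-bang by hand, which is all the software implementation does. A hedged sketch under stated assumptions; the mdio_pins callback structure is invented for illustration, and real drivers additionally handle bus timing and verify the turnaround bit:

#include <stdint.h>

struct mdio_pins {			/* hypothetical pin accessors */
	void (*set_mdc)(int level);
	void (*set_mdio)(int level);	/* drive MDIO, or -1 to tristate */
	int  (*get_mdio)(void);
};

static void mdio_clock_bit(const struct mdio_pins *p, int bit)
{
	p->set_mdio(bit);
	p->set_mdc(1);
	p->set_mdc(0);
}

/* Clause 22 read frame: PRE(32x1) ST(01) OP(10) PHYAD(5) REGAD(5) TA, data(16) */
static uint16_t mdio_c22_read(const struct mdio_pins *p, uint8_t phy, uint8_t reg)
{
	uint16_t val = 0;
	int i;

	for (i = 0; i < 32; i++)		/* preamble */
		mdio_clock_bit(p, 1);
	mdio_clock_bit(p, 0);			/* start: 01 */
	mdio_clock_bit(p, 1);
	mdio_clock_bit(p, 1);			/* opcode read: 10 */
	mdio_clock_bit(p, 0);
	for (i = 4; i >= 0; i--)		/* 5-bit PHY address, MSB first */
		mdio_clock_bit(p, (phy >> i) & 1);
	for (i = 4; i >= 0; i--)		/* 5-bit register address */
		mdio_clock_bit(p, (reg >> i) & 1);
	p->set_mdio(-1);			/* release the line: turnaround */
	p->set_mdc(1);
	p->set_mdc(0);				/* PHY drives the TA bit */
	for (i = 0; i < 16; i++) {		/* sample 16 data bits, MSB first */
		p->set_mdc(1);
		val = (uint16_t)((val << 1) | (p->get_mdio() & 1));
		p->set_mdc(0);
	}
	return val;
}
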
-config TERANETICS_PHY
-	tristate "Drivers for the Teranetics PHYs"
-	---help---
-	  Currently supports the Teranetics TN2020
+config MDIO_BUS_MUX_MMIOREG
+	tristate "MMIO device-controlled MDIO bus multiplexers"
+	depends on OF_MDIO && HAS_IOMEM
+	select MDIO_BUS_MUX
+	help
+	  This module provides a driver for MDIO bus multiplexers that
+	  are controlled via a simple memory-mapped device, like an FPGA.
+	  The multiplexer connects one of several child MDIO busses to a
+	  parent bus. Child bus selection is under the control of one of
+	  the FPGA's registers.
 
-config SMSC_PHY
-	tristate "Drivers for SMSC PHYs"
-	---help---
-	  Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
+	  Currently, only 8-bit registers are supported.
 
-config BCM_NET_PHYLIB
+config MDIO_CAVIUM
 	tristate
 
-config BROADCOM_PHY
-	tristate "Drivers for Broadcom PHYs"
-	select BCM_NET_PHYLIB
+config MDIO_GPIO
+	tristate "GPIO lib-based bitbanged MDIO buses"
+	depends on MDIO_BITBANG && GPIOLIB
 	---help---
-	  Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
-	  BCM5481 and BCM5482 PHYs.
+	  Supports GPIO lib-based MDIO busses.
 
-config BCM_CYGNUS_PHY
-	tristate "Drivers for Broadcom Cygnus SoC internal PHY"
-	depends on ARCH_BCM_CYGNUS || COMPILE_TEST
-	depends on MDIO_BCM_IPROC
-	select BCM_NET_PHYLIB
+	  To compile this driver as a module, choose M here: the module
+	  will be called mdio-gpio.
+
+config MDIO_HISI_FEMAC
+	tristate "Hisilicon FEMAC MDIO bus controller"
+	depends on HAS_IOMEM && OF_MDIO
+	help
+	  This module provides a driver for the MDIO busses found in the
+	  Hisilicon SoCs that have a Fast Ethernet MAC.
+
+config MDIO_MOXART
+	tristate "MOXA ART MDIO interface support"
+	depends on ARCH_MOXART
+	help
+	  This driver supports the MDIO interface found in the network
+	  interface units of the MOXA ART SoC.
+
+config MDIO_OCTEON
+	tristate "Octeon and some ThunderX SOCs MDIO buses"
+	depends on 64BIT
+	depends on HAS_IOMEM
+	select MDIO_CAVIUM
+	help
+	  This module provides a driver for the Octeon and ThunderX MDIO
+	  buses. It is required by the Octeon and ThunderX ethernet device
+	  drivers on some systems.
+
+config MDIO_SUN4I
+	tristate "Allwinner sun4i MDIO interface support"
+	depends on ARCH_SUNXI
+	help
+	  This driver supports the MDIO interface found in the network
+	  interface units of the Allwinner SoCs that have an EMAC (A10,
+	  A12, A10s, etc.)
+
+config MDIO_THUNDER
+	tristate "ThunderX SOCs MDIO buses"
+	depends on 64BIT
+	depends on PCI
+	select MDIO_CAVIUM
+	help
+	  This driver supports the MDIO interfaces found on Cavium
+	  ThunderX SoCs when the MDIO bus device appears as a PCI
+	  device.
+
+config MDIO_XGENE
+	tristate "APM X-Gene SoC MDIO bus controller"
+	help
+	  This module provides a driver for the MDIO busses found in the
+	  APM X-Gene SoCs.
+
+comment "MII PHY device drivers"
+
+config AMD_PHY
+	tristate "AMD PHYs"
 	---help---
-	  This PHY driver is for the 1G internal PHYs of the Broadcom
-	  Cygnus Family SoC.
+	  Currently supports the am79c874
 
-	  Currently supports internal PHY's used in the BCM11300,
-	  BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
-	  BCM58303 & BCM58305 Broadcom Cygnus SoCs.
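
The MDIO_BUS_MUX_MMIOREG entry above boils down to one register write per bus switch: the mux framework passes the currently selected and the desired child bus, and the driver pokes the selector into the memory-mapped register. A sketch of that switch step; the register pointer and the current/desired calling convention only loosely mirror the kernel's mdio-mux switch callback, and all names are illustrative:

#include <stdint.h>

/* Hypothetical 8-bit selector register, e.g. one FPGA CSR. */
static volatile uint8_t *mux_reg;

/* Route the shared parent MDIO bus to child bus 'desired'. */
static int mmioreg_mux_switch(int current, int desired)
{
	if (current == desired)		/* already routed, nothing to do */
		return 0;
	*mux_reg = (uint8_t)desired;	/* a single MMIO write selects it */
	return 0;
}

Supporting only 8-bit registers, per the help text, is what keeps the selection a single byte store.
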
+config AQUANTIA_PHY + tristate "Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + +config AT803X_PHY + tristate "AT803X PHYs" + ---help--- + Currently supports the AT8030 and AT8035 model config BCM63XX_PHY - tristate "Drivers for Broadcom 63xx SOCs internal PHY" + tristate "Broadcom 63xx SOCs internal PHY" depends on BCM63XX select BCM_NET_PHYLIB ---help--- Currently supports the 6348 and 6358 PHYs. config BCM7XXX_PHY - tristate "Drivers for Broadcom 7xxx SOCs internal PHYs" + tristate "Broadcom 7xxx SOCs internal PHYs" select BCM_NET_PHYLIB ---help--- Currently supports the BCM7366, BCM7439, BCM7445, and 40nm and 65nm generation of BCM7xxx Set Top Box SoCs. config BCM87XX_PHY - tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs" + tristate "Broadcom BCM8706 and BCM8727 PHYs" help Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs. -config ICPLUS_PHY - tristate "Drivers for ICPlus PHYs" +config BCM_CYGNUS_PHY + tristate "Broadcom Cygnus SoC internal PHY" + depends on ARCH_BCM_CYGNUS || COMPILE_TEST + depends on MDIO_BCM_IPROC + select BCM_NET_PHYLIB ---help--- - Currently supports the IP175C and IP1001 PHYs. + This PHY driver is for the 1G internal PHYs of the Broadcom + Cygnus Family SoC. -config REALTEK_PHY - tristate "Drivers for Realtek PHYs" - ---help--- - Supports the Realtek 821x PHY. + Currently supports internal PHY's used in the BCM11300, + BCM11320, BCM11350, BCM11360, BCM58300, BCM58302, + BCM58303 & BCM58305 Broadcom Cygnus SoCs. -config NATIONAL_PHY - tristate "Drivers for National Semiconductor PHYs" - ---help--- - Currently supports the DP83865 PHY. +config BCM_NET_PHYLIB + tristate -config STE10XP - tristate "Driver for STMicroelectronics STe10Xp PHYs" +config BROADCOM_PHY + tristate "Broadcom PHYs" + select BCM_NET_PHYLIB ---help--- - This is the driver for the STe100p and STe101p PHYs. + Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, + BCM5481 and BCM5482 PHYs. -config LSI_ET1011C_PHY - tristate "Driver for LSI ET1011C PHY" +config CICADA_PHY + tristate "Cicada PHYs" ---help--- - Supports the LSI ET1011C PHY. + Currently supports the cis8204 -config MICREL_PHY - tristate "Driver for Micrel PHYs" +config DAVICOM_PHY + tristate "Davicom PHYs" ---help--- - Supports the KSZ9021, VSC8201, KS8001 PHYs. + Currently supports dm9161e and dm9131 config DP83848_PHY - tristate "Driver for Texas Instruments DP83848 PHY" + tristate "Texas Instruments DP83848 PHY" ---help--- Supports the DP83848 PHY. config DP83867_PHY - tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" + tristate "Texas Instruments DP83867 Gigabit PHY" ---help--- Currently supports the DP83867 PHY. -config MICROCHIP_PHY - tristate "Drivers for Microchip PHYs" - help - Supports the LAN88XX PHYs. - config FIXED_PHY - tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB select SWPHY ---help--- @@ -169,143 +235,90 @@ config FIXED_PHY Currently tested with mpc866ads and mpc8349e-mitx. -config MDIO_BITBANG - tristate "Support for bitbanged MDIO buses" - help - This module implements the MDIO bus protocol in software, - for use by low level drivers that export the ability to - drive the relevant pins. - - If in doubt, say N. - -config MDIO_GPIO - tristate "Support for GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB +config ICPLUS_PHY + tristate "ICPlus PHYs" ---help--- - Supports GPIO lib-based MDIO busses. 
- - To compile this driver as a module, choose M here: the module - will be called mdio-gpio. - -config MDIO_CAVIUM - tristate - -config MDIO_OCTEON - tristate "Support for MDIO buses on Octeon and some ThunderX SOCs" - depends on 64BIT - depends on HAS_IOMEM - select MDIO_CAVIUM - help - This module provides a driver for the Octeon and ThunderX MDIO - buses. It is required by the Octeon and ThunderX ethernet device - drivers on some systems. + Currently supports the IP175C and IP1001 PHYs. -config MDIO_THUNDER - tristate "Support for MDIO buses on ThunderX SOCs" - depends on 64BIT - depends on PCI - select MDIO_CAVIUM - help - This driver supports the MDIO interfaces found on Cavium - ThunderX SoCs when the MDIO bus device appears as a PCI - device. +config INTEL_XWAY_PHY + tristate "Intel XWAY PHYs" + ---help--- + Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs. + These PHYs are marked as standalone chips under the names + PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel + SoCs xRX200, xRX300, xRX330, xRX350 and xRX550. +config LSI_ET1011C_PHY + tristate "LSI ET1011C PHY" + ---help--- + Supports the LSI ET1011C PHY. -config MDIO_SUN4I - tristate "Allwinner sun4i MDIO interface support" - depends on ARCH_SUNXI - help - This driver supports the MDIO interface found in the network - interface units of the Allwinner SoC that have an EMAC (A10, - A12, A10s, etc.) +config LXT_PHY + tristate "Intel LXT PHYs" + ---help--- + Currently supports the lxt970, lxt971 -config MDIO_MOXART - tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART - help - This driver supports the MDIO interface found in the network - interface units of the MOXA ART SoC +config MARVELL_PHY + tristate "Marvell PHYs" + ---help--- + Currently has a driver for the 88E1011S -config MDIO_BUS_MUX - tristate - depends on OF_MDIO - help - This module provides a driver framework for MDIO bus - multiplexers which connect one of several child MDIO busses - to a parent bus. Switching between child busses is done by - device specific drivers. +config MICREL_PHY + tristate "Micrel PHYs" + ---help--- + Supports the KSZ9021, VSC8201, KS8001 PHYs. -config MDIO_BUS_MUX_GPIO - tristate "Support for GPIO controlled MDIO bus multiplexers" - depends on OF_GPIO && OF_MDIO - select MDIO_BUS_MUX +config MICROCHIP_PHY + tristate "Microchip PHYs" help - This module provides a driver for MDIO bus multiplexers that - are controlled via GPIO lines. The multiplexer connects one of - several child MDIO busses to a parent bus. Child bus - selection is under the control of GPIO lines. + Supports the LAN88XX PHYs. -config MDIO_BUS_MUX_MMIOREG - tristate "Support for MMIO device-controlled MDIO bus multiplexers" - depends on OF_MDIO && HAS_IOMEM - select MDIO_BUS_MUX - help - This module provides a driver for MDIO bus multiplexers that - are controlled via a simple memory-mapped device, like an FPGA. - The multiplexer connects one of several child MDIO busses to a - parent bus. Child bus selection is under the control of one of - the FPGA's registers. +config MICROSEMI_PHY + tristate "Microsemi PHYs" + ---help--- + Currently supports the VSC8531 and VSC8541 PHYs - Currently, only 8-bit registers are supported. +config NATIONAL_PHY + tristate "National Semiconductor PHYs" + ---help--- + Currently supports the DP83865 PHY. 
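
The MICROSEMI_PHY entry above is backed by a new mscc.c driver later in this patch; its interesting registers sit behind a page-select scheme in which register 31 chooses the active page before ordinary reads and writes. A hedged sketch of that read-modify-write pattern with stand-in accessors; the fake register file and helper names are illustrative, mirroring the shape of vsc85xx_default_config() further down:

#include <stdint.h>

#define EXT_PAGE_ACCESS	31	/* page-select register (MSCC_EXT_PAGE_ACCESS) */
#define PAGE_STANDARD	0x0000
#define PAGE_EXTENDED_2	0x0002

/* Stand-ins for the MDIO accessors; a real driver uses phy_read/phy_write. */
static uint16_t regs[2][32];	/* [page][reg] fake register file */
static int cur_page;

static uint16_t mdio_read(uint8_t reg)
{
	return reg == EXT_PAGE_ACCESS ? (uint16_t)cur_page : regs[cur_page][reg];
}

static void mdio_write(uint8_t reg, uint16_t val)
{
	if (reg == EXT_PAGE_ACCESS)
		cur_page = (val == PAGE_EXTENDED_2) ? 1 : 0; /* two fake pages */
	else
		regs[cur_page][reg] = val;
}

/* Read-modify-write one paged register, then restore the standard page. */
static void paged_rmw(uint16_t page, uint8_t reg, uint16_t clear, uint16_t set)
{
	uint16_t v;

	mdio_write(EXT_PAGE_ACCESS, page);		/* switch page */
	v = mdio_read(reg);
	v = (uint16_t)((v & ~clear) | set);
	mdio_write(reg, v);
	mdio_write(EXT_PAGE_ACCESS, PAGE_STANDARD);	/* restore */
}

The driver's RGMII clock-delay setup is exactly one such call: switch to extended page 2, adjust the RGMII control register, switch back.
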
-config MDIO_BUS_MUX_BCM_IPROC
-	tristate "Support for iProc based MDIO bus multiplexers"
-	depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
-	select MDIO_BUS_MUX
-	default ARCH_BCM_IPROC
-	help
-	  This module provides a driver for MDIO bus multiplexers found in
-	  iProc based Broadcom SoCs. This multiplexer connects one of several
-	  child MDIO bus to a parent bus. Buses could be internal as well as
-	  external and selection logic lies inside the same multiplexer.
+config QSEMI_PHY
+	tristate "Quality Semiconductor PHYs"
+	---help---
+	  Currently supports the qs6612
 
-config MDIO_BCM_UNIMAC
-	tristate "Broadcom UniMAC MDIO bus controller"
-	depends on HAS_IOMEM
-	help
-	  This module provides a driver for the Broadcom UniMAC MDIO busses.
-	  This hardware can be found in the Broadcom GENET Ethernet MAC
-	  controllers as well as some Broadcom Ethernet switches such as the
-	  Starfighter 2 switches.
+config REALTEK_PHY
+	tristate "Realtek PHYs"
+	---help---
+	  Supports the Realtek 821x PHY.
 
-config MDIO_BCM_IPROC
-	tristate "Broadcom iProc MDIO bus controller"
-	depends on ARCH_BCM_IPROC || COMPILE_TEST
-	depends on HAS_IOMEM && OF_MDIO
-	help
-	  This module provides a driver for the MDIO busses found in the
-	  Broadcom iProc SoC's.
+config SMSC_PHY
+	tristate "SMSC PHYs"
+	---help---
+	  Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
 
-config INTEL_XWAY_PHY
-	tristate "Driver for Intel XWAY PHYs"
+config STE10XP
+	tristate "STMicroelectronics STe10Xp PHYs"
 	---help---
-	  Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs.
-	  These PHYs are marked as standalone chips under the names
-	  PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel
-	  SoCs xRX200, xRX300, xRX330, xRX350 and xRX550.
+	  This is the driver for the STe100p and STe101p PHYs.
 
-config MDIO_HISI_FEMAC
-	tristate "Hisilicon FEMAC MDIO bus controller"
-	depends on HAS_IOMEM && OF_MDIO
-	help
-	  This module provides a driver for the MDIO busses found in the
-	  Hisilicon SoC that have an Fast Ethernet MAC.
+config TERANETICS_PHY
+	tristate "Teranetics PHYs"
+	---help---
+	  Currently supports the Teranetics TN2020
 
-config MDIO_XGENE
-	tristate "APM X-Gene SoC MDIO bus controller"
-	help
-	  This module provides a driver for the MDIO busses found in the
-	  APM X-Gene SoC's.
+config VITESSE_PHY
+	tristate "Vitesse PHYs"
+	---help---
+	  Currently supports the vsc8244
+
+config XILINX_GMII2RGMII
+	tristate "Xilinx GMII2RGMII converter driver"
+	---help---
+	  This driver supports the Xilinx GMII to RGMII IP core. It provides
+	  the Reduced Gigabit Media Independent Interface (RGMII) between
+	  Ethernet physical media devices and the Gigabit Ethernet controller.
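
The converter driver behind this entry (added further down in this patch) has one real job after each PHY status poll: mirror the negotiated link speed into the IP core's register 0x10 as BMCR-style speed bits. A compact sketch of the intended mapping, using the standard MII BMCR bit values; the function name is illustrative:

#include <stdint.h>

#define BMCR_SPEED100	0x2000	/* standard MII BMCR speed bits */
#define BMCR_SPEED1000	0x0040
#define SPEED_BITS	(BMCR_SPEED1000 | BMCR_SPEED100)

/* Fold a negotiated speed (in Mb/s) into the converter register value;
 * 10 Mb/s is encoded as both speed bits clear. */
static uint16_t gmii2rgmii_fold_speed(uint16_t regval, int speed_mbps)
{
	regval &= (uint16_t)~SPEED_BITS;
	if (speed_mbps == 1000)
		regval |= BMCR_SPEED1000;
	else if (speed_mbps == 100)
		regval |= BMCR_SPEED100;
	return regval;
}
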
endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 534dfa74d5a2..e58667d111e7 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,51 +1,55 @@ -# Makefile for Linux PHY drivers +# Makefile for Linux PHY drivers and MDIO bus drivers libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o libphy-$(CONFIG_SWPHY) += swphy.o obj-$(CONFIG_PHYLIB) += libphy.o + +obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o +obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o +obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o +obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o +obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o +obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o +obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o +obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o +obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o +obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o +obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o +obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o +obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o + +obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o -obj-$(CONFIG_MARVELL_PHY) += marvell.o -obj-$(CONFIG_DAVICOM_PHY) += davicom.o -obj-$(CONFIG_CICADA_PHY) += cicada.o -obj-$(CONFIG_LXT_PHY) += lxt.o -obj-$(CONFIG_QSEMI_PHY) += qsemi.o -obj-$(CONFIG_SMSC_PHY) += smsc.o -obj-$(CONFIG_TERANETICS_PHY) += teranetics.o -obj-$(CONFIG_VITESSE_PHY) += vitesse.o -obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o -obj-$(CONFIG_BROADCOM_PHY) += broadcom.o +obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o -obj-$(CONFIG_ICPLUS_PHY) += icplus.o -obj-$(CONFIG_REALTEK_PHY) += realtek.o -obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o -obj-$(CONFIG_FIXED_PHY) += fixed_phy.o -obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o -obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o -obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o +obj-$(CONFIG_BROADCOM_PHY) += broadcom.o +obj-$(CONFIG_CICADA_PHY) += cicada.o +obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o -obj-$(CONFIG_STE10XP) += ste10Xp.o -obj-$(CONFIG_MICREL_PHY) += micrel.o -obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o -obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o -obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o +obj-$(CONFIG_FIXED_PHY) += fixed_phy.o +obj-$(CONFIG_ICPLUS_PHY) += icplus.o +obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o +obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o +obj-$(CONFIG_LXT_PHY) += lxt.o +obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o -obj-$(CONFIG_AT803X_PHY) += at803x.o -obj-$(CONFIG_AMD_PHY) += amd.o -obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o -obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o -obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o -obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o -obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o -obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o -obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o -obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o -obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o -obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o -obj-$(CONFIG_MDIO_XGENE) += 
mdio-xgene.o +obj-$(CONFIG_MICROSEMI_PHY) += mscc.o +obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_QSEMI_PHY) += qsemi.o +obj-$(CONFIG_REALTEK_PHY) += realtek.o +obj-$(CONFIG_SMSC_PHY) += smsc.o +obj-$(CONFIG_STE10XP) += ste10Xp.o +obj-$(CONFIG_TERANETICS_PHY) += teranetics.o +obj-$(CONFIG_VITESSE_PHY) += vitesse.o +obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c new file mode 100644 index 000000000000..ad33390b382a --- /dev/null +++ b/drivers/net/phy/mscc.c @@ -0,0 +1,161 @@ +/* + * Driver for Microsemi VSC85xx PHYs + * + * Author: Nagaraju Lakkaraju + * License: Dual MIT/GPL + * Copyright (c) 2016 Microsemi Corporation + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mdio.h> +#include <linux/mii.h> +#include <linux/phy.h> + +enum rgmii_rx_clock_delay { + RGMII_RX_CLK_DELAY_0_2_NS = 0, + RGMII_RX_CLK_DELAY_0_8_NS = 1, + RGMII_RX_CLK_DELAY_1_1_NS = 2, + RGMII_RX_CLK_DELAY_1_7_NS = 3, + RGMII_RX_CLK_DELAY_2_0_NS = 4, + RGMII_RX_CLK_DELAY_2_3_NS = 5, + RGMII_RX_CLK_DELAY_2_6_NS = 6, + RGMII_RX_CLK_DELAY_3_4_NS = 7 +}; + +#define MII_VSC85XX_INT_MASK 25 +#define MII_VSC85XX_INT_MASK_MASK 0xa000 +#define MII_VSC85XX_INT_STATUS 26 + +#define MSCC_EXT_PAGE_ACCESS 31 +#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ +#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */ + +/* Extended Page 2 Registers */ +#define MSCC_PHY_RGMII_CNTL 20 +#define RGMII_RX_CLK_DELAY_MASK 0x0070 +#define RGMII_RX_CLK_DELAY_POS 4 + +/* Microsemi PHY ID's */ +#define PHY_ID_VSC8531 0x00070570 +#define PHY_ID_VSC8541 0x00070770 + +static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page) +{ + int rc; + + rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page); + return rc; +} + +static int vsc85xx_default_config(struct phy_device *phydev) +{ + int rc; + u16 reg_val; + + mutex_lock(&phydev->lock); + rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc != 0) + goto out_unlock; + + reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); + reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); + reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); + phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); + rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); + +out_unlock: + mutex_unlock(&phydev->lock); + + return rc; +} + +static int vsc85xx_config_init(struct phy_device *phydev) +{ + int rc; + + rc = vsc85xx_default_config(phydev); + if (rc) + return rc; + rc = genphy_config_init(phydev); + + return rc; +} + +static int vsc85xx_ack_interrupt(struct phy_device *phydev) +{ + int rc = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + + return (rc < 0) ? 
rc : 0; +} + +static int vsc85xx_config_intr(struct phy_device *phydev) +{ + int rc; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + rc = phy_write(phydev, MII_VSC85XX_INT_MASK, + MII_VSC85XX_INT_MASK_MASK); + } else { + rc = phy_write(phydev, MII_VSC85XX_INT_MASK, 0); + if (rc < 0) + return rc; + rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + } + + return rc; +} + +/* Microsemi VSC85xx PHYs */ +static struct phy_driver vsc85xx_driver[] = { +{ + .phy_id = PHY_ID_VSC8531, + .name = "Microsemi VSC8531", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &genphy_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, +}, +{ + .phy_id = PHY_ID_VSC8541, + .name = "Microsemi VSC8541 SyncE", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &genphy_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, +} + +}; + +module_phy_driver(vsc85xx_driver); + +static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8531, 0xfffffff0, }, + { PHY_ID_VSC8541, 0xfffffff0, }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl); + +MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver"); +MODULE_AUTHOR("Nagaraju Lakkaraju"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c new file mode 100644 index 000000000000..d15dd3938ba8 --- /dev/null +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -0,0 +1,112 @@ +/* Xilinx GMII2RGMII Converter driver + * + * Copyright (C) 2016 Xilinx, Inc. + * Copyright (C) 2016 Andrew Lunn <andrew@lunn.ch> + * + * Author: Andrew Lunn <andrew@lunn.ch> + * Author: Kedareswara rao Appana <appanad@xilinx.com> + * + * Description: + * This driver is developed for Xilinx GMII2RGMII Converter + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+
+#define XILINX_GMII2RGMII_REG		0x10
+#define XILINX_GMII2RGMII_SPEED_MASK	(BMCR_SPEED1000 | BMCR_SPEED100)
+
+struct gmii2rgmii {
+	struct phy_device *phy_dev;
+	struct phy_driver *phy_drv;
+	struct phy_driver conv_phy_drv;
+	int addr;
+};
+
+static int xgmiitorgmii_read_status(struct phy_device *phydev)
+{
+	struct gmii2rgmii *priv = phydev->priv;
+	u16 val = 0;
+
+	priv->phy_drv->read_status(phydev);
+
+	val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
+	val &= XILINX_GMII2RGMII_SPEED_MASK;
+
+	if (phydev->speed == SPEED_1000)
+		val |= BMCR_SPEED1000;
+	else if (phydev->speed == SPEED_100)
+		val |= BMCR_SPEED100;
+	else
+		val |= BMCR_SPEED10;
+
+	mdiobus_write(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG, val);
+
+	return 0;
+}
+
+static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
+{
+	struct device *dev = &mdiodev->dev;
+	struct device_node *np = dev->of_node, *phy_node;
+	struct gmii2rgmii *priv;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	phy_node = of_parse_phandle(np, "phy-handle", 0);
+	if (!phy_node) {
+		dev_err(dev, "Couldn't parse phy-handle\n");
+		return -ENODEV;
+	}
+
+	priv->phy_dev = of_phy_find_device(phy_node);
+	of_node_put(phy_node);
+	if (!priv->phy_dev) {
+		dev_info(dev, "Couldn't find phydev\n");
+		return -EPROBE_DEFER;
+	}
+
+	priv->addr = mdiodev->addr;
+	priv->phy_drv = priv->phy_dev->drv;
+	memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
+	       sizeof(struct phy_driver));
+	priv->conv_phy_drv.read_status = xgmiitorgmii_read_status;
+	priv->phy_dev->priv = priv;
+	priv->phy_dev->drv = &priv->conv_phy_drv;
+
+	return 0;
+}
+
+static const struct of_device_id xgmiitorgmii_of_match[] = {
+	{ .compatible = "xlnx,gmii-to-rgmii-1.0" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmiitorgmii_of_match);
+
+static struct mdio_driver xgmiitorgmii_driver = {
+	.probe	= xgmiitorgmii_probe,
+	.mdiodrv.driver = {
+		.name = "xgmiitorgmii",
+		.of_match_table = xgmiitorgmii_of_match,
+	},
+};
+
+mdio_module_driver(xgmiitorgmii_driver);
+
+MODULE_DESCRIPTION("Xilinx GMII2RGMII converter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f226db4616b7..70cfa06ccd40 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1103,6 +1103,15 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
 	}
 
 	conf.file = file;
+
+	/* Don't use device name generated by the rtnetlink layer when ifname
+	 * isn't specified. Let ppp_dev_configure() set the device name using
+	 * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
+	 * userspace to infer the device name using the PPPIOCGUNIT ioctl.
+ */ + if (!tb[IFLA_IFNAME]) + conf.ifname_is_set = false; + err = ppp_dev_configure(src_net, dev, &conf); out_unlock: diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index ae0905ed4a32..1951b1085cb8 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -37,6 +37,7 @@ #include <net/icmp.h> #include <net/route.h> #include <net/gre.h> +#include <net/pptp.h> #include <linux/uaccess.h> @@ -53,41 +54,6 @@ static struct proto pptp_sk_proto __read_mostly; static const struct ppp_channel_ops pptp_chan_ops; static const struct proto_ops pptp_ops; -#define PPP_LCP_ECHOREQ 0x09 -#define PPP_LCP_ECHOREP 0x0A -#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) - -#define MISSING_WINDOW 20 -#define WRAPPED(curseq, lastseq)\ - ((((curseq) & 0xffffff00) == 0) &&\ - (((lastseq) & 0xffffff00) == 0xffffff00)) - -#define PPTP_GRE_PROTO 0x880B -#define PPTP_GRE_VER 0x1 - -#define PPTP_GRE_FLAG_C 0x80 -#define PPTP_GRE_FLAG_R 0x40 -#define PPTP_GRE_FLAG_K 0x20 -#define PPTP_GRE_FLAG_S 0x10 -#define PPTP_GRE_FLAG_A 0x80 - -#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C) -#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R) -#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K) -#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S) -#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A) - -#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header)) -struct pptp_gre_header { - u8 flags; - u8 ver; - __be16 protocol; - __be16 payload_len; - __be16 call_id; - __be32 seq; - __be32 ack; -} __packed; - static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr) { struct pppox_sock *sock; @@ -240,16 +206,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) skb_push(skb, header_len); hdr = (struct pptp_gre_header *)(skb->data); - hdr->flags = PPTP_GRE_FLAG_K; - hdr->ver = PPTP_GRE_VER; - hdr->protocol = htons(PPTP_GRE_PROTO); - hdr->call_id = htons(opt->dst_addr.call_id); + hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ; + hdr->gre_hd.protocol = GRE_PROTO_PPP; + hdr->call_id = htons(opt->dst_addr.call_id); - hdr->flags |= PPTP_GRE_FLAG_S; - hdr->seq = htonl(++opt->seq_sent); + hdr->seq = htonl(++opt->seq_sent); if (opt->ack_sent != seq_recv) { /* send ack with this message */ - hdr->ver |= PPTP_GRE_FLAG_A; + hdr->gre_hd.flags |= GRE_ACK; hdr->ack = htonl(seq_recv); opt->ack_sent = seq_recv; } @@ -312,7 +276,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) headersize = sizeof(*header); /* test if acknowledgement present */ - if (PPTP_GRE_IS_A(header->ver)) { + if (GRE_IS_ACK(header->gre_hd.flags)) { __u32 ack; if (!pskb_may_pull(skb, headersize)) @@ -320,7 +284,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) header = (struct pptp_gre_header *)(skb->data); /* ack in different place if S = 0 */ - ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; + ack = GRE_IS_SEQ(header->gre_hd.flags) ? 
header->ack : header->seq; ack = ntohl(ack); @@ -333,7 +297,7 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) headersize -= sizeof(header->ack); } /* test if payload present */ - if (!PPTP_GRE_IS_S(header->flags)) + if (!GRE_IS_SEQ(header->gre_hd.flags)) goto drop; payload_len = ntohs(header->payload_len); @@ -394,11 +358,11 @@ static int pptp_rcv(struct sk_buff *skb) header = (struct pptp_gre_header *)skb->data; - if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */ - PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */ - PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */ - !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */ - (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */ + if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */ + GRE_IS_CSUM(header->gre_hd.flags) || /* flag CSUM should be clear */ + GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */ + !GRE_IS_KEY(header->gre_hd.flags) || /* flag KEY should be set */ + (header->gre_hd.flags & GRE_FLAGS)) /* flag Recursion Ctrl should be clear */ /* if invalid, discard this packet */ goto drop; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 6f9df375c5d4..8093e39ae263 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -731,14 +731,9 @@ static int update_filter(struct tap_filter *filter, void __user *arg) } alen = ETH_ALEN * uf.count; - addr = kmalloc(alen, GFP_KERNEL); - if (!addr) - return -ENOMEM; - - if (copy_from_user(addr, arg + sizeof(uf), alen)) { - err = -EFAULT; - goto done; - } + addr = memdup_user(arg + sizeof(uf), alen); + if (IS_ERR(addr)) + return PTR_ERR(addr); /* The filter is updated without holding any locks. Which is * perfectly safe. 
We disable it first and in the worst @@ -758,7 +753,7 @@ static int update_filter(struct tap_filter *filter, void __user *arg) for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ - goto done; + goto free_addr; } addr_hash_set(filter->mask, addr[n].u); } @@ -774,8 +769,7 @@ static int update_filter(struct tap_filter *filter, void __user *arg) /* Return the number of exact filters */ err = nexact; - -done: +free_addr: kfree(addr); return err; } diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 4b4458616693..c5544d36c54f 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2300,10 +2300,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, serial->rx_data_length = rx_size; for (i = 0; i < serial->num_rx_urbs; i++) { serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); - if (!serial->rx_urb[i]) { - dev_err(dev, "Could not allocate urb?\n"); + if (!serial->rx_urb[i]) goto exit; - } serial->rx_urb[i]->transfer_buffer = NULL; serial->rx_urb[i]->transfer_buffer_length = 0; serial->rx_data[i] = kzalloc(serial->rx_data_length, @@ -2314,10 +2312,8 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, /* TX, allocate urb and initialize */ serial->tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!serial->tx_urb) { - dev_err(dev, "Could not allocate urb?\n"); + if (!serial->tx_urb) goto exit; - } serial->tx_urb->transfer_buffer = NULL; serial->tx_urb->transfer_buffer_length = 0; /* prepare our TX buffer */ @@ -2555,20 +2551,16 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, /* start allocating */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); - if (!hso_net->mux_bulk_rx_urb_pool[i]) { - dev_err(&interface->dev, "Could not allocate rx urb\n"); + if (!hso_net->mux_bulk_rx_urb_pool[i]) goto exit; - } hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) goto exit; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!hso_net->mux_bulk_tx_urb) { - dev_err(&interface->dev, "Could not allocate tx urb\n"); + if (!hso_net->mux_bulk_tx_urb) goto exit; - } hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) goto exit; @@ -2787,10 +2779,8 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface) } mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!mux->shared_intr_urb) { - dev_err(&interface->dev, "Could not allocate intr urb?\n"); + if (!mux->shared_intr_urb) goto exit; - } mux->shared_intr_buf = kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), GFP_KERNEL); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 6a9d474b08b2..432b8a3ae354 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3002,10 +3002,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) gso_skb: urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netif_dbg(dev, tx_err, dev->net, "no urb\n"); + if (!urb) goto drop; - } entry = (struct skb_data *)skb->cb; entry->urb = urb; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 3bfb59209326..d5071e364d40 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -2062,11 +2062,8 @@ int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype, cmd, reqtype, value, index, size); urb = usb_alloc_urb(0, GFP_ATOMIC); - if (!urb) { - netdev_err(dev->net, 
"Error allocating URB in" - " %s!\n", __func__); + if (!urb) goto fail; - } if (data) { buf = kmemdup(data, size, GFP_ATOMIC); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f37a6e61d4ad..4bda502254fb 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -313,7 +313,7 @@ static const struct net_device_ops veth_netdev_ops = { }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \ - NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \ + NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX ) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 4244b9d4418e..2fd93b4c759a 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1641,7 +1641,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } } -void +static void vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) { int i; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c0dda6fc0921..3f7e0d2dd21a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -27,7 +27,6 @@ #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/vxlan.h> -#include <net/protocol.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ip6_tunnel.h> diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 6f044450b702..5fbf83d5aa57 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -162,7 +162,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) ALIGNMENT_OF_UCC_HDLC_PRAM); if (priv->ucc_pram_offset < 0) { - dev_err(priv->dev, "Can not allocate MURAM for hdlc prameter.\n"); + dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); ret = -ENOMEM; goto free_tx_bd; } diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c index fc1355d98bc6..5d429f816125 100644 --- a/drivers/net/wimax/i2400m/usb-notif.c +++ b/drivers/net/wimax/i2400m/usb-notif.c @@ -206,7 +206,6 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu) i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL); if (!i2400mu->notif_urb) { ret = -ENOMEM; - dev_err(dev, "notification: cannot allocate URB\n"); goto error_alloc_urb; } epd = usb_get_epd(i2400mu->usb_iface, diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 8aded24bcdf4..7a60d2e652da 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -706,10 +706,8 @@ static int ar5523_alloc_rx_bufs(struct ar5523 *ar) data->ar = ar; data->urb = usb_alloc_urb(0, GFP_KERNEL); - if (!data->urb) { - ar5523_err(ar, "could not allocate rx data urb\n"); + if (!data->urb) goto err; - } list_add_tail(&data->list, &ar->rx_data_free); atomic_inc(&ar->rx_data_free_cnt); } @@ -824,7 +822,6 @@ static void ar5523_tx_work_locked(struct ar5523 *ar) urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { - ar5523_err(ar, "Failed to allocate TX urb\n"); ieee80211_free_txskb(ar->hw, skb); continue; } @@ -949,10 +946,8 @@ static int ar5523_alloc_tx_cmd(struct ar5523 *ar) init_completion(&cmd->done); cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL); - if (!cmd->urb_tx) { - ar5523_err(ar, "could not allocate urb\n"); + if (!cmd->urb_tx) return -ENOMEM; - } cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ, GFP_KERNEL, &cmd->urb_tx->transfer_dma); diff --git 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 98b15a9a2779..fa26619a7945 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1099,15 +1099,11 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
	devinfo->tx_freecount = ntxq;
 
	devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!devinfo->ctl_urb) {
-		brcmf_err("usb_alloc_urb (ctl) failed\n");
+	if (!devinfo->ctl_urb)
		goto error;
-	}
 
	devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
-	if (!devinfo->bulk_urb) {
-		brcmf_err("usb_alloc_urb (bulk) failed\n");
+	if (!devinfo->bulk_urb)
		goto error;
-	}
 
	return &devinfo->bus_pub;
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 56f109bc8394..bca6935a94db 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1613,10 +1613,8 @@ static int ezusb_probe(struct usb_interface *interface,
	}
 
	upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!upriv->read_urb) {
-		err("No free urbs available");
+	if (!upriv->read_urb)
		goto error;
-	}
	if (le16_to_cpu(ep->wMaxPacketSize) != 64)
		pr_warn("bulk in: wMaxPacketSize!= 64\n");
	if (ep->bEndpointAddress != (2 | USB_DIR_IN))
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 799a2efe5793..e0ade40d9497 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -198,22 +198,16 @@ static int if_usb_probe(struct usb_interface *intf,
	}
 
	cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!cardp->rx_urb) {
-		lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
+	if (!cardp->rx_urb)
		goto dealloc;
-	}
 
	cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!cardp->tx_urb) {
-		lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
+	if (!cardp->tx_urb)
		goto dealloc;
-	}
 
	cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!cardp->cmd_urb) {
-		lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
+	if (!cardp->cmd_urb)
		goto dealloc;
-	}
 
	cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
				    GFP_KERNEL);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 0857575c5c39..3bd04f52f369 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -657,11 +657,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
	card->tx_cmd.ep = card->tx_cmd_ep;
 
	card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!card->tx_cmd.urb) {
-		mwifiex_dbg(adapter, ERROR,
-			    "tx_cmd.urb allocation failed\n");
+	if (!card->tx_cmd.urb)
		return -ENOMEM;
-	}
 
	for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
		port = &card->port[i];
@@ -677,11 +674,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
			port->tx_data_list[j].ep = port->tx_data_ep;
			port->tx_data_list[j].urb =
					usb_alloc_urb(0, GFP_KERNEL);
-			if (!port->tx_data_list[j].urb) {
-				mwifiex_dbg(adapter, ERROR,
-					    "urb allocation failed\n");
+			if (!port->tx_data_list[j].urb)
				return -ENOMEM;
-			}
		}
	}
 
@@ -697,10 +691,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
	card->rx_cmd.ep = card->rx_cmd_ep;
 
	card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!card->rx_cmd.urb) {
-		mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n");
+	if (!card->rx_cmd.urb)
		return -ENOMEM;
-	}
 
	card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
	if (!card->rx_cmd.skb)
@@ -714,11 +706,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
		card->rx_data_list[i].ep = card->rx_data_ep;
 
		card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!card->rx_data_list[i].urb) {
-			mwifiex_dbg(adapter, ERROR,
-				    "rx_data_list[] urb allocation failed\n");
+		if (!card->rx_data_list[i].urb)
			return -1;
-		}
		if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
					      MWIFIEX_RX_DATA_BUF_SIZE))
			return -1;
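The libertas_tf and mwifiex hunks each shrink a run of allocate-or-bail steps. A common way to structure such multi-URB setup is a single unwind path, sketched below with invented names (and relying on the fact that usb_free_urb() accepts NULL, so partial failures unwind uniformly):

/* Illustrative sketch of the alloc-several-then-unwind shape used by
 * the libertas_tf and mwifiex probe paths; names are invented. */
#include <linux/usb.h>

struct demo_card {
	struct urb *rx_urb;
	struct urb *tx_urb;
	struct urb *cmd_urb;
};

static void demo_free_urbs(struct demo_card *card)
{
	/* usb_free_urb(NULL) is a no-op, so no per-field checks. */
	usb_free_urb(card->cmd_urb);
	usb_free_urb(card->tx_urb);
	usb_free_urb(card->rx_urb);
}

static int demo_alloc_urbs(struct demo_card *card)
{
	card->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!card->rx_urb)
		goto dealloc;

	card->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!card->tx_urb)
		goto dealloc;

	card->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!card->cmd_urb)
		goto dealloc;

	return 0;

dealloc:
	demo_free_urbs(card);
	return -ENOMEM;
}

One oddity the diff leaves in place: mwifiex_usb_rx_init() still returns -1 rather than -ENOMEM for data-URB failures. Its caller only tests for non-zero, but an errno would be more conventional.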
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 41617b7b0822..32aa5c1d070a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -739,11 +739,8 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
	for (i = 0; i < rtlusb->rx_urb_num; i++) {
		err = -ENOMEM;
		urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!urb) {
-			RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-				 "Failed to alloc URB!!\n");
+		if (!urb)
			goto err_out;
-		}
 
		err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
		if (err < 0) {
@@ -907,15 +904,12 @@ static void _rtl_tx_complete(struct urb *urb)
 static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
					 struct sk_buff *skb, u32 ep_num)
 {
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	struct urb *_urb;
 
	WARN_ON(NULL == skb);
	_urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!_urb) {
-		RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-			 "Can't allocate URB for bulk out!\n");
		kfree_skb(skb);
		return NULL;
	}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 84d6cbdd11b2..3a562683603c 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -412,4 +412,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 
 void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
+#endif
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index fb87cb39a56b..e8c5dddc54ba 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -32,15 +32,6 @@
 #include <linux/vmalloc.h>
 #include <linux/rculist.h>
 
-static void xenvif_del_hash(struct rcu_head *rcu)
-{
-	struct xenvif_hash_cache_entry *entry;
-
-	entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);
-
-	kfree(entry);
-}
-
 static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
 {
@@ -76,7 +67,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
-			call_rcu(&oldest->rcu, xenvif_del_hash);
+			kfree_rcu(oldest, rcu);
		}
	}
@@ -114,7 +105,7 @@ static void xenvif_flush_hash(struct xenvif *vif)
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
-		call_rcu(&entry->rcu, xenvif_del_hash);
+		kfree_rcu(entry, rcu);
	}
 
	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
@@ -369,6 +360,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
	return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
+{
+	unsigned int i;
+
+	switch (vif->hash.alg) {
+	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
+		break;
+
+	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+		seq_puts(m, "Hash Algorithm: NONE\n");
+		/* FALLTHRU */
+	default:
+		return;
+	}
+
+	if (vif->hash.flags) {
+		seq_puts(m, "\nHash Flags:\n");
+
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+			seq_puts(m, "- IPv4\n");
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+			seq_puts(m, "- IPv4 + TCP\n");
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+			seq_puts(m, "- IPv6\n");
+		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
+			seq_puts(m, "- IPv6 + TCP\n");
+	}
+
+	seq_puts(m, "\nHash Key:\n");
+
+	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
+		unsigned int j, n;
+
+		n = 8;
+		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
+			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
+
+		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
+
+		for (j = 0; j < n; j++, i++)
+			seq_printf(m, "%02x ", vif->hash.key[i]);
+
+		seq_puts(m, "\n");
+	}
+
+	if (vif->hash.size != 0) {
+		seq_puts(m, "\nHash Mapping:\n");
+
+		for (i = 0; i < vif->hash.size; ) {
+			unsigned int j, n;
+
+			n = 8;
+			if (i + n >= vif->hash.size)
+				n = vif->hash.size - i;
+
+			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
+
+			for (j = 0; j < n; j++, i++)
+				seq_printf(m, "%4u ", vif->hash.mapping[i]);
+
+			seq_puts(m, "\n");
+		}
+	}
+}
+#endif /* CONFIG_DEBUG_FS */
+
 void xenvif_init_hash(struct xenvif *vif)
 {
	if (xenvif_hash_cache_size == 0)
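Besides the message pruning, the hash.c change above replaces a call_rcu() callback whose only job was to kfree() the entry with kfree_rcu(), which performs the same deferred free without the hand-written callback; its second argument names the struct's rcu_head member. A minimal sketch with an invented entry type:

/* Minimal sketch of the call_rcu() -> kfree_rcu() conversion;
 * the demo_entry type is invented for illustration. */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {
	struct list_head link;
	u32 val;
	struct rcu_head rcu;
};

/* Old form: a dedicated callback that only kfree()s. */
static void demo_del_entry(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_entry, rcu));
}

static void demo_remove(struct demo_entry *entry, bool old_style)
{
	list_del_rcu(&entry->link);

	if (old_style)
		call_rcu(&entry->rcu, demo_del_entry);	/* before */
	else
		kfree_rcu(entry, rcu);			/* after: same effect */
}

The deleted xenvif_del_hash() is exactly the demo_del_entry() boilerplate this helper makes unnecessary.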
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 6a31f2610c23..bacf6e0c12b9 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
	return count;
 }
 
-static int xenvif_dump_open(struct inode *inode, struct file *filp)
+static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
 {
	int ret;
	void *queue = NULL;
@@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp)
 
 static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
	.owner = THIS_MODULE,
-	.open = xenvif_dump_open,
+	.open = xenvif_io_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = xenvif_write_io_ring,
 };
 
+static int xenvif_read_ctrl(struct seq_file *m, void *v)
+{
+	struct xenvif *vif = m->private;
+
+	xenvif_dump_hash_info(vif, m);
+
+	return 0;
+}
+
+static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, xenvif_read_ctrl, inode->i_private);
+}
+
+static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = xenvif_ctrl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static void xenvif_debugfs_addif(struct xenvif *vif)
 {
	struct dentry *pfile;
@@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
			pr_warn("Creation of io_ring file returned %ld!\n",
				PTR_ERR(pfile));
		}
+
+		if (vif->ctrl_task) {
+			pfile = debugfs_create_file("ctrl",
+						    S_IRUSR,
+						    vif->xenvif_dbg_root,
+						    vif,
+						    &xenvif_dbg_ctrl_ops_fops);
+			if (IS_ERR_OR_NULL(pfile))
+				pr_warn("Creation of ctrl file returned %ld!\n",
+					PTR_ERR(pfile));
+		}
	} else
		netdev_warn(vif->dev,
			    "Creation of vif debugfs dir returned %ld!\n",