Diffstat (limited to 'drivers')
305 files changed, 31883 insertions, 4992 deletions
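The Bluetooth hunks below move Intel's "secure send" firmware-download helper out of btusb.c into btintel.c so that the new hci_intel.c UART driver can share it. The helper streams a firmware image to the controller as vendor HCI commands whose parameter field carries one fragment-type byte plus at most 252 payload bytes. A minimal standalone sketch of just that chunking arithmetic follows; send_fragment() is a hypothetical stand-in for the driver's real __hci_cmd_sync(hdev, 0xfc09, ...) call, and the fragment-size cap is taken directly from the hunks that follow:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for __hci_cmd_sync(); a real implementation would
 * issue the vendor command and wait for its command complete event.
 */
static int send_fragment(const uint8_t *buf, size_t len)
{
	(void)buf;
	(void)len;
	return 0;
}

/* Chunk an arbitrarily long firmware region into commands of one
 * fragment-type byte plus up to 252 payload bytes, mirroring the
 * btintel_secure_send() loop in the hunks below.
 */
static int secure_send(uint8_t fragment_type, uint32_t plen,
		       const uint8_t *param)
{
	while (plen > 0) {
		uint8_t cmd_param[253];
		uint8_t fragment_len = (plen > 252) ? 252 : plen;
		int err;

		cmd_param[0] = fragment_type;
		memcpy(cmd_param + 1, param, fragment_len);

		err = send_fragment(cmd_param, fragment_len + 1);
		if (err < 0)
			return err;

		plen -= fragment_len;
		param += fragment_len;
	}

	return 0;
}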
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 9635f1033ce5..8d973c4fc84e 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -12,6 +12,7 @@ #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/of_platform.h> MODULE_DESCRIPTION("Broadcom's specific AMBA driver"); MODULE_LICENSE("GPL"); @@ -409,6 +410,17 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_pci_early_init(&bus->drv_pci[0]); } + /* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when + * of_default_bus_match_table is exported or in some other way + * accessible. This is just a temporary workaround. + */ + if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) { + struct device *dev = &bus->host_pdev->dev; + + of_platform_populate(dev->of_node, of_default_bus_match_table, + NULL, dev); + } + /* Cores providing flash access go before SPROM init */ list_for_each_entry(core, &bus->cores, list) { if (bcma_is_core_needed_early(core->id.id)) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 2e777071e1dc..79e8234b1aa5 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -132,6 +132,7 @@ config BT_HCIUART_3WIRE config BT_HCIUART_INTEL bool "Intel protocol support" depends on BT_HCIUART + select BT_HCIUART_H4 select BT_INTEL help The Intel protocol support enables Bluetooth HCI over serial diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index fcfb72e9e0ee..a5c4d0584389 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -492,7 +492,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; - }; + } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 7aab65427d38..a00bb82eb7c6 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -427,7 +427,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; - }; + } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 9ceb8ac68fdc..02ed816a18f9 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -34,6 +34,7 @@ #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}}) +#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}}) int btbcm_check_bdaddr(struct hci_dev *hdev) { @@ -66,9 +67,13 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) * * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller * waiting for configuration state. + * + * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller + * waiting for configuration state.
*/ if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) || - !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) { + !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) || + !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) { BT_INFO("%s: BCM: Using default device address (%pMR)", hdev->name, &bda->bdaddr); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); @@ -241,6 +246,7 @@ static const struct { u16 subver; const char *name; } bcm_uart_subver_table[] = { + { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 828f2f8d1568..1ce4ac16c7fa 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -89,6 +89,86 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(btintel_set_bdaddr); +void btintel_hw_error(struct hci_dev *hdev, u8 code) +{ + struct sk_buff *skb; + u8 type = 0x00; + + BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reset after hardware error failed (%ld)", + hdev->name, PTR_ERR(skb)); + return; + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Retrieving Intel exception info failed (%ld)", + hdev->name, PTR_ERR(skb)); + return; + } + + if (skb->len != 13) { + BT_ERR("%s: Exception info size mismatch", hdev->name); + kfree_skb(skb); + return; + } + + BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1)); + + kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(btintel_hw_error); + +void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) +{ + const char *variant; + + switch (ver->fw_variant) { + case 0x06: + variant = "Bootloader"; + break; + case 0x23: + variant = "Firmware"; + break; + default: + return; + } + + BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name, + variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, + ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); +} +EXPORT_SYMBOL_GPL(btintel_version_info); + +int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, + const void *param) +{ + while (plen > 0) { + struct sk_buff *skb; + u8 cmd_param[253], fragment_len = (plen > 252) ? 
252 : plen; + + cmd_param[0] = fragment_type; + memcpy(cmd_param + 1, param, fragment_len); + + skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1, + cmd_param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + plen -= fragment_len; + param += fragment_len; + } + + return 0; +} +EXPORT_SYMBOL_GPL(btintel_secure_send); + MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION); MODULE_VERSION(VERSION); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 4bda6ab34f60..b278d14758d5 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -73,6 +73,11 @@ struct intel_secure_send_result { int btintel_check_bdaddr(struct hci_dev *hdev); int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); +void btintel_hw_error(struct hci_dev *hdev, u8 code); + +void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver); +int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, + const void *param); #else @@ -86,4 +91,18 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd return -EOPNOTSUPP; } +static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) +{ +} + +static inline void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) +{ +} + +static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, + u32 plen, const void *param) +{ + return -EOPNOTSUPP; +} + #endif diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index 086f0ec89580..27a9aac25583 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -95,10 +95,10 @@ struct btmrvl_private { struct btmrvl_device btmrvl_dev; struct btmrvl_adapter *adapter; struct btmrvl_thread main_thread; - int (*hw_host_to_card) (struct btmrvl_private *priv, + int (*hw_host_to_card)(struct btmrvl_private *priv, u8 *payload, u16 nb); - int (*hw_wakeup_firmware) (struct btmrvl_private *priv); - int (*hw_process_int_status) (struct btmrvl_private *priv); + int (*hw_wakeup_firmware)(struct btmrvl_private *priv); + int (*hw_process_int_status)(struct btmrvl_private *priv); void (*firmware_dump)(struct btmrvl_private *priv); spinlock_t driver_lock; /* spinlock used by driver */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b4cf8d9c9dac..cc92b0f84a51 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -68,6 +68,9 @@ static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth AMP device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP }, + /* Generic Bluetooth USB interface */ + { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) }, + /* Apple-specific (Broadcom) devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_APPLE }, @@ -1878,51 +1881,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return -EILSEQ; } -static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type, - u32 plen, const void *param) -{ - while (plen > 0) { - struct sk_buff *skb; - u8 cmd_param[253], fragment_len = (plen > 252) ?
252 : plen; - - cmd_param[0] = fragment_type; - memcpy(cmd_param + 1, param, fragment_len); - - skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1, - cmd_param, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - kfree_skb(skb); - - plen -= fragment_len; - param += fragment_len; - } - - return 0; -} - -static void btusb_intel_version_info(struct hci_dev *hdev, - struct intel_version *ver) -{ - const char *variant; - - switch (ver->fw_variant) { - case 0x06: - variant = "Bootloader"; - break; - case 0x23: - variant = "Firmware"; - break; - default: - return; - } - - BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name, - variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, - ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); -} - static int btusb_setup_intel_new(struct hci_dev *hdev) { static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01, @@ -1984,7 +1942,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) return -EINVAL; } - btusb_intel_version_info(hdev, ver); + btintel_version_info(hdev, ver); /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies @@ -2104,7 +2062,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Start the firmware download transaction with the Init fragment * represented by the 128 bytes of CSS header. */ - err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data); + err = btintel_secure_send(hdev, 0x00, 128, fw->data); if (err < 0) { BT_ERR("%s: Failed to send firmware header (%d)", hdev->name, err); @@ -2114,7 +2072,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Send the 256 bytes of public key information from the firmware * as the PKey fragment. */ - err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128); + err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128); if (err < 0) { BT_ERR("%s: Failed to send firmware public key (%d)", hdev->name, err); @@ -2124,7 +2082,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) /* Send the 256 bytes of signature information from the firmware * as the Sign fragment. */ - err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388); + err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388); if (err < 0) { BT_ERR("%s: Failed to send firmware signature (%d)", hdev->name, err); @@ -2148,8 +2106,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * firmware data buffer as a single Data fragment.
*/ if (!(frag_len % 4)) { - err = btusb_intel_secure_send(hdev, 0x01, frag_len, - fw_ptr); + err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr); if (err < 0) { BT_ERR("%s: Failed to send firmware data (%d)", hdev->name, err); @@ -2291,39 +2248,6 @@ done: return 0; } -static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code) -{ - struct sk_buff *skb; - u8 type = 0x00; - - BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code); - - skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: Reset after hardware error failed (%ld)", - hdev->name, PTR_ERR(skb)); - return; - } - kfree_skb(skb); - - skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: Retrieving Intel exception info failed (%ld)", - hdev->name, PTR_ERR(skb)); - return; - } - - if (skb->len != 13) { - BT_ERR("%s: Exception info size mismatch", hdev->name); - kfree_skb(skb); - return; - } - - BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1)); - - kfree_skb(skb); -} - static int btusb_shutdown_intel(struct hci_dev *hdev) { struct sk_buff *skb; @@ -2783,7 +2707,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_INTEL_NEW) { hdev->send = btusb_send_frame_intel; hdev->setup = btusb_setup_intel_new; - hdev->hw_error = btusb_hw_error_intel; + hdev->hw_error = btintel_hw_error; hdev->set_bdaddr = btintel_set_bdaddr; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); } diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index 78e10f0c65b2..84135c54ed2e 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -182,9 +182,9 @@ static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb) int i; printk(KERN_INFO "Bluetooth: Nokia control data ="); - for (i = 0; i < skb->len; i++) { + for (i = 0; i < skb->len; i++) printk(" %02x", skb->data[i]); - } + printk("\n"); /* transition to active state */ @@ -406,7 +406,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) break; default: return -EILSEQ; - }; + } nsh.zero = 0; nsh.len = skb->len; diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 3455cecc9ecf..b35b238a0380 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -75,7 +75,7 @@ struct h5 { size_t rx_pending; /* Expecting more bytes */ u8 rx_ack; /* Last ack number received */ - int (*rx_func) (struct hci_uart *hu, u8 c); + int (*rx_func)(struct hci_uart *hu, u8 c); struct timer_list timer; /* Retransmission timer */ diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 5dd07bf05236..21dfa89751df 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -24,8 +24,569 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> +#include <linux/firmware.h> +#include <linux/wait.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" +#include "btintel.h" + +#define STATE_BOOTLOADER 0 +#define STATE_DOWNLOADING 1 +#define STATE_FIRMWARE_LOADED 2 +#define STATE_FIRMWARE_FAILED 3 +#define STATE_BOOTING 4 + +struct intel_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; + unsigned long flags; +}; + +static int intel_open(struct hci_uart *hu) +{ + struct intel_data *intel; + + BT_DBG("hu %p", hu); + + intel = kzalloc(sizeof(*intel), GFP_KERNEL); + if (!intel) + return -ENOMEM; + + skb_queue_head_init(&intel->txq); + + hu->priv = intel; + return 0; +} 
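/* Illustrative aside, not part of the original patch: the STATE_* bits
 * defined above double as a wait/wake handshake between intel_setup() and
 * intel_recv_event() further down. A minimal sketch of that pattern using
 * the same kernel bit-wait API, guarded out since it is an example rather
 * than driver code:
 */
#if 0
static int example_wait_for_download(struct intel_data *intel)
{
	/* Sleep until STATE_DOWNLOADING is cleared; a non-zero return
	 * means a signal or a timeout, which the real caller maps to
	 * -EINTR or -ETIMEDOUT respectively.
	 */
	return wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
				   TASK_INTERRUPTIBLE,
				   msecs_to_jiffies(5000));
}

static void example_complete_download(struct intel_data *intel)
{
	/* Clear the bit, order the store, then wake any sleeper. */
	if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&intel->flags, STATE_DOWNLOADING);
	}
}
#endif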
+ +static int intel_close(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&intel->txq); + kfree_skb(intel->rx_skb); + kfree(intel); + + hu->priv = NULL; + return 0; +} + +static int intel_flush(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&intel->txq); + + return 0; +} + +static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) +{ + struct sk_buff *skb; + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt)); + evt->ncmd = 0x01; + evt->opcode = cpu_to_le16(opcode); + + *skb_put(skb, 1) = 0x00; + + bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int intel_setup(struct hci_uart *hu) +{ + static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01, + 0x00, 0x08, 0x04, 0x00 }; + struct intel_data *intel = hu->priv; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + struct intel_version *ver; + struct intel_boot_params *params; + const struct firmware *fw; + const u8 *fw_ptr; + char fwname[64]; + u32 frag_len; + ktime_t calltime, delta, rettime; + unsigned long long duration; + int err; + + BT_DBG("%s", hdev->name); + + hu->hdev->set_bdaddr = btintel_set_bdaddr; + + calltime = ktime_get(); + + set_bit(STATE_BOOTLOADER, &intel->flags); + + /* Read the Intel version information to determine if the device + * is in bootloader mode or if it already has operational firmware + * loaded. + */ + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading Intel version information failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*ver)) { + BT_ERR("%s: Intel version event size mismatch", hdev->name); + kfree_skb(skb); + return -EILSEQ; + } + + ver = (struct intel_version *)skb->data; + if (ver->status) { + BT_ERR("%s: Intel version command failure (%02x)", + hdev->name, ver->status); + err = -bt_to_errno(ver->status); + kfree_skb(skb); + return err; + } + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (ver->hw_platform != 0x37) { + BT_ERR("%s: Unsupported Intel hardware platform (%u)", + hdev->name, ver->hw_platform); + kfree_skb(skb); + return -EINVAL; + } + + /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is + * supported by this firmware loading method. This check has been + * put in place to ensure correct forward compatibility options + * when newer hardware variants come along. + */ + if (ver->hw_variant != 0x0b) { + BT_ERR("%s: Unsupported Intel hardware variant (%u)", + hdev->name, ver->hw_variant); + kfree_skb(skb); + return -EINVAL; + } + + btintel_version_info(hdev, ver); + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. 
+ * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver->fw_variant == 0x23) { + kfree_skb(skb); + clear_bit(STATE_BOOTLOADER, &intel->flags); + btintel_check_bdaddr(hdev); + return 0; + } + + /* If the device is not in bootloader mode, then the only possible + * choice is to return an error and abort the device initialization. + */ + if (ver->fw_variant != 0x06) { + BT_ERR("%s: Unsupported Intel firmware variant (%u)", + hdev->name, ver->fw_variant); + kfree_skb(skb); + return -ENODEV; + } + + kfree_skb(skb); + + /* Read the secure boot parameters to identify the operating + * details of the bootloader. + */ + skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading Intel boot parameters failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*params)) { + BT_ERR("%s: Intel boot parameters size mismatch", hdev->name); + kfree_skb(skb); + return -EILSEQ; + } + + params = (struct intel_boot_params *)skb->data; + if (params->status) { + BT_ERR("%s: Intel boot parameters command failure (%02x)", + hdev->name, params->status); + err = -bt_to_errno(params->status); + kfree_skb(skb); + return err; + } + + BT_INFO("%s: Device revision is %u", hdev->name, + le16_to_cpu(params->dev_revid)); + + BT_INFO("%s: Secure boot is %s", hdev->name, + params->secure_boot ? "enabled" : "disabled"); + + BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name, + params->min_fw_build_nn, params->min_fw_build_cw, + 2000 + params->min_fw_build_yy); + + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (params->limited_cce != 0x00) { + BT_ERR("%s: Unsupported Intel firmware loading method (%u)", + hdev->name, params->limited_cce); + kfree_skb(skb); + return -EINVAL; + } + + /* If the OTP has no valid Bluetooth device address, then there will + * also be no valid address for the operational firmware. + */ + if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) { + BT_INFO("%s: No device address configured", hdev->name); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + + /* With this Intel bootloader only the hardware variant and device + * revision information are used to select the right firmware. + * + * Currently this bootloader support is limited to hardware variant + * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b). + */ + snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi", + le16_to_cpu(params->dev_revid)); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + BT_ERR("%s: Failed to load Intel firmware file (%d)", + hdev->name, err); + kfree_skb(skb); + return err; + } + + BT_INFO("%s: Found device firmware: %s", hdev->name, fwname); + + kfree_skb(skb); + + if (fw->size < 644) { + BT_ERR("%s: Invalid size of firmware file (%zu)", + hdev->name, fw->size); + err = -EBADF; + goto done; + } + + set_bit(STATE_DOWNLOADING, &intel->flags); + + /* Start the firmware download transaction with the Init fragment + * represented by the 128 bytes of CSS header. + */ + err = btintel_secure_send(hdev, 0x00, 128, fw->data); + if (err < 0) { + BT_ERR("%s: Failed to send firmware header (%d)", + hdev->name, err); + goto done; + } + + /* Send the 256 bytes of public key information from the firmware + * as the PKey fragment.
+ */ + err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128); + if (err < 0) { + BT_ERR("%s: Failed to send firmware public key (%d)", + hdev->name, err); + goto done; + } + + /* Send the 256 bytes of signature information from the firmware + * as the Sign fragment. + */ + err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388); + if (err < 0) { + BT_ERR("%s: Failed to send firmware signature (%d)", + hdev->name, err); + goto done; + } + + fw_ptr = fw->data + 644; + frag_len = 0; + + while (fw_ptr - fw->data < fw->size) { + struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len); + + frag_len += sizeof(*cmd) + cmd->plen; + + BT_DBG("%s: patching %td/%zu", hdev->name, + (fw_ptr - fw->data), fw->size); + + /* The parameter length of the secure send command requires + * a 4 byte alignment. As it happens, the firmware file + * contains proper Intel_NOP commands to align the fragments + * as needed. + * + * Send set of commands with 4 byte alignment from the + * firmware data buffer as a single Data fragment. + */ + if (frag_len % 4) + continue; + + /* Send each command from the firmware data buffer as + * a single Data fragment. + */ + err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr); + if (err < 0) { + BT_ERR("%s: Failed to send firmware data (%d)", + hdev->name, err); + goto done; + } + + fw_ptr += frag_len; + frag_len = 0; + } + + set_bit(STATE_FIRMWARE_LOADED, &intel->flags); + + BT_INFO("%s: Waiting for firmware download to complete", hdev->name); + + /* Before switching the device into operational mode and with that + * booting the loaded firmware, wait for the bootloader notification + * that all fragments have been successfully received. + * + * When the event processing receives the notification, then the + * STATE_DOWNLOADING flag will be cleared. + * + * The firmware loading should not take longer than 5 seconds + * and thus just timeout if that happens and fail the setup + * of this device. + */ + err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(5000)); + if (err == 1) { + BT_ERR("%s: Firmware loading interrupted", hdev->name); + err = -EINTR; + goto done; + } + + if (err) { + BT_ERR("%s: Firmware loading timeout", hdev->name); + err = -ETIMEDOUT; + goto done; + } + + if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) { + BT_ERR("%s: Firmware loading failed", hdev->name); + err = -ENOEXEC; + goto done; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration); + +done: + release_firmware(fw); + + if (err < 0) + return err; + + calltime = ktime_get(); + + set_bit(STATE_BOOTING, &intel->flags); + + skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending a bootup notification. + * + * Booting into operational firmware should not take longer than + * 1 second. However, if that happens, then just fail the setup + * since something went wrong.
+ */ + BT_INFO("%s: Waiting for device to boot", hdev->name); + + err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); + + if (err == 1) { + BT_ERR("%s: Device boot interrupted", hdev->name); + return -EINTR; + } + + if (err) { + BT_ERR("%s: Device boot timeout", hdev->name); + return -ETIMEDOUT; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration); + + clear_bit(STATE_BOOTLOADER, &intel->flags); + + return 0; +} + +static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct intel_data *intel = hu->priv; + struct hci_event_hdr *hdr; + + if (!test_bit(STATE_BOOTLOADER, &intel->flags)) + goto recv; + + hdr = (void *)skb->data; + + /* When the firmware loading completes the device sends + * out a vendor specific event indicating the result of + * the firmware loading. + */ + if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 && + skb->data[2] == 0x06) { + if (skb->data[3] != 0x00) + set_bit(STATE_FIRMWARE_FAILED, &intel->flags); + + if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) && + test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) { + smp_mb__after_atomic(); + wake_up_bit(&intel->flags, STATE_DOWNLOADING); + } + + /* When switching to the operational firmware the device + * sends a vendor specific event indicating that the bootup + * completed. + */ + } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 && + skb->data[2] == 0x02) { + if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) { + smp_mb__after_atomic(); + wake_up_bit(&intel->flags, STATE_BOOTING); + } + } +recv: + return hci_recv_frame(hdev, skb); +} + +static const struct h4_recv_pkt intel_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = intel_recv_event }, +}; + +static int intel_recv(struct hci_uart *hu, const void *data, int count) +{ + struct intel_data *intel = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, + intel_recv_pkts, + ARRAY_SIZE(intel_recv_pkts)); + if (IS_ERR(intel->rx_skb)) { + int err = PTR_ERR(intel->rx_skb); + BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err); + intel->rx_skb = NULL; + return err; + } + + return count; +} + +static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct intel_data *intel = hu->priv; + + BT_DBG("hu %p skb %p", hu, skb); + + skb_queue_tail(&intel->txq, skb); + + return 0; +} + +static struct sk_buff *intel_dequeue(struct hci_uart *hu) +{ + struct intel_data *intel = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&intel->txq); + if (!skb) + return skb; + + if (test_bit(STATE_BOOTLOADER, &intel->flags) && + (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) { + struct hci_command_hdr *cmd = (void *)skb->data; + __u16 opcode = le16_to_cpu(cmd->opcode); + + /* When the 0xfc01 command is issued to boot into + * the operational firmware, it will actually not + * send a command complete event. To keep the flow + * control working inject that event here. 
+ */ + if (opcode == 0xfc01) + inject_cmd_complete(hu->hdev, opcode); + } + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + return skb; +} + +static const struct hci_uart_proto intel_proto = { + .id = HCI_UART_INTEL, + .name = "Intel", + .init_speed = 115200, + .open = intel_open, + .close = intel_close, + .flush = intel_flush, + .setup = intel_setup, + .recv = intel_recv, + .enqueue = intel_enqueue, + .dequeue = intel_dequeue, +}; + +int __init intel_init(void) +{ + return hci_uart_register_proto(&intel_proto); +} + +int __exit intel_deinit(void) +{ + return hci_uart_unregister_proto(&intel_proto); +} diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 177dd69fdd95..20c2ac193ff9 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -770,7 +770,7 @@ static int __init hci_uart_init(void) /* Register the tty discipline */ - memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc)); + memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc)); hci_uart_ldisc.magic = TTY_LDISC_MAGIC; hci_uart_ldisc.name = "n_hci"; hci_uart_ldisc.open = hci_uart_tty_open; @@ -804,6 +804,9 @@ static int __init hci_uart_init(void) #ifdef CONFIG_BT_HCIUART_3WIRE h5_init(); #endif +#ifdef CONFIG_BT_HCIUART_INTEL + intel_init(); +#endif #ifdef CONFIG_BT_HCIUART_BCM bcm_init(); #endif @@ -830,6 +833,9 @@ static void __exit hci_uart_exit(void) #ifdef CONFIG_BT_HCIUART_3WIRE h5_deinit(); #endif +#ifdef CONFIG_BT_HCIUART_INTEL + intel_deinit(); +#endif #ifdef CONFIG_BT_HCIUART_BCM bcm_deinit(); #endif diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index ce9c670956f5..496587a73a9d 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -167,6 +167,11 @@ int h5_init(void); int h5_deinit(void); #endif +#ifdef CONFIG_BT_HCIUART_INTEL +int intel_init(void); +int intel_deinit(void); +#endif + #ifdef CONFIG_BT_HCIUART_BCM int bcm_init(void); int bcm_deinit(void); diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 36eb3d012b6d..180a8f7ec82d 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -871,7 +871,7 @@ repoll: if (is_eth) { wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_VLAN_PRESENT_MASK) { + MLX4_CQE_CVLAN_PRESENT_MASK) { wc->vlan_id = be16_to_cpu(cqe->sl_vid) & MLX4_CQE_VID_MASK; } else { diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 7fde4d5c2b28..3c45358844eb 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info, static void ad_marker_response_received(struct bond_marker *marker, struct port *port) { - marker = NULL; - port = NULL; /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */ } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e1ccefce9a9d..0c627b4733ca 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3779,7 +3779,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) struct slave *slave; struct list_head *iter; struct bond_up_slave *new_arr, *old_arr; - int slaves_in_agg; int agg_id = 0; int ret = 0; @@ -3810,7 +3809,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) } goto out; } - slaves_in_agg = ad_info.ports; agg_id = ad_info.aggregator_id; } bond_for_each_slave(bond, 
slave, iter) { diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 1bda29249d12..db760e84119f 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -111,6 +111,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 }, [IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY, .len = ETH_ALEN }, + [IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 }, }; static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { @@ -405,7 +406,6 @@ static int bond_changelink(struct net_device *bond_dev, if (err) return err; } - if (data[IFLA_BOND_AD_USER_PORT_KEY]) { int port_key = nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]); @@ -415,7 +415,6 @@ static int bond_changelink(struct net_device *bond_dev, if (err) return err; } - if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) { if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN) return -EINVAL; @@ -426,6 +425,15 @@ static int bond_changelink(struct net_device *bond_dev, if (err) return err; } + if (data[IFLA_BOND_TLB_DYNAMIC_LB]) { + int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]); + + bond_opt_initval(&newval, dynamic_lb); + err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval); + if (err) + return err; + } + return 0; } @@ -476,6 +484,7 @@ static size_t bond_get_size(const struct net_device *bond_dev) nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */ + nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */ 0; } @@ -598,6 +607,10 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.ad_select)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB, + bond->params.tlb_dynamic_lb)) + goto nla_put_failure; + if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info info; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index e9c624d54dd4..6dda57e2e724 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -420,6 +420,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .flags = BOND_OPTFLAG_IFDOWN, .values = bond_ad_user_port_key_tbl, .set = bond_option_ad_user_port_key_set, + }, + [BOND_OPT_NUM_PEER_NOTIF_ALIAS] = { + .id = BOND_OPT_NUM_PEER_NOTIF_ALIAS, + .name = "num_grat_arp", + .desc = "Number of peer notifications to send on failover event", + .values = bond_num_peer_notif_tbl, + .set = bond_option_num_peer_notif_set } }; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 31835a4dab57..f4ae72086215 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -380,7 +380,7 @@ static ssize_t bonding_show_ad_select(struct device *d, static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, bonding_show_ad_select, bonding_sysfs_store_option); -/* Show and set the number of peer notifications to send after a failover event. */ +/* Show the number of peer notifications to send after a failover event. 
*/ static ssize_t bonding_show_num_peer_notif(struct device *d, struct device_attribute *attr, char *buf) @@ -388,24 +388,10 @@ static ssize_t bonding_show_num_peer_notif(struct device *d, struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.num_peer_notif); } - -static ssize_t bonding_store_num_peer_notif(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct bonding *bond = to_bond(d); - int ret; - - ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf); - if (!ret) - ret = count; - - return ret; -} static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, - bonding_show_num_peer_notif, bonding_store_num_peer_notif); + bonding_show_num_peer_notif, bonding_sysfs_store_option); static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, - bonding_show_num_peer_notif, bonding_store_num_peer_notif); + bonding_show_num_peer_notif, bonding_sysfs_store_option); /* Show the MII monitor interval. */ static ssize_t bonding_show_miimon(struct device *d, diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 7ad0a4d8e475..4c483d937481 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -46,13 +46,13 @@ config NET_DSA_MV88E6171 ethernet switch chips. config NET_DSA_MV88E6352 - tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support" + tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support" depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- - This enables support for the Marvell 88E6172, 88E6176 and 88E6352 - ethernet switch chips. + This enables support for the Marvell 88E6172, 88E6176, 88E6320, + 88E6321 and 88E6352 ethernet switch chips. config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 079897b3a955..289e20443d83 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -901,15 +901,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = ds_to_priv(ds); - u32 duplex, pause, speed; + u32 duplex, pause; u32 reg; duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); - speed = core_readl(priv, CORE_SPDSTS); - - speed >>= (port * SPDSTS_SHIFT); - speed &= SPDSTS_MASK; status->link = 0; @@ -944,18 +940,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, reg &= ~LINK_STS; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); - switch (speed) { - case SPDSTS_10: - status->speed = SPEED_10; - break; - case SPDSTS_100: - status->speed = SPEED_100; - break; - case SPDSTS_1000: - status->speed = SPEED_1000; - break; - } - if ((pause & (1 << port)) && (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { status->asym_pause = 1; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 632815c10a40..af210efecc55 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr) return "Marvell 88E6172"; if ((ret & 0xfff0) == PORT_SWITCH_ID_6176) return "Marvell 88E6176"; + if (ret == PORT_SWITCH_ID_6320_A1) + return "Marvell 88E6320 (A1)"; + if (ret == PORT_SWITCH_ID_6320_A2) + return "Marvell 88E6320 (A2)"; + if ((ret & 0xfff0) == PORT_SWITCH_ID_6320) + return "Marvell 88E6320"; + if (ret == PORT_SWITCH_ID_6321_A1) + return "Marvell 88E6321 (A1)"; + if (ret ==
PORT_SWITCH_ID_6321_A2) + return "Marvell 88E6321 (A2)"; + if ((ret & 0xfff0) == PORT_SWITCH_ID_6321) + return "Marvell 88E6321"; if (ret == PORT_SWITCH_ID_6352_A0) return "Marvell 88E6352 (A0)"; if (ret == PORT_SWITCH_ID_6352_A1) @@ -80,66 +92,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) return 0; } -#ifdef CONFIG_NET_DSA_HWMON - -static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) -{ - int ret; - - *temp = 0; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27); - if (ret < 0) - return ret; - - *temp = (ret & 0xff) - 25; - - return 0; -} - -static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp) -{ - int ret; - - *temp = 0; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); - if (ret < 0) - return ret; - - *temp = (((ret >> 8) & 0x1f) * 5) - 25; - - return 0; -} - -static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp) -{ - int ret; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); - if (ret < 0) - return ret; - temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - return mv88e6xxx_phy_page_write(ds, 0, 6, 26, - (ret & 0xe0ff) | (temp << 8)); -} - -static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm) -{ - int ret; - - *alarm = false; - - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); - if (ret < 0) - return ret; - - *alarm = !!(ret & 0x40); - - return 0; -} -#endif /* CONFIG_NET_DSA_HWMON */ - static int mv88e6352_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -377,10 +329,10 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .set_eee = mv88e6xxx_set_eee, .get_eee = mv88e6xxx_get_eee, #ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6352_get_temp, - .get_temp_limit = mv88e6352_get_temp_limit, - .set_temp_limit = mv88e6352_set_temp_limit, - .get_temp_alarm = mv88e6352_get_temp_alarm, + .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, #endif .get_eeprom = mv88e6352_get_eeprom, .set_eeprom = mv88e6352_set_eeprom, @@ -394,5 +346,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .fdb_getnext = mv88e6xxx_port_fdb_getnext, }; -MODULE_ALIAS("platform:mv88e6352"); MODULE_ALIAS("platform:mv88e6172"); +MODULE_ALIAS("platform:mv88e6176"); +MODULE_ALIAS("platform:mv88e6320"); +MODULE_ALIAS("platform:mv88e6321"); +MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 561342466076..61ce4cf120a6 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -517,6 +517,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds) return false; } +static bool mv88e6xxx_6320_family(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + switch (ps->id) { + case PORT_SWITCH_ID_6320: + case PORT_SWITCH_ID_6321: + return true; + } + return false; +} + static bool mv88e6xxx_6351_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -565,7 +577,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) { int ret; - if (mv88e6xxx_6352_family(ds)) + if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) port = (port + 1) << 5; /* Snapshot the hardware statistics counters for this port.
*/ @@ -796,54 +808,6 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, } } -#ifdef CONFIG_NET_DSA_HWMON - -int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - int val; - - *temp = 0; - - mutex_lock(&ps->smi_mutex); - - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); - if (ret < 0) - goto error; - - /* Enable temperature sensor */ - ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); - if (ret < 0) - goto error; - - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); - if (ret < 0) - goto error; - - /* Wait for temperature to stabilize */ - usleep_range(10000, 12000); - - val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); - if (val < 0) { - ret = val; - goto error; - } - - /* Disable temperature sensor */ - ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); - if (ret < 0) - goto error; - - *temp = ((val & 0x1f) - 5) * 5; - -error: - _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); - mutex_unlock(&ps->smi_mutex); - return ret; -} -#endif /* CONFIG_NET_DSA_HWMON */ - /* Must be called with SMI lock held */ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) @@ -1377,7 +1341,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || - mv88e6xxx_6065_family(ds)) { + mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) { /* MAC Forcing register: don't force link, speed, * duplex or flow control state to any particular * values on physical ports, but force the CPU port @@ -1423,7 +1387,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds)) + mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) reg = PORT_CONTROL_IGMP_MLD_SNOOP | PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; @@ -1431,7 +1395,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) reg |= PORT_CONTROL_DSA_TAG; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else @@ -1441,14 +1406,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_EGRESS_ADD_TAG; } } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) { + mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || + mv88e6xxx_6320_family(ds)) { if (ds->dsa_port_mask & (1 << port)) reg |= PORT_CONTROL_FRAME_MODE_DSA; if (port == dsa_upstream_port(ds)) @@ -1473,11 +1439,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg = 0; if 
(mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds)) + mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds)) + mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds)) reg |= PORT_CONTROL_2_JUMBO_10240; if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) { @@ -1514,7 +1480,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) goto abort; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Do not limit the period of time that this port can * be paused for by the remote end or the period of * time that this port can pause the remote end. @@ -1564,7 +1531,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Rate Control: disable ingress rate limiting. */ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL, 0x0001); @@ -1976,7 +1944,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ @@ -1995,7 +1964,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Disable ingress rate limiting by resetting all * ingress rate limit registers to their initial * state. @@ -2162,6 +2132,132 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum, return ret; } +#ifdef CONFIG_NET_DSA_HWMON + +static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + int val; + + *temp = 0; + + mutex_lock(&ps->smi_mutex); + + ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (ret < 0) + goto error; + + ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5)); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5)); + if (ret < 0) + goto error; + + *temp = ((val & 0x1f) - 5) * 5; + +error: + _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0); + mutex_unlock(&ps->smi_mutex); + return ret; +} + +static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) +{ + int phy = mv88e6xxx_6320_family(ds) ? 
3 : 0; + int ret; + + *temp = 0; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27); + if (ret < 0) + return ret; + + *temp = (ret & 0xff) - 25; + + return 0; +} + +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +{ + if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) + return mv88e63xx_get_temp(ds, temp); + + return mv88e61xx_get_temp(ds, temp); +} + +int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +{ + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + int ret; + + if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + return -EOPNOTSUPP; + + *temp = 0; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *temp = (((ret >> 8) & 0x1f) * 5) - 25; + + return 0; +} + +int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) +{ + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + int ret; + + if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + return -EOPNOTSUPP; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + return mv88e6xxx_phy_page_write(ds, phy, 6, 26, + (ret & 0xe0ff) | (temp << 8)); +} + +int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +{ + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; + int ret; + + if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds)) + return -EOPNOTSUPP; + + *alarm = false; + + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *alarm = !!(ret & 0x40); + + return 0; +} +#endif /* CONFIG_NET_DSA_HWMON */ + static int __init mv88e6xxx_init(void) { #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index a650b2656de9..78e37226a37d 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -89,7 +89,12 @@ #define PORT_SWITCH_ID_6182 0x1a60 #define PORT_SWITCH_ID_6185 0x1a70 #define PORT_SWITCH_ID_6240 0x2400 -#define PORT_SWITCH_ID_6320 0x1250 +#define PORT_SWITCH_ID_6320 0x1150 +#define PORT_SWITCH_ID_6320_A1 0x1151 +#define PORT_SWITCH_ID_6320_A2 0x1152 +#define PORT_SWITCH_ID_6321 0x3100 +#define PORT_SWITCH_ID_6321_A1 0x3101 +#define PORT_SWITCH_ID_6321_A2 0x3102 #define PORT_SWITCH_ID_6350 0x3710 #define PORT_SWITCH_ID_6351 0x3750 #define PORT_SWITCH_ID_6352 0x3520 @@ -389,7 +394,10 @@ int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds); int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *_p); -int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); +int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); +int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp); +int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp); +int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm); int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum); @@ -410,6 +418,7 @@ int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); + extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; extern struct dsa_switch_driver mv88e6352_switch_driver; diff --git a/drivers/net/ethernet/Kconfig 
b/drivers/net/ethernet/Kconfig index f3bb1784066b..05aa7597dab9 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" +source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/tile/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index c51014b0464f..f42177b11723 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ +obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ obj-$(CONFIG_NET_VENDOR_TI) += ti/ obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 4566cdf0bc39..b9a5a97ed4dd 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -933,6 +933,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcm_sysport_poll_controller(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + disable_irq(priv->irq0); + bcm_sysport_rx_isr(priv->irq0, priv); + enable_irq(priv->irq0); + + disable_irq(priv->irq1); + bcm_sysport_tx_isr(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) { @@ -1723,6 +1738,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_set_features = bcm_sysport_set_features, .ndo_set_rx_mode = bcm_sysport_set_rx_mode, .ndo_set_mac_address = bcm_sysport_change_mac, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcm_sysport_poll_controller, +#endif }; #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index cd4ae76bbff2..5762c485ea06 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1,6 +1,8 @@ -/* bnx2x.h: Broadcom Everest network driver. +/* bnx2x.h: QLogic Everest network driver. 
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,7 +32,7 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.710.51-0" +#define DRV_MODULE_VERSION "1.712.30-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 @@ -1227,6 +1229,10 @@ struct bnx2x_slowpath { } mac_rdata; union { + struct eth_classify_rules_ramrod_data e2; + } vlan_rdata; + + union { struct tstorm_eth_mac_filter_config e1x; struct eth_filter_rules_ramrod_data e2; } rx_mode_rdata; @@ -1408,6 +1414,9 @@ struct bnx2x_sp_objs { /* Queue State object */ struct bnx2x_queue_sp_obj q_obj; + + /* VLANs object */ + struct bnx2x_vlan_mac_obj vlan_obj; }; struct bnx2x_fp_stats { @@ -1422,6 +1431,13 @@ enum { SUB_MF_MODE_UNKNOWN = 0, SUB_MF_MODE_UFP, SUB_MF_MODE_NPAR1_DOT_5, + SUB_MF_MODE_BD, +}; + +struct bnx2x_vlan_entry { + struct list_head link; + u16 vid; + bool hw; }; struct bnx2x { @@ -1636,6 +1652,8 @@ struct bnx2x { u8 mf_sub_mode; #define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ bp->mf_sub_mode == SUB_MF_MODE_UFP) +#define IS_MF_BD(bp) (IS_MF_SD(bp) && \ + bp->mf_sub_mode == SUB_MF_MODE_BD) u8 wol; @@ -1860,8 +1878,6 @@ struct bnx2x { int dcb_version; /* CAM credit pools */ - - /* used only in sriov */ struct bnx2x_credit_pool_obj vlans_pool; struct bnx2x_credit_pool_obj macs_pool; @@ -1924,6 +1940,11 @@ struct bnx2x { u16 rx_filter; struct bnx2x_link_report_data vf_link_vars; + struct list_head vlan_reg; + u16 vlan_cnt; + u16 vlan_credit; + u16 vxlan_dst_port; + bool accept_any_vlan; }; /* Tx queues may be less or equal to Rx queues */ @@ -1951,23 +1972,14 @@ extern int num_queues; #define RSS_IPV6_TCP_CAP_MASK \ TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY -/* func init flags */ -#define FUNC_FLG_RSS 0x0001 -#define FUNC_FLG_STATS 0x0002 -/* removed FUNC_FLG_UNMATCHED 0x0004 */ -#define FUNC_FLG_TPA 0x0008 -#define FUNC_FLG_SPQ 0x0010 -#define FUNC_FLG_LEADING 0x0020 /* PF only */ -#define FUNC_FLG_LEADING_STATS 0x0040 struct bnx2x_func_init_params { /* dma */ - dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ - dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + bool spq_active; + dma_addr_t spq_map; + u16 spq_prod; - u16 func_flgs; u16 func_id; /* abs fid */ u16 pf_id; - u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ }; #define for_each_cnic_queue(bp, var) \ @@ -2077,6 +2089,11 @@ struct bnx2x_func_init_params { int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags); + +int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, + struct bnx2x_vlan_mac_obj *obj, bool set, + unsigned long *ramrod_flags); + /** * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object * @@ -2481,6 +2498,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define VF_ACQUIRE_THRESH 3 #define VF_ACQUIRE_MAC_FILTERS 1 #define VF_ACQUIRE_MC_FILTERS 10 +#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */ #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \ (!((me_reg) & ME_REG_VF_ERR))) @@ -2577,6 +2595,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); void bnx2x_update_mng_version(struct bnx2x *bp); +void bnx2x_update_mfw_dump(struct bnx2x *bp); + #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) @@ -2589,4 +2609,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); #define BNX2X_MAX_PHC_DRIFT 31000000 #define BNX2X_PTP_TX_TIMEOUT +/* Re-configure all previously configured vlan filters. + * Meant for implicit re-load flows. + */ +int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a90d7364334f..1637de6caf46 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1,6 +1,8 @@ -/* bnx2x_cmn.c: Broadcom Everest network driver. +/* bnx2x_cmn.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (rss_obj->udp_rss_v6) __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); - if (!CHIP_IS_E1x(bp)) + if (!CHIP_IS_E1x(bp)) { + /* valid only for TUNN_MODE_VXLAN tunnel mode */ + __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags); + /* valid only for TUNN_MODE_GRE tunnel mode */ - __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags); + } } else { __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); } @@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->mode = TPA_MODE_DISABLED; } +void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) +{ + u32 cur; + + if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) + return; + + cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); + DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", + cur, state); + + SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); +} + int bnx2x_load_cnic(struct bnx2x *bp) { int i, rc, port = BP_PORT(bp); @@ -2827,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Start fast path */ + /* Re-configure vlan filters */ + rc = bnx2x_vlan_reconfigure_vid(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error3); + /* Initialize Rx filter. */ bnx2x_set_rx_mode_inner(bp); @@ -2873,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* mark driver is loaded in shmem2 */ u32 val; val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + val &= ~DRV_FLAGS_MTU_MASK; + val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2); @@ -2885,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) return -EBUSY; } + /* Update driver data for On-Chip MFW dump. 
*/ + if (IS_PF(bp)) + bnx2x_update_mfw_dump(bp); + /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) bnx2x_dcbx_init(bp, false); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); + DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n"); return 0; @@ -2956,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); + /* mark driver is unloaded in shmem2 */ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 val; @@ -3677,7 +3715,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); pbd_e2->data.tunnel_data.flags |= - ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; + ETH_TUNNEL_DATA_IPV6_OUTER; } pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); @@ -4184,6 +4222,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) +{ + int mfw_vn = BP_FW_MB_IDX(bp); + u32 tmp; + + /* If the shmem shouldn't affect configuration, reflect */ + if (!IS_MF_BD(bp)) { + int i; + + for (i = 0; i < BNX2X_MAX_PRIORITY; i++) + c2s_map[i] = i; + *c2s_default = 0; + + return; + } + + tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + c2s_map[0] = tmp & 0xff; + c2s_map[1] = (tmp >> 8) & 0xff; + c2s_map[2] = (tmp >> 16) & 0xff; + c2s_map[3] = (tmp >> 24) & 0xff; + + tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + c2s_map[4] = tmp & 0xff; + c2s_map[5] = (tmp >> 8) & 0xff; + c2s_map[6] = (tmp >> 16) & 0xff; + c2s_map[7] = (tmp >> 24) & 0xff; + + tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff; +} + /** * bnx2x_setup_tc - routine to configure net_device for multi tc * @@ -4194,8 +4267,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) { - int cos, prio, count, offset; struct bnx2x *bp = netdev_priv(dev); + u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def; + int cos, prio, count, offset; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); @@ -4219,12 +4293,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return -EINVAL; } + bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); + /* configure priority to traffic class mapping */ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { - netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + int outer_prio = c2s_map[prio]; + + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, "mapping priority %d to tc %d\n", - prio, bp->prio_to_cos[prio]); + outer_prio, bp->prio_to_cos[outer_prio]); } /* Use this configuration to differentiate tc0 from other COSes @@ -4278,6 +4356,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) if (netif_running(dev)) rc = bnx2x_set_eth_mac(bp, true); + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return rc; } @@ -4831,6 +4912,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) */ dev->mtu = new_mtu; + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) + 
SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return bnx2x_reload_if_running(dev); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 03b7404d5b9b..fa7c53201265 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1,6 +1,8 @@ -/* bnx2x_cmn.h: Broadcom Everest network driver. +/* bnx2x_cmn.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features); */ void bnx2x_tx_timeout(struct net_device *dev); +/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration + * c2s_map should have BNX2X_MAX_PRIORITY entries. + * @bp: driver handle + * @c2s_map: should have BNX2X_MAX_PRIORITY entries for mapping + * @c2s_default: entry for non-tagged configuration + */ +void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default); + /*********************** Inlines **********************************/ /*********************** Fast path ********************************/ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) @@ -931,14 +941,33 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->mf_mode = bp->mf_mode; start_params->sd_vlan_tag = bp->mf_ov; + /* Configure Ethertype for BD mode */ + if (IS_MF_BD(bp)) { + DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n"); + start_params->sd_vlan_eth_type = ETH_P_8021AD; + REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD); + REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD); + REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD); + + bnx2x_get_c2s_mapping(bp, start_params->c2s_pri, + &start_params->c2s_pri_default); + start_params->c2s_pri_valid = 1; + + DP(NETIF_MSG_IFUP, + "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n", + start_params->c2s_pri[0], start_params->c2s_pri[1], + start_params->c2s_pri[2], start_params->c2s_pri[3], + start_params->c2s_pri[4], start_params->c2s_pri[5], + start_params->c2s_pri[6], start_params->c2s_pri[7], + start_params->c2s_pri_default); + } + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - start_params->tunnel_mode = TUNN_MODE_GRE; - start_params->gre_tunnel_type = IPGRE_TUNNEL; - start_params->inner_gre_rss_en = 1; + start_params->inner_rss = 1; if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { start_params->class_fail_ethtype = ETH_P_FIP; @@ -1037,6 +1066,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, BNX2X_FILTER_MAC_PENDING, &bp->sp_state, obj_type, &bp->macs_pool); + + if (!CHIP_IS_E1x(bp)) + bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj, + fp->cl_id, fp->cid, BP_FUNC(bp), + bnx2x_sp(bp, vlan_rdata), + bnx2x_sp_mapping(bp, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &bp->sp_state, obj_type, + &bp->vlans_pool); } /** @@ -1096,7 +1134,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_get_path_func_num(bp)); - bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, + bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp), bnx2x_get_path_func_num(bp)); 
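For readers tracing the new inner-to-outer priority plumbing: bnx2x_get_c2s_mapping() above unpacks one byte per PCP from two shmem words (the lower word carries PCPs 0-3, the upper PCPs 4-7, with the default in a third word). A minimal standalone sketch of that unpacking, assuming a little-endian host where be32_to_cpu() amounts to a byte swap:

	#include <stdint.h>

	/* Editorial sketch, not patch code: mirrors the byte extraction in
	 * bnx2x_get_c2s_mapping(). The shmem comment places PCP 0 in bits
	 * 31:24 of the stored word, so after the big-endian swap PCP 0
	 * lands in the low byte.
	 */
	static void c2s_unpack(uint32_t lower, uint32_t upper, uint8_t map[8])
	{
		uint32_t tmp = __builtin_bswap32(lower);	/* be32_to_cpu() stand-in */
		int i;

		for (i = 0; i < 4; i++)
			map[i] = (tmp >> (8 * i)) & 0xff;

		tmp = __builtin_bswap32(upper);
		for (i = 0; i < 4; i++)
			map[4 + i] = (tmp >> (8 * i)) & 0xff;
	}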
/* RSS configuration object */ @@ -1106,6 +1144,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_sp_mapping(bp, rss_rdata), BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, BNX2X_OBJ_TYPE_RX); + + bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp)); } static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) @@ -1339,4 +1379,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp); void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, u32 verbose); +/** + * bnx2x_set_os_driver_state - write driver state for management FW usage + * + * @bp: driver handle + * @state: OS_DRIVER_STATE_* value reflecting current driver state + */ +void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state); #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 6e4294ed1fc9..7ccf6684e0a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -1,15 +1,17 @@ -/* bnx2x_dcb.c: Broadcom Everest network driver. +/* bnx2x_dcb.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, if (bp->dcbx_port_params.ets.cos_params[cos]. pri_bitmask & pri_bit) tt2cos[pri].cos = cos; + + pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri]; } /* we never want the FW to add a 0 vlan tag */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index c6939ecb02c5..9a9517c0f703 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -1,15 +1,17 @@ -/* bnx2x_dcb.h: Broadcom Everest network driver. +/* bnx2x_dcb.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. 
* * Maintained by: Ariel Elior <ariel.elior@qlogic.com> diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 741aa130c19f..eccfa13b0f2d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -1,15 +1,17 @@ -/* bnx2x_dump.h: Broadcom Everest network driver. +/* bnx2x_dump.h: QLogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 76b9052a961c..6b2050a198df 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1,6 +1,8 @@ -/* bnx2x_ethtool.c: Broadcom Everest network driver. +/* bnx2x_ethtool.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) } else bp->wol = 0; + if (SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return 0; } @@ -3562,17 +3567,8 @@ static int bnx2x_get_ts_info(struct net_device *dev, info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 7636e3c18771..226ab29f4cb6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -1,6 +1,8 @@ -/* bnx2x_fw_defs.h: Broadcom Everest network driver. +/* bnx2x_fw_defs.h: Qlogic Everest network driver. 
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -372,7 +374,7 @@ #define MAX_COS_NUMBER 4 #define MAX_TRAFFIC_TYPES 8 #define MAX_PFC_PRIORITIES 8 - +#define MAX_VLAN_PRIORITIES 8 /* used by array traffic_type_to_priority[] to mark traffic type \ that is not mapped to priority*/ #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h index 8aafd9b5d6a2..9e3b5a1e9f4f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h @@ -1,6 +1,8 @@ /* bnx2x_fw_file_hdr.h: FW binary file header structure. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 058bc7328220..08a08fa49caa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1,6 +1,8 @@ -/* bnx2x_hsi.h: Broadcom Everest network driver. +/* bnx2x_hsi.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -729,6 +731,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 @@ -786,6 +789,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 @@ -864,6 +868,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500 #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 @@ -2064,6 +2069,26 @@ struct ncsi_oem_fcoe_features { #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 }; +enum curr_cfg_method_e { + CURR_CFG_MET_NONE = 0, /* default config */ + CURR_CFG_MET_OS = 1, + CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. 
Option ROM, NPAR, O/S Cfg Utils */ +}; + +struct mdump_driver_info { + u32 epoc; + u32 drv_ver; + u32 fw_ver; + + u32 valid_dump; + #define FIRST_DUMP_VALID (1 << 0) + #define SECOND_DUMP_VALID (1 << 1) + + u32 flags; + #define ENABLE_ALL_TRIGGERS (0x7fffffff) + #define TRIGGER_MDUMP_ONCE (1 << 31) +}; + struct ncsi_oem_data { u32 driver_version[4]; struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; @@ -2187,6 +2212,8 @@ struct shmem2_region { #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 +#define DRV_FLAGS_MTU_MASK 0xffff0000 +#define DRV_FLAGS_MTU_SHIFT 16 u32 extended_dev_info_shared_cfg_size; @@ -2251,6 +2278,7 @@ struct shmem2_region { u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_ATTR_84858 0x00000002 #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 @@ -2268,6 +2296,74 @@ struct shmem2_region { /* We use indication for each PF (0..3) */ #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) + union { /* For various OEMs */ /* Offset 0x1a0 */ + u8 storage_boot_prog[E2_FUNC_MAX]; + #define STORAGE_BOOT_PROG_MASK 0x000000FF + #define STORAGE_BOOT_PROG_NONE 0x00000000 + #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002 + #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002 + #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004 + #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008 + #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008 + #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010 + #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020 + #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040 + #define STORAGE_BOOT_PROG_COMPLETED 0x00000080 + + u32 oem_i2c_data_addr; + }; + + /* 9 entires for the C2S PCP map for each inner VLAN PCP + 1 default */ + /* For PCP values 0-3 use the map lower */ + /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1, + * 0x0000FF00 - PCP 2, 0x000000FF PCP 3 + */ + u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */ + + /* For PCP values 4-7 use the map upper */ + /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5, + * 0x0000FF00 - PCP 6, 0x000000FF PCP 7 + */ + u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */ + + /* For PCP default value get the MSB byte of the map default */ + u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */ + + /* FC_NPIV table offset in NVRAM */ + u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */ + + /* Shows last method that changed configuration of this device */ + enum curr_cfg_method_e curr_cfg; /* 0x1dc */ + + /* Storm FW version, shold be kept in the format 0xMMmmbbdd: + * MM - Major, mm - Minor, bb - Build ,dd - Drop + */ + u32 netproc_fw_ver; /* 0x1e0 */ + + /* Option ROM SMASH CLP version */ + u32 clp_ver; /* 0x1e4 */ + + u32 pcie_bus_num; /* 0x1e8 */ + + u32 sriov_switch_mode; /* 0x1ec */ + #define SRIOV_SWITCH_MODE_NONE 0x0 + #define SRIOV_SWITCH_MODE_VEB 0x1 + #define SRIOV_SWITCH_MODE_VEPA 0x2 + + u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 */ + + u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */ + + u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */ + + u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */ + #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */ + #define OS_DRIVER_STATE_LOADING 1 /* transition state */ + #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */ + #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */ + + 
/* mini dump driver info */ + struct mdump_driver_info drv_info; /* 0x218 */ }; @@ -2898,8 +2994,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 10 -#define BCM_5710_FW_REVISION_VERSION 51 +#define BCM_5710_FW_MINOR_VERSION 12 +#define BCM_5710_FW_REVISION_VERSION 30 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3901,7 +3997,11 @@ struct eth_fast_path_rx_cqe { __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[7]; + u8 tunn_type; + u8 tunn_inner_hdrs_offset; + __le16 reserved1; + __le32 tunn_tenant_id; + __le32 padding[5]; u32 marker; }; @@ -4012,8 +4112,8 @@ struct eth_tunnel_data { __le16 pseudo_csum; u8 ip_hdr_start_inner_w; u8 flags; -#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) -#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 }; @@ -4120,16 +4220,12 @@ struct eth_rss_update_ramrod_data { #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 -#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) -#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 -#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) -#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 -#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) -#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10 u8 rss_result_mask; u8 reserved3; __le16 reserved4; @@ -4314,6 +4410,18 @@ enum eth_tunnel_non_lso_csum_location { MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION }; +enum eth_tunn_type { + TUNN_TYPE_NONE, + TUNN_TYPE_VXLAN, + TUNN_TYPE_L2_GRE, + TUNN_TYPE_IPV4_GRE, + TUNN_TYPE_IPV6_GRE, + TUNN_TYPE_L2_GENEVE, + TUNN_TYPE_IPV4_GENEVE, + TUNN_TYPE_IPV6_GENEVE, + MAX_ETH_TUNN_TYPE +}; + /* * Tx regular BD structure */ @@ -4758,6 +4866,9 @@ struct afex_vif_list_ramrod_data { __le16 reserved1; }; +struct c2s_pri_trans_table_entry { + u8 val[MAX_VLAN_PRIORITIES]; +}; /* * cfc delete event data @@ -5246,6 +5357,7 @@ struct flow_control_configuration { u8 dont_add_pri_0_en; u8 reserved1; __le32 reserved2; + u8 dcb_outer_pri[MAX_TRAFFIC_TYPES]; }; @@ -5260,18 +5372,25 @@ struct function_start_data { u8 path_id; u8 network_cos_mode; u8 dmae_cmd_id; - u8 tunnel_mode; - u8 gre_tunnel_type; - u8 tunn_clss_en; - u8 inner_gre_rss_en; - u8 sd_accept_mf_clss_fail; + u8 no_added_tags; + __le16 reserved0; + __le32 reserved1; + u8 inner_clss_vxlan; + u8 inner_clss_l2gre; + u8 inner_clss_l2geneve; + u8 inner_rss; __le16 vxlan_dst_port; + __le16 geneve_dst_port; + u8 sd_accept_mf_clss_fail; + u8 
sd_accept_mf_clss_fail_match_ethtype; __le16 sd_accept_mf_clss_fail_ethtype; __le16 sd_vlan_eth_type; u8 sd_vlan_force_pri_flg; u8 sd_vlan_force_pri_val; - u8 sd_accept_mf_clss_fail_match_ethtype; - u8 no_added_tags; + u8 c2s_pri_tt_valid; + u8 c2s_pri_default; + u8 reserved2[6]; + struct c2s_pri_trans_table_entry c2s_pri_trans_table; }; struct function_update_data { @@ -5289,11 +5408,12 @@ struct function_update_data { u8 tx_switch_suspend; u8 echo; u8 update_tunn_cfg_flg; - u8 tunnel_mode; - u8 gre_tunnel_type; - u8 tunn_clss_en; - u8 inner_gre_rss_en; + u8 inner_clss_vxlan; + u8 inner_clss_l2gre; + u8 inner_clss_l2geneve; + u8 inner_rss; __le16 vxlan_dst_port; + __le16 geneve_dst_port; u8 sd_vlan_force_pri_change_flg; u8 sd_vlan_force_pri_flg; u8 sd_vlan_force_pri_val; @@ -5302,6 +5422,8 @@ struct function_update_data { u8 reserved1; __le16 sd_vlan_tag; __le16 sd_vlan_eth_type; + __le16 reserved0; + __le32 reserved2; }; /* @@ -5330,15 +5452,6 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; - -/* GRE Tunnel Mode */ -enum gre_tunnel_type { - NVGRE_TUNNEL, - L2GRE_TUNNEL, - IPGRE_TUNNEL, - MAX_GRE_TUNNEL_TYPE -}; - /* * Dynamic Host-Coalescing - Driver(host) counters */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index d6e1975b7b69..46ee2c01f4c5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -1,7 +1,9 @@ -/* bnx2x_init.h: Broadcom Everest network driver. +/* bnx2x_init.h: Qlogic Everest network driver. * Structures and macroes needed during the initialization. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 5669ed2e87d0..1835d2e451c0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -1,8 +1,10 @@ -/* bnx2x_init_ops.h: Broadcom Everest network driver. +/* bnx2x_init_ops.h: Qlogic Everest network driver. * Static functions needed during the initialization. * This file is "included" in bnx2x_main.c. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index a0b03c27e0a3..d946bba43726 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -1,13 +1,15 @@ /* Copyright 2008-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). 
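Stepping back from the license-header churn: the HSI rework above replaces the single tunnel_mode/gre_tunnel_type pair with independent per-encapsulation classification knobs plus one inner_rss switch. As an illustration only — the field values below are hypothetical, not taken from the patch — enabling inner classification and inner-header RSS for VXLAN through the updated struct would look roughly like:

	struct function_update_data d = { 0 };

	d.update_tunn_cfg_flg = 1;
	d.inner_clss_vxlan = 1;			/* classify inner VXLAN headers */
	d.inner_rss = 1;			/* hash on inner headers too */
	d.vxlan_dst_port = cpu_to_le16(4789);	/* IANA-assigned VXLAN port */

with GENEVE handled the same way via inner_clss_l2geneve and geneve_dst_port, instead of picking one mode from the removed gre_tunnel_type enum.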
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Written by Yaniv Rosner @@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8481/BCM84823/BCM84833 PHY SECTION */ /******************************************************************/ +static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy) +{ + return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)); +} + static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct bnx2x *bp, u8 port) @@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, }; u16 fw_ver1; - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (bnx2x_is_8483x_8485x(phy)) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, phy->ver_addr); @@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + if (bnx2x_is_8483x_8485x(phy)) offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; else offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; @@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; switch (action) { case PHY_INIT: - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (!bnx2x_is_8483x_8485x(phy)) { /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, bp, params->port); } @@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, /* Always write this if this is not 84833/4. * For 84833/4, write it only when it's a forced speed. */ - if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) || + if (!bnx2x_is_8483x_8485x(phy) || ((autoneg_val & (1<<12)) == 0)) bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, return bnx2x_848xx_cmn_config_init(phy, params, vars); } -#define PHY84833_CMDHDLR_WAIT 300 -#define PHY84833_CMDHDLR_MAX_ARGS 5 +#define PHY848xx_CMDHDLR_WAIT 300 +#define PHY848xx_CMDHDLR_MAX_ARGS 5 + +static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, + u16 fw_cmd, + u16 cmd_args[], int argc) +{ + int idx; + u16 val; + struct bnx2x *bp = params->bp; + + /* Step 1: Poll the STATUS register to see whether the previous command + * is in progress or the system is busy (CMD_IN_PROGRESS or + * SYSTEM_BUSY). 
If previous command is in progress or system is busy, + * check again until the previous command finishes execution and the + * system is available for taking command + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) && + (val != PHY84858_STATUS_CMD_SYSTEM_BUSY)) + break; + usleep_range(1000, 2000); + } + if (idx >= PHY848xx_CMDHDLR_WAIT) { + DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + return -EINVAL; + } + + /* Step2: If any parameters are required for the function, write them + * to the required DATA registers + */ + + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + + /* Step3: When the firmware is ready for commands, write the 'Command + * code' to the CMD register + */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + + /* Step4: Once the command has been written, poll the STATUS register + * to check whether the command has completed (CMD_COMPLETED_PASS/ + * CMD_FOR_CMDS or CMD_COMPLETED_ERROR). + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 2000); + } + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "FW cmd failed.\n"); + return -EINVAL; + } + /* Step5: Once the command has completed, read the specficied DATA + * registers for any saved results for the command, if applicable + */ + + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + + return 0; +} + static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, u16 cmd_args[], int argc) @@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Write CMD_OPEN_OVERRIDE to STATUS reg */ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, + MDIO_848xx_CMD_HDLR_STATUS, PHY84833_STATUS_CMD_OPEN_OVERRIDE); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); + MDIO_848xx_CMD_HDLR_STATUS, &val); if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; usleep_range(1000, 2000); } - if (idx >= PHY84833_CMDHDLR_WAIT) { + if (idx >= PHY848xx_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); return -EINVAL; } @@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, /* Prepare argument(s) and issue command */ for (idx = 0; idx < argc; idx++) { bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, + MDIO_848xx_CMD_HDLR_DATA1 + idx, cmd_args[idx]); } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); + MDIO_848xx_CMD_HDLR_STATUS, &val); if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) + (val == 
PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; usleep_range(1000, 2000); } - if ((idx >= PHY84833_CMDHDLR_WAIT) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { DP(NETIF_MSG_LINK, "FW cmd failed.\n"); return -EINVAL; } /* Gather returning data */ for (idx = 0; idx < argc; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, + MDIO_848xx_CMD_HDLR_DATA1 + idx, &cmd_args[idx]); } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, + MDIO_848xx_CMD_HDLR_STATUS, PHY84833_STATUS_CMD_CLEAR_COMPLETE); return 0; } -static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, + u16 fw_cmd, + u16 cmd_args[], int argc) +{ + struct bnx2x *bp = params->bp; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) || + (REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port])) & + LINK_ATTR_84858)) { + return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } else { + return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } +} + +static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u32 pair_swap; - u16 data[PHY84833_CMDHDLR_MAX_ARGS]; + u16 data[PHY848xx_CMDHDLR_MAX_ARGS]; int status; struct bnx2x *bp = params->bp; @@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, /* Only the second argument is used for this command */ data[1] = (u16)pair_swap; - status = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); + status = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_PAIR_SWAP, data, + PHY848xx_CMDHDLR_MAX_ARGS); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); @@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); /* Prevent Phy from working in EEE and advertising it */ - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { DP(NETIF_MSG_LINK, "EEE disable failed.\n"); return rc; @@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { DP(NETIF_MSG_LINK, "EEE enable failed.\n"); return rc; @@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, u8 port, initialize = 1; u16 val; u32 actual_phy_selection; - u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; + u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS]; int rc = 0; usleep_range(1000, 2000); @@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to come out of reset */ msleep(50); - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (!bnx2x_is_8483x_8485x(phy)) { /* BCM84823 requires that XGXS links up first @ 10G for normal * behavior. 
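The two command handlers above (the 84833 one and the new 84858 variant) implement the same five-step firmware mailbox handshake over clause-45 MDIO; they differ mainly in the open/clear status codes. Distilled into a generic sketch — the register addresses, status values and mdio_read()/mdio_write() helpers below are placeholders, not real driver symbols:

	#include <stdint.h>
	#include <unistd.h>

	/* Placeholder registers/status codes; the real ones are the
	 * MDIO_848xx_CMD_HDLR_* registers and the PHY84833/PHY84858
	 * status values used in the handlers above.
	 */
	#define REG_STATUS		0x0
	#define REG_COMMAND		0x1
	#define REG_DATA1		0x2
	#define STATUS_IN_PROGRESS	0x2
	#define STATUS_BUSY		0x3
	#define STATUS_PASS		0x4
	#define STATUS_ERROR		0x8
	#define CMDHDLR_WAIT		300

	extern uint16_t mdio_read(uint32_t reg);		/* bnx2x_cl45_read() stand-in */
	extern void mdio_write(uint32_t reg, uint16_t val);	/* bnx2x_cl45_write() stand-in */

	/* args[] is in/out, exactly as in the driver's handlers. */
	static int fw_mailbox_cmd(uint16_t cmd, uint16_t *args, int argc)
	{
		uint16_t val = 0;
		int i, idx;

		/* Step 1: wait out any previous command */
		for (idx = 0; idx < CMDHDLR_WAIT; idx++) {
			val = mdio_read(REG_STATUS);
			if (val != STATUS_IN_PROGRESS && val != STATUS_BUSY)
				break;
			usleep(1000);
		}
		if (idx >= CMDHDLR_WAIT)
			return -1;		/* firmware never became ready */

		/* Steps 2 and 3: arguments first, then the command code */
		for (i = 0; i < argc; i++)
			mdio_write(REG_DATA1 + i, args[i]);
		mdio_write(REG_COMMAND, cmd);

		/* Step 4: poll until the command passes or fails */
		for (idx = 0; idx < CMDHDLR_WAIT; idx++) {
			val = mdio_read(REG_STATUS);
			if (val == STATUS_PASS || val == STATUS_ERROR)
				break;
			usleep(1000);
		}
		if (idx >= CMDHDLR_WAIT || val == STATUS_ERROR)
			return -1;

		/* Step 5: collect any returned data */
		for (i = 0; i < argc; i++)
			args[i] = mdio_read(REG_DATA1 + i);
		return 0;
	}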
*/ @@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); vars->line_speed = temp; } + /* Check if this is actually BCM84858 */ + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + u16 hw_rev; + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_848xx_ID_MSB, &hw_rev); + if (hw_rev == BCM84858_PHY_ID) { + params->link_attr_sync |= LINK_ATTR_84858; + bnx2x_update_link_attr(params, params->link_attr_sync); + } + } + + /* Set dual-media configuration according to configuration */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_CTL_REG_84823_MEDIA, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | @@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { - bnx2x_84833_pair_swap_cfg(phy, params, vars); + if (bnx2x_is_8483x_8485x(phy)) { + bnx2x_848xx_pair_swap_cfg(phy, params, vars); /* Keep AutogrEEEn disabled. */ cmd_args[0] = 0x0; cmd_args[1] = 0x0; cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; cmd_args[3] = PHY84833_CONSTANT_LATENCY; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args, - PHY84833_CMDHDLR_MAX_ARGS); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, cmd_args, + PHY848xx_CMDHDLR_MAX_ARGS); if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } @@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (bnx2x_is_8483x_8485x(phy)) { /* Bring PHY out of super isolate mode as the final step. 
*/ bnx2x_cl45_read_and_write(bp, phy, MDIO_CTL_DEVAD, @@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; /* Determine if EEE was negotiated */ - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + if (bnx2x_is_8483x_8485x(phy)) bnx2x_eee_an_resolve(phy, params, vars); } @@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = { .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; +static const struct bnx2x_phy phy_84858 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + static const struct bnx2x_phy phy_54618se = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, .addr = 0xff, @@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: *phy = phy_84834; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858: + *phy = phy_84858; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: *phy = phy_54618se; @@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) && - (phy->ver_addr)) { + if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) { /* Remove 100Mb link supported for BCM84833/4 when phy fw * version lower than or equal to 1.39 */ @@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858: /* GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. 
*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index d9cce4c3899b..b7d251108c19 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -1,13 +1,15 @@ /* Copyright 2008-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Written by Yaniv Rosner diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c27af12314ed..31c63aa22521 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1,6 +1,8 @@ -/* bnx2x_main.c: Broadcom Everest network driver. +/* bnx2x_main.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -81,11 +83,11 @@ #define TX_TIMEOUT (5*HZ) static char version[] = - "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " + "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Eliezer Tamir"); -MODULE_DESCRIPTION("Broadcom NetXtreme II " +MODULE_DESCRIPTION("QLogic " "BCM57710/57711/57711E/" "57712/57712_MF/57800/57800_MF/57810/57810_MF/" "57840/57840_MF Driver"); @@ -163,27 +165,27 @@ enum bnx2x_board_type { static struct { char *name; } board_info[] = { - [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, - [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, - [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, - [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, - [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, - [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, - [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, - [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, - [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, - [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, - [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, - [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" }, - [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, - [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 
Gigabit Ethernet" }, - [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, - [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, - [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, - [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, - [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, - [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } + [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" }, + [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" }, + [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" }, + [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" }, + [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" }, + [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" }, + [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" }, + [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" }, + [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" }, + [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" }, + [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" }, + [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" }, + [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" }, + [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" }, + [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }, + [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" }, + [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" }, + [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" }, + [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" } }; #ifndef PCI_DEVICE_ID_NX2_57710 @@ -2916,7 +2918,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; - if (IS_MF_UFP(bp)) { + if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { int func = BP_ABS_FUNC(bp); u32 val; @@ -2943,16 +2945,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", bp->mf_ov); goto fail; + } else { + DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", + bp->mf_ov); } - - DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); - - bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); - - return; + } else { + goto fail; } - /* not supported by SW yet */ + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); + return; fail: bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); } @@ -3065,7 +3067,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) storm_memset_func_en(bp, p->func_id, 1); /* spq */ - if (p->func_flgs & FUNC_FLG_SPQ) { + if (p->spq_active) { storm_memset_spq_addr(bp, p->spq_map, p->func_id); REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); @@ -3281,7 +3283,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) { struct bnx2x_func_init_params func_init = {0}; struct event_ring_data eq_data = { {0} }; - u16 flags; if (!CHIP_IS_E1x(bp)) { /* reset IGU PF statistics: MSIX + ATTN */ @@ 
-3298,15 +3299,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) BP_FUNC(bp) : BP_VN(bp))*4, 0); } - /* function setup flags */ - flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); - - /* This flag is relevant for E1x only. - * E2 doesn't have a TPA configuration in a function level. - */ - flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0; - - func_init.func_flgs = flags; + func_init.spq_active = true; func_init.pf_id = BP_FUNC(bp); func_init.func_id = BP_FUNC(bp); func_init.spq_map = bp->spq_mapping; @@ -3707,6 +3700,34 @@ out: ethver, iscsiver, fcoever); } +void bnx2x_update_mfw_dump(struct bnx2x *bp) +{ + struct timeval epoc; + u32 drv_ver; + u32 valid_dump; + + if (!SHMEM2_HAS(bp, drv_info)) + return; + + /* Update Driver load time */ + do_gettimeofday(&epoc); + SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec); + + drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); + + SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM)); + + /* Check & notify On-Chip dump. */ + valid_dump = SHMEM2_RD(bp, drv_info.valid_dump); + + if (valid_dump & FIRST_DUMP_VALID) + DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); + + if (valid_dump & SECOND_DUMP_VALID) + DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); +} + static void bnx2x_oem_event(struct bnx2x *bp, u32 event) { u32 cmd_ok, cmd_fail; @@ -5274,6 +5295,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, vlan_mac_obj = &bp->sp_objs[cid].mac_obj; break; + case BNX2X_FILTER_VLAN_PENDING: + DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n"); + vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; + break; case BNX2X_FILTER_MCAST_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); /* This is only relevant for 57710 where multicast MACs are @@ -5568,6 +5593,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAIT4_PORT): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", @@ -5585,7 +5612,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); + DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n"); bnx2x_handle_classification_eqe(bp, elem); break; @@ -6173,6 +6200,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + if (bp->accept_any_vlan) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + } + break; case BNX2X_RX_MODE_ALLMULTI: __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); @@ -6184,6 +6216,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + if (bp->accept_any_vlan) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + } + break; case BNX2X_RX_MODE_PROMISC: /* According to definition of SI mode, iface in promisc mode @@ -6204,18 +6241,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, else __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + break; default: BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); return -EINVAL; } - /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ - if (rx_mode != BNX2X_RX_MODE_NONE) { - __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); - __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); - } - return 0; } @@ -7429,6 +7463,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) } else BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + if (SHMEM2_HAS(bp, netproc_fw_ver)) + SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM)); + return 0; } @@ -8406,6 +8443,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, return rc; } +int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, + struct bnx2x_vlan_mac_obj *obj, bool set, + unsigned long *ramrod_flags) +{ + int rc; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Fill general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* Fill a user request section if needed */ + if (!test_bit(RAMROD_CONT, ramrod_flags)) { + ramrod_param.user_req.u.vlan.vlan = vlan; + /* Set the command: ADD or DEL */ + if (set) + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + else + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; + } + + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + + if (rc == -EEXIST) { + /* Do not treat adding same vlan as error. */ + DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); + rc = 0; + } else if (rc < 0) { + BNX2X_ERR("%s VLAN failed\n", (set ? 
"Set" : "Del")); + } + + return rc; +} + int bnx2x_del_all_macs(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *mac_obj, int mac_type, bool wait_for_comp) @@ -11678,7 +11751,7 @@ static void validate_set_si_mode(struct bnx2x *bp) static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); - int vn; + int vn, mfw_vn; u32 val = 0, val2 = 0; int rc = 0; @@ -11768,6 +11841,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_mode = 0; bp->mf_sub_mode = 0; vn = BP_VN(bp); + mfw_vn = BP_FW_MB_IDX(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", @@ -11824,6 +11898,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE: + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_sub_mode = SUB_MF_MODE_BD; + bp->mf_config[vn] = + MF_CFG_RD(bp, + func_mf_config[func].config); + + if (SHMEM2_HAS(bp, mtu_size)) { + int mtu_idx = BP_FW_MB_IDX(bp); + u16 mtu_size; + u32 mtu; + + mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]); + mtu_size = (u16)mtu; + DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", + mtu_size, mtu); + + /* if valid: update device mtu */ + if (((mtu_size + ETH_HLEN) >= + ETH_MIN_PACKET_SIZE) && + (mtu_size <= + ETH_MAX_JUMBO_PACKET_SIZE)) + bp->dev->mtu = mtu_size; + } + break; case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: bp->mf_mode = MULTI_FUNCTION_SD; bp->mf_sub_mode = SUB_MF_MODE_UFP; @@ -11871,9 +11970,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", func, bp->mf_ov, bp->mf_ov); - } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { + } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || + (bp->mf_sub_mode == SUB_MF_MODE_BD)) { dev_err(&bp->pdev->dev, - "Unexpected - no valid MF OV for func %d in UFP mode\n", + "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", func); bp->path_has_ovlan = true; } else { @@ -12078,6 +12178,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->drv_info_mutex); sema_init(&bp->stats_lock, 1); bp->drv_info_mng_owner = false; + INIT_LIST_HEAD(&bp->vlan_reg); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); @@ -12596,6 +12697,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb, return vxlan_features_check(skb, features); } +static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) +{ + int rc; + + if (IS_PF(bp)) { + unsigned long ramrod_flags = 0; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, + add, &ramrod_flags); + } else { + rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); + } + + return rc; +} + +int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) +{ + struct bnx2x_vlan_entry *vlan; + int rc = 0; + + if (!bp->vlan_cnt) { + DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); + return 0; + } + + list_for_each_entry(vlan, &bp->vlan_reg, link) { + /* Prepare for cleanup in case of errors */ + if (rc) { + vlan->hw = false; + continue; + } + + if (!vlan->hw) + continue; + + DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); + + rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); + if (rc) { + BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); + vlan->hw = false; + rc = -EINVAL; + continue; + } + } + + return rc; +} + +static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct bnx2x *bp = netdev_priv(dev); + struct 
bnx2x_vlan_entry *vlan; + bool hw = false; + int rc = 0; + + if (!netif_running(bp->dev)) { + DP(NETIF_MSG_IFUP, + "Ignoring VLAN configuration when the interface is down\n"); + return -EFAULT; + } + + DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); + + vlan = kmalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return -ENOMEM; + + bp->vlan_cnt++; + if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { + DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); + bp->accept_any_vlan = true; + if (IS_PF(bp)) + bnx2x_set_rx_mode_inner(bp); + else + bnx2x_vfpf_storm_rx_mode(bp); + } else if (bp->vlan_cnt <= bp->vlan_credit) { + rc = __bnx2x_vlan_configure_vid(bp, vid, true); + hw = true; + } + + vlan->vid = vid; + vlan->hw = hw; + + if (!rc) { + list_add(&vlan->link, &bp->vlan_reg); + } else { + bp->vlan_cnt--; + kfree(vlan); + } + + DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); + + return rc; +} + +static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_vlan_entry *vlan; + int rc = 0; + + if (!netif_running(bp->dev)) { + DP(NETIF_MSG_IFUP, + "Ignoring VLAN configuration when the interface is down\n"); + return -EFAULT; + } + + DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); + + if (!bp->vlan_cnt) { + BNX2X_ERR("Unable to kill VLAN %d\n", vid); + return -EINVAL; + } + + list_for_each_entry(vlan, &bp->vlan_reg, link) + if (vlan->vid == vid) + break; + + if (vlan->vid != vid) { + BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); + return -EINVAL; + } + + if (vlan->hw) + rc = __bnx2x_vlan_configure_vid(bp, vid, false); + + list_del(&vlan->link); + kfree(vlan); + + bp->vlan_cnt--; + + if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { + /* Configure all non-configured entries */ + list_for_each_entry(vlan, &bp->vlan_reg, link) { + if (vlan->hw) + continue; + + rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); + if (rc) { + BNX2X_ERR("Unable to config VLAN %d\n", + vlan->vid); + continue; + } + DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", + vlan->vid); + vlan->hw = true; + } + DP(NETIF_MSG_IFUP, "Accept all VLAN removed\n"); + bp->accept_any_vlan = false; + if (IS_PF(bp)) + bnx2x_set_rx_mode_inner(bp); + else + bnx2x_vfpf_storm_rx_mode(bp); + } + + DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); + + return rc; +} + static const struct net_device_ops bnx2x_netdev_ops = { .ndo_open = bnx2x_open, .ndo_stop = bnx2x_close, @@ -12609,6 +12873,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_fix_features = bnx2x_fix_features, .ndo_set_features = bnx2x_set_features, .ndo_tx_timeout = bnx2x_tx_timeout, + .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_bnx2x, #endif @@ -12819,6 +13085,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; + /* VFs with an old hypervisor or an old PF do not support filtering */ + if (IS_PF(bp)) { + if (CHIP_IS_E1x(bp)) + bp->accept_any_vlan = true; + else + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#ifdef CONFIG_BNX2X_SRIOV + } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#endif + } + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= NETIF_F_HIGHDMA;
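The two ndo handlers above boil down to a small credit scheme: every VLAN requested by the stack is tracked in bp->vlan_reg, VLANs are programmed into hardware while filter credits last, and once the credits are exhausted the device falls back to accepting any VLAN (filtering is restored when enough VLANs are removed again). The following condensed sketch shows the same control flow; vlan_state, hw_set_vid() and hw_accept_any() are hypothetical stand-ins for the driver state and ramrod helpers, not bnx2x symbols, and the real kill path additionally re-programs entries that were only tracked in software:

#include <linux/types.h>

struct vlan_state {
	int cnt;		/* VLANs requested by the stack */
	int credit;		/* HW filter slots available */
	bool accept_any;	/* true once HW filtering is bypassed */
};

int hw_set_vid(u16 vid, bool add);	/* program/remove one HW filter (assumed) */
int hw_accept_any(bool enable);		/* toggle accept-any-VLAN rx mode (assumed) */

static int vlan_add(struct vlan_state *s, u16 vid)
{
	s->cnt++;
	if (s->cnt > s->credit && !s->accept_any) {
		/* Out of credits: stop filtering, accept every VLAN */
		s->accept_any = true;
		return hw_accept_any(true);
	}
	if (s->cnt <= s->credit)
		return hw_set_vid(vid, true);
	return 0;	/* already accepting any VLAN; nothing to program */
}

static int vlan_kill(struct vlan_state *s, u16 vid, bool was_in_hw)
{
	int rc = was_in_hw ? hw_set_vid(vid, false) : 0;

	s->cnt--;
	if (s->cnt <= s->credit && s->accept_any) {
		/* Enough credits again: re-enable HW filtering */
		s->accept_any = false;
		rc = hw_accept_any(false);
	}
	return rc;
}

@@ -13561,6 +13839,9 @@ static int 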
bnx2x_init_one(struct pci_dev *pdev, bnx2x_register_phc(bp); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); + return 0; init_one_exit: @@ -13623,6 +13904,7 @@ static void __bnx2x_remove(struct pci_dev *pdev, /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) { bnx2x_set_power_state(bp, PCI_D0); + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED); /* Set endianness registers to reset values in case next driver * boots in a different endianness environment. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h index caf1aef651eb..a91ccbf36345 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h @@ -1,6 +1,8 @@ -/* bnx2x_mfw_req.h: Broadcom Everest network driver. +/* bnx2x_mfw_req.h: QLogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 49d511092c82..4dead49bd5cb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1,6 +1,8 @@ -/* bnx2x_reg.h: Broadcom Everest network driver. +/* bnx2x_reg.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2137,6 +2139,10 @@ /* [RW 1] When this bit is set; the LLH will expect all packets to be with e1hov */ #define NIG_REG_LLH_E1HOV_MODE 0x160d8 +/* [RW 16] Outer VLAN type identifier for multi-function mode. In non + * multi-function mode; it will hold the inner VLAN type. Typically 0x8100. + */ +#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028 /* [RW 1] When this bit is set; the LLH will classify the packet before sending it to the BRB or calculating WoL on it. */ #define NIG_REG_LLH_MF_MODE 0x16024 @@ -2953,7 +2959,12 @@ #define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ #define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 -#define PB_REG_CONTROL 0 +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. + */ +#define PBF_REG_VLAN_TYPE_0 0x15c06c /* [RW 2] Interrupt mask register #0 read/write */ #define PB_REG_PB_INT_MASK 0x28 /* [R 2] Interrupt register #0 read */ @@ -3372,6 +3383,12 @@ #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 /* [R 8] debug only: TSDM current credit. Transaction based. */ #define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. 
+ */ +#define PRS_REG_VLAN_TYPE_0 0x401a8 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19) #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20) #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22) @@ -7240,6 +7257,9 @@ The other bits are reserved and should be zero */ #define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 +#define MDIO_AN_REG_848xx_ID_MSB 0xffe2 +#define BCM84858_PHY_ID 0x600d +#define MDIO_AN_REG_848xx_ID_LSB 0xffe3 #define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 #define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 @@ -7283,31 +7303,31 @@ The other bits are reserved and should be zero */ #define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a #define MDIO_84833_SUPER_ISOLATE 0x8000 -/* These are mailbox register set used by 84833. */ -#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 -#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 -#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 -#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 -#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 -#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 -#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 -#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 -#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a -#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b -#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c -#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 -#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 -#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 -#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 -#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 -#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 -#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 +/* These are mailbox register set used by 84833/84858. */ +#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0) +#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26) +#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27) +#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28) +#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29) +#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30) +#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31) -/* Mailbox command set used by 84833. */ -#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 -#define PHY84833_CMD_GET_EEE_MODE 0x8008 -#define PHY84833_CMD_SET_EEE_MODE 0x8009 -/* Mailbox status set used by 84833. 
*/ +/* Mailbox command set used by 84833/84858 */ +#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001 +#define PHY848xx_CMD_GET_EEE_MODE 0x8008 +#define PHY848xx_CMD_SET_EEE_MODE 0x8009 +/* Mailbox status set used by 84833 only */ #define PHY84833_STATUS_CMD_RECEIVED 0x0001 #define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 #define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 @@ -7318,6 +7338,13 @@ Theotherbitsarereservedandshouldbezero*/ #define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +/* Mailbox status set used by 84858 only */ +#define PHY84858_STATUS_CMD_RECEIVED 0x0001 +#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb + /* Warpcore clause 45 addressing */ #define MDIO_WC_DEVAD 0x3 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 4ad415ac8cfe..c9bd7f16018e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1,15 +1,17 @@ -/* bnx2x_sp.c: Broadcom Everest network driver. +/* bnx2x_sp.c: Qlogic Everest network driver. * - * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. 
* * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->get(vp, 1); } + +static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + if (!mp->get(mp, 1)) + return false; + + if (!vp->get(vp, 1)) { + mp->put(mp, 1); + return false; + } + + return true; +} + static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) { struct bnx2x_credit_pool_obj *mp = o->macs_pool; @@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) return vp->put(vp, 1); } +static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + if (!mp->put(mp, 1)) + return false; + + if (!vp->put(vp, 1)) { + mp->get(mp, 1); + return false; + } + + return true; +} + /** * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock * @@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp, return 0; } +static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n", + data->vlan_mac.mac, data->vlan_mac.vlan); + + list_for_each_entry(pos, &o->head, link) + if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && + (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, + ETH_ALEN)) && + (data->vlan_mac.is_inner_mac == + pos->u.vlan_mac.is_inner_mac)) + return -EEXIST; + + return 0; +} + /* check_del() callbacks */ static struct bnx2x_vlan_mac_registry_elem * bnx2x_check_mac_del(struct bnx2x *bp, @@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem * return NULL; } +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_vlan_mac_del(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n", + data->vlan_mac.mac, data->vlan_mac.vlan); + + list_for_each_entry(pos, &o->head, link) + if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && + (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, + ETH_ALEN)) && + (data->vlan_mac.is_inner_mac == + pos->u.vlan_mac.is_inner_mac)) + return pos; + + return NULL; +} + /* check_move() callback */ static bool bnx2x_check_move(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, @@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, rule_cnt); } +static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; + u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; + u16 inner_mac; + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set VLAN and MAC themselves */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac; + rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); + /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + struct bnx2x_vlan_mac_obj *target_obj; + + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + target_obj = elem->cmd_data.vlan_mac.target_obj; + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj, + true, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set a VLAN itself */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + rule_entry->pair.inner_mac = cpu_to_le16(inner_mac); + } + + /* Set the ramrod data header */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_set_one_vlan_mac_e1h - + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
+ true : false; + + /* Reset the ramrod data buffer */ + memset(config, 0, sizeof(*config)); + + bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, + cam_offset, add, + elem->cmd_data.vlan_mac.u.vlan_mac.mac, + elem->cmd_data.vlan_mac.u.vlan_mac.vlan, + ETH_VLAN_FILTER_CLASSIFY, config); +} + /** * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * @@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( return NULL; } +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_mac_ramrod_data *data = + &elem->cmd_data.vlan_mac.u.vlan_mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == + elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + /** * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed * @@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, } } +void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = + (union bnx2x_qable_obj *)vlan_mac_obj; + + bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, vlans_pool); + + /* CAM pool handling */ + vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; + vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; + /* CAM offset is relevant for 57710 and 57711 chips only which have a + * single CAM for both MACs and VLAN-MAC pairs. So the offset + * will be taken from MACs' pool object only. 
+ */ + vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; + vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; + + if (CHIP_IS_E1(bp)) { + BNX2X_ERR("Do not support chips other than E2\n"); + BUG(); + } else if (CHIP_IS_E1H(bp)) { + vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; + vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; + vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; + vlan_mac_obj->check_move = bnx2x_check_move_always_err; + vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_mac_obj->exe_queue, 1, qable_obj, + bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan_mac); + } else { + vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; + vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; + vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; + vlan_mac_obj->check_move = bnx2x_check_move; + vlan_mac_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_mac_obj->exe_queue, + CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan_mac); + } +} /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ static inline void __storm_memset_mac_filters(struct bnx2x *bp, struct tstorm_eth_mac_filter_config *mac_filters, @@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true( * If credit is negative pool operations will always succeed (unlimited pool). * */ -static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, - int base, int credit) +void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, + int base, int credit) { /* Zero the object first */ memset(p, 0, sizeof(*p)); @@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, /* CAM credit is equally divided between all active functions * on the PATH. */ - if ((func_num > 0)) { + if (func_num > 0) { if (!CHIP_REV_IS_SLOW(bp)) - cam_sz = (MAX_MAC_CREDIT_E2 / func_num); + cam_sz = PF_MAC_CREDIT_E2(bp, func_num); else cam_sz = BNX2X_CAM_SIZE_EMUL; @@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, * on the PATH. */ if (func_num > 0) { - int credit = MAX_VLAN_CREDIT_E2 / func_num; - bnx2x_init_credit_pool(p, func_id * credit, credit); + int credit = PF_VLAN_CREDIT_E2(bp, func_num); + + bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit); } else /* this should never happen! Block VLAN operations. 
*/ bnx2x_init_credit_pool(p, 0, 0); @@ -4060,8 +4308,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp, if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; - if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) - caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY; + + if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY; /* RSS keys */ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { @@ -5669,10 +5923,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; - rdata->tunnel_mode = start_params->tunnel_mode; - rdata->gre_tunnel_type = start_params->gre_tunnel_type; - rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; - rdata->vxlan_dst_port = cpu_to_le16(4789); + + rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); + rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); + rdata->inner_clss_l2gre = start_params->inner_clss_l2gre; + rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve; + rdata->inner_clss_vxlan = start_params->inner_clss_vxlan; + rdata->inner_rss = start_params->inner_rss; + rdata->sd_accept_mf_clss_fail = start_params->class_fail; if (start_params->class_fail_ethtype) { rdata->sd_accept_mf_clss_fail_match_ethtype = 1; @@ -5690,6 +5948,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, cpu_to_le16(0x8100); rdata->no_added_tags = start_params->no_added_tags; + + rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid; + if (rdata->c2s_pri_tt_valid) { + memcpy(rdata->c2s_pri_trans_table.val, + start_params->c2s_pri, + MAX_VLAN_PRIORITIES); + rdata->c2s_pri_default = start_params->c2s_pri_default; + } /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -5750,15 +6016,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes)) { rdata->update_tunn_cfg_flg = 1; - if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + &switch_update_params->changes)) + rdata->inner_clss_l2gre = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + &switch_update_params->changes)) + rdata->inner_clss_vxlan = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, &switch_update_params->changes)) - rdata->tunn_clss_en = 1; - if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + rdata->inner_clss_l2geneve = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, &switch_update_params->changes)) - rdata->inner_gre_rss_en = 1; - rdata->tunnel_mode = switch_update_params->tunnel_mode; - rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; - rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->inner_rss = 1; + rdata->vxlan_dst_port = + cpu_to_le16(switch_update_params->vxlan_dst_port); + rdata->geneve_dst_port = + cpu_to_le16(switch_update_params->geneve_dst_port); } rdata->echo = SWITCH_UPDATE; @@ -5885,6 +6158,8 @@ static inline int 
bnx2x_func_send_tx_start(struct bnx2x *bp, rdata->traffic_type_to_priority_cos[i] = tx_start_params->traffic_type_to_priority_cos[i]; + for (i = 0; i < MAX_TRAFFIC_TYPES; i++) + rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i]; /* No need for an explicit memory barrier here as long as we * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 86baecb7c60c..4048fc594cce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1,15 +1,17 @@ -/* bnx2x_sp.h: Broadcom Everest network driver. +/* bnx2x_sp.h: Qlogic Everest network driver. * - * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -711,7 +713,10 @@ enum { BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_UDP, - BNX2X_RSS_GRE_INNER_HDRS, + + BNX2X_RSS_IPV4_VXLAN, + BNX2X_RSS_IPV6_VXLAN, + BNX2X_RSS_TUNN_INNER_HDRS, }; struct bnx2x_config_rss_params { @@ -1105,8 +1110,10 @@ enum { BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, - BNX2X_F_UPDATE_TUNNEL_CLSS_EN, - BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, + BNX2X_F_UPDATE_TUNNEL_INNER_RSS, }; /* Allowed Function states */ @@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; - /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ - u8 tunnel_mode; + /* UDP dest port for VXLAN */ + u16 vxlan_dst_port; - /* tunneling classification enablement */ - u8 tunn_clss_en; + /* UDP dest port for Geneve */ + u16 geneve_dst_port; - /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ - u8 gre_tunnel_type; + /* Enable inner Rx classifications for L2GRE packets */ + u8 inner_clss_l2gre; - /* Enables Inner GRE RSS on the function, depends on the client RSS - * capailities - */ - u8 inner_gre_rss_en; + /* Enable inner Rx classifications for L2-Geneve packets */ + u8 inner_clss_l2geneve; + + /* Enable inner Rx classification for vxlan packets */ + u8 inner_clss_vxlan; + + /* Enable RSS according to inner header */ + u8 inner_rss; /* Allows accepting of packets failing MF classification, possibly * only matching a given ethertype @@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params { /* Prevent inner vlans from being added by FW */ u8 no_added_tags; + + /* Inner-to-Outer vlan priority mapping */ + 
u8 c2s_pri[MAX_VLAN_PRIORITIES]; + u8 c2s_pri_default; + u8 c2s_pri_valid; }; struct bnx2x_func_switch_update_params { @@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params { u16 vlan; u16 vlan_eth_type; u8 vlan_force_prio; - u8 tunnel_mode; - u8 gre_tunnel_type; + u16 vxlan_dst_port; + u16 geneve_dst_port; }; struct bnx2x_func_afex_update_params { @@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params { u8 dcb_enabled; u8 dcb_version; u8 dont_add_pri_0_en; + u8 dcb_outer_pri[MAX_TRAFFIC_TYPES]; }; struct bnx2x_func_set_timesync_params { @@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, unsigned long *pstate, bnx2x_obj_type type, struct bnx2x_credit_pool_obj *vlans_pool); +void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool); + int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, @@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, struct bnx2x_credit_pool_obj *p, u8 func_id, u8 func_num); +void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, + int base, int credit); /****************** RSS CONFIGURATION ****************/ void bnx2x_init_rss_config_obj(struct bnx2x *bp, @@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); +#define PF_MAC_CREDIT_E2(bp, func_num) \ + ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \ + func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT) + +#define PF_VLAN_CREDIT_E2(bp, func_num) \ + ((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \ + func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT) + #endif /* BNX2X_SP_VERBS */
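Read together with the VF credit constants defined in bnx2x_sriov.h further below, these macros give each PF its share of the per-path CAM after reserving the worst case for VFs, plus the credits of its own VFs. A worked example with assumed numbers (the real MAX_VLAN_CREDIT_E2 pool size is defined elsewhere in this header; 272 and 8 VFs are illustrative values only): with MAX_VLAN_CREDIT_E2 == 272, func_num == 2, GET_NUM_VFS_PER_PATH(bp) == 64, VF_VLAN_CREDIT_CNT == 2 and GET_NUM_VFS_PER_PF(bp) == 8, PF_VLAN_CREDIT_E2 evaluates to (272 - 64 * 2) / 2 + 8 * 2 = 72 + 16 = 88 VLAN credits for this PF.

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f67348d16966..ec82831f5071 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1,15 +1,17 @@ -/* bnx2x_sriov.c: Broadcom Everest network driver. +/* bnx2x_sriov.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 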
* * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, setup_p->gen_params.stat_id = vfq_stat_id(vf, q); setup_p->gen_params.fp_hsi = vf->fp_hsi; - /* Setup-op pause params: - * Nothing to do, the pause thresholds are set by default to 0 which - * effectively turns off the feature for this queue. We don't want - * one queue (VF) to interfering with another queue (another VF) - */ - if (vf->cfg_flags & VF_CFG_FW_FC) - BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", - vf->abs_vfid); /* Setup-op flags: * collect statistics, zero statistics, local-switching, security, * OV for Flex10, RSS and MCAST for leading @@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, } static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, - int qid, bool drv_only, bool mac) + int qid, bool drv_only, int type) { struct bnx2x_vlan_mac_ramrod_params ramrod; int rc; DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, - mac ? "MACs" : "VLANs"); + (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" : + (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs"); /* Prepare ramrod params */ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); - if (mac) { + if (type == BNX2X_VF_FILTER_VLAN_MAC) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj); + } else if (type == BNX2X_VF_FILTER_MAC) { set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); } else { - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &ramrod.user_req.vlan_mac_flags); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); } ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; @@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, &ramrod.ramrod_flags); if (rc) { BNX2X_ERR("Failed to delete all %s\n", - mac ? "MACs" : "VLANs"); + (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" : + (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs"); return rc; } - /* Clear the vlan counters */ - if (!mac) - atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); - return 0; } @@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", vf->abs_vfid, filter->add ? "Adding" : "Deleting", - filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); + (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" : + (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN"); /* Prepare ramrod params */ memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); - if (filter->type == BNX2X_VF_FILTER_VLAN) { - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &ramrod.user_req.vlan_mac_flags); + if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) { + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + } else if (filter->type == BNX2X_VF_FILTER_VLAN) { ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); ramrod.user_req.u.vlan.vlan = filter->vid; } else { @@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, ramrod.user_req.cmd = filter->add ? 
BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; - /* Verify there are available vlan credits */ - if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && - (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= - vf_vlan_rules_cnt(vf))) { - BNX2X_ERR("No credits for vlan [%d >= %d]\n", - atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), - vf_vlan_rules_cnt(vf)); - return -ENOMEM; - } - set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); if (drv_only) set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); @@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, if (rc && rc != -EEXIST) { BNX2X_ERR("Failed to %s %s\n", filter->add ? "add" : "delete", - filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : - "VLAN"); + (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? + "VLAN-MAC" : + (filter->type == BNX2X_VF_FILTER_MAC) ? + "MAC" : "VLAN"); return rc; } - /* Update the vlan counters */ - if (filter->type == BNX2X_VF_FILTER_VLAN) - bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, - &bnx2x_vfq(vf, qid, vlan_count)); - return 0; } @@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, if (rc) goto op_err; - /* Configure vlan0 for leading queue */ - if (!qid) { - struct bnx2x_vf_mac_vlan_filter filter; - - memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); - filter.type = BNX2X_VF_FILTER_VLAN; - filter.add = true; - filter.vid = 0; - rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); - if (rc) - goto op_err; - } - /* Schedule the configuration of any pending vlan filters */ - vf->cfg_flags |= VF_CFG_VLAN; bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_MSG_IOV); return 0; @@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, /* If needed, clean the filtering data base */ if ((qid == LEADING_IDX) && bnx2x_validate_vf_sp_objs(bp, vf, false)) { - rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, + BNX2X_VF_FILTER_VLAN_MAC); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, + BNX2X_VF_FILTER_VLAN); if (rc) goto op_err; - rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, + BNX2X_VF_FILTER_MAC); if (rc) goto op_err; } @@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) /* Remove filtering if feasible */ if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, - false, false); + false, + BNX2X_VF_FILTER_VLAN_MAC); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, + BNX2X_VF_FILTER_VLAN); if (rc) goto op_err; rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, - false, true); + false, + BNX2X_VF_FILTER_MAC); if (rc) goto op_err; rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); @@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); - if (vf->cfg_flags & VF_CFG_INT_SIMD) - val |= IGU_VF_CONF_SINGLE_ISR_EN; val &= ~IGU_VF_CONF_PARENT_MASK; val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); @@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) return 0; } -static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, - struct bnx2x_virtf *vf, - int new) -{ - int num = vf_vlan_rules_cnt(vf); - int diff = new - num; - bool rc = true; - - DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan 
filter credits [previously %d]\n", - vf->abs_vfid, new, num); - - if (diff > 0) - rc = bp->vlans_pool.get(&bp->vlans_pool, diff); - else if (diff < 0) - rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); - - if (rc) - vf_vlan_rules_cnt(vf) = new; - else - DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", - vf->abs_vfid); -} - /* must be called after the number of PF queues and the number of VFs are * both known */ @@ -875,21 +833,13 @@ static void bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { struct vf_pf_resc_request *resc = &vf->alloc_resc; - u16 vlan_count = 0; /* will be set only during VF-ACQUIRE */ resc->num_rxqs = 0; resc->num_txqs = 0; - /* no credit calculations for macs (just yet) */ - resc->num_mac_filters = 1; - - /* divvy up vlan rules */ - bnx2x_iov_re_set_vlan_filters(bp, vf, 0); - vlan_count = bp->vlans_pool.check(&bp->vlans_pool); - vlan_count = 1 << ilog2(vlan_count); - bnx2x_iov_re_set_vlan_filters(bp, vf, - vlan_count / BNX2X_NR_VIRTFN(bp)); + resc->num_mac_filters = VF_MAC_CREDIT_CNT; + resc->num_vlan_filters = VF_VLAN_CREDIT_CNT; /* no real limitation */ resc->num_mc_filters = 0; @@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, mutex_init(&bp->vfdb->bulletin_mutex); + if (SHMEM2_HAS(bp, sriov_switch_mode)) + SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB); + return 0; failed: DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); @@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) vf->filter_state = 0; vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); + bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0, + vf_vlan_rules_cnt(vf)); + bnx2x_init_credit_pool(&vf->vf_macs_pool, 0, + vf_mac_rules_cnt(vf)); + /* init mcast object - This object will be re-initialized * during VF-ACQUIRE with the proper cl_id and cid. * It needs to be initialized here so that it can be safely @@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); - /* Save a vlan filter for the Hypervisor */ return ((req_resc->num_rxqs <= rxq_cnt) && (req_resc->num_txqs <= txq_cnt) && (req_resc->num_sbs <= vf_sb_count(vf)) && (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && - (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); + (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); } /* CORE VF API */ @@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, vf_sb_count(vf) = resc->num_sbs; vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); vf_txq_count(vf) = resc->num_txqs ? 
: bnx2x_vf_max_queue_cnt(bp, vf); - if (resc->num_mac_filters) - vf_mac_rules_cnt(vf) = resc->num_mac_filters; - /* Add an additional vlan filter credit for the hypervisor */ - bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); DP(BNX2X_MSG_IOV, "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", vf_sb_count(vf), vf_rxq_count(vf), vf_txq_count(vf), vf_mac_rules_cnt(vf), - vf_vlan_rules_visible_cnt(vf)); + vf_vlan_rules_cnt(vf)); /* Initialize the queues */ if (!vf->vfqs) { @@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) { struct bnx2x_func_init_params func_init = {0}; - u16 flags = 0; int i; /* the sb resources are initialized at this point, do the @@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) /* reset IGU VF statistics: MSIX */ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); - /* vf init */ - if (vf->cfg_flags & VF_CFG_STATS) - flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); - - if (vf->cfg_flags & VF_CFG_TPA) - flags |= FUNC_FLG_TPA; - - if (is_vf_multi(vf)) - flags |= FUNC_FLG_RSS; - /* function setup */ - func_init.func_flgs = flags; func_init.pf_id = BP_FUNC(bp); func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); - func_init.fw_stat_map = vf->fw_stat_map; - func_init.spq_map = vf->spq_map; - func_init.spq_prod = 0; bnx2x_func_init(bp, &func_init); /* Enable the vf */ @@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); for_each_vf(bp, vfidx) { - bulletin = BP_VF_BULLETIN(bp, vfidx); - if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) + bulletin = BP_VF_BULLETIN(bp, vfidx); + if (bulletin->valid_bitmap & VLAN_VALID) bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); } } @@ -2808,20 +2746,58 @@ out: return rc; } -int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp, + struct bnx2x_virtf *vf, bool accept) +{ + struct bnx2x_rx_mode_ramrod_params rx_ramrod; + unsigned long accept_flags; + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (accept) + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); +} + +static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf, + u16 vlan, bool add) { - struct bnx2x_queue_state_params q_params = {NULL}; struct bnx2x_vlan_mac_ramrod_params ramrod_param; - struct bnx2x_queue_update_params *update_params; + unsigned long ramrod_flags = 0; + int rc = 0; + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj); + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = add ? 
BNX2X_VLAN_MAC_ADD + : BNX2X_VLAN_MAC_DEL; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + return -EINVAL; + } + + return 0; +} + +int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +{ struct pf_vf_bulletin_content *bulletin = NULL; - struct bnx2x_rx_mode_ramrod_params rx_ramrod; struct bnx2x *bp = netdev_priv(dev); struct bnx2x_vlan_mac_obj *vlan_obj; unsigned long vlan_mac_flags = 0; unsigned long ramrod_flags = 0; struct bnx2x_virtf *vf = NULL; - unsigned long accept_flags; - int rc; + int i, rc; if (vlan > 4095) { BNX2X_ERR("illegal vlan value %d\n", vlan); @@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) bulletin->valid_bitmap &= ~(1 << VLAN_VALID); bulletin->vlan = vlan; + /* Post update on VF's bulletin board */ + rc = bnx2x_post_vf_bulletin(bp, vfidx); + if (rc) + BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); mutex_unlock(&bp->vfdb->bulletin_mutex); /* is vf initialized and queue set up? */ @@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) goto out; } - /* need to remove/add the VF's accept_any_vlan bit */ - accept_flags = bnx2x_leading_vfq(vf, accept_flags); - if (vlan) - clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); - else - set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); - - bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, - accept_flags); - bnx2x_leading_vfq(vf, accept_flags) = accept_flags; - bnx2x_config_rx_mode(bp, &rx_ramrod); + /* clear accept_any_vlan when HV forces vlan, otherwise + * according to VF capabilities + */ + if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER)) + bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan); - /* configure the new vlan to device */ - memset(&ramrod_param, 0, sizeof(ramrod_param)); - __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - ramrod_param.vlan_mac_obj = vlan_obj; - ramrod_param.ramrod_flags = ramrod_flags; - set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, - &ramrod_param.user_req.vlan_mac_flags); - ramrod_param.user_req.u.vlan.vlan = vlan; - ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; - rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc) { - BNX2X_ERR("failed to configure vlan\n"); - rc = -EINVAL; + rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true); + if (rc) goto out; - } - /* send queue update ramrod to configure default vlan and silent - * vlan removal + /* send queue update ramrods to configure default vlan and + * silent vlan removal */ - __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); - q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); - update_params = &q_params.params.update; - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, - &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, - &update_params->update_flags); - if (vlan == 0) { - /* if vlan is 0 then we want to leave the VF traffic - * untagged, and leave the incoming traffic untouched - * (i.e. do not remove any vlan tags). - */ - __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, - &update_params->update_flags); - __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, - &update_params->update_flags); - } else { - /* configure default vlan to vf queue and set silent - * vlan removal (the vf remains unaware of this vlan). 
- */ - __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + for_each_vfq(vf, i) { + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_queue_update_params *update_params; + + q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj); + + /* validate the Q is UP */ + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + continue; + + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update_params->update_flags); - __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, &update_params->update_flags); - update_params->def_vlan = vlan; - update_params->silent_removal_value = - vlan & VLAN_VID_MASK; - update_params->silent_removal_mask = VLAN_VID_MASK; - } + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). + */ + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). + */ + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + update_params->def_vlan = vlan; + update_params->silent_removal_value = + vlan & VLAN_VID_MASK; + update_params->silent_removal_mask = VLAN_VID_MASK; + } - /* Update the Queue state */ - rc = bnx2x_queue_state_change(bp, &q_params); - if (rc) { - BNX2X_ERR("Failed to configure default VLAN\n"); - goto out; + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN queue %d\n", + i); + goto out; + } } - - - /* clear the flag indicating that this VF needs its vlan - * (will only be set if the HV configured the Vlan before vf was - * up and we were called because the VF came up later - */ out: - vf->cfg_flags &= ~VF_CFG_VLAN; bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + if (!rc) + DP(BNX2X_MSG_IOV, + "updated VF[%d] vlan configuration (vlan = %d)\n", + vfidx, vlan); + return rc; }
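For context, this PF-side path is what runs when an administrator forces a VLAN on a VF, e.g. "ip link set <pf-dev> vf <n> vlan <vid>" (vlan 0 clears the forced tag); with the loop above, the default-VLAN and silent-removal update is now applied to every active queue of the VF rather than only the leading one.

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 66ee62a0401a..670a581ffabc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -1,15 +1,17 @@ -/* bnx2x_sriov.h: Broadcom Everest network driver. +/* bnx2x_sriov.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 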
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -75,7 +77,10 @@ struct bnx2x_vf_queue { /* VLANs object */ struct bnx2x_vlan_mac_obj vlan_obj; - atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + + /* VLAN-MACs object */ + struct bnx2x_vlan_mac_obj vlan_mac_obj; + unsigned long accept_flags; /* last accept flags configured */ /* Queue Slow-path State object */ @@ -103,8 +108,10 @@ struct bnx2x_virtf; struct bnx2x_vf_mac_vlan_filter { int type; -#define BNX2X_VF_FILTER_MAC 1 -#define BNX2X_VF_FILTER_VLAN 2 +#define BNX2X_VF_FILTER_MAC BIT(0) +#define BNX2X_VF_FILTER_VLAN BIT(1) +#define BNX2X_VF_FILTER_VLAN_MAC \ + (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/ bool add; u8 *mac; @@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters { /* vf context */ struct bnx2x_virtf { u16 cfg_flags; -#define VF_CFG_STATS 0x0001 -#define VF_CFG_FW_FC 0x0002 -#define VF_CFG_TPA 0x0004 -#define VF_CFG_INT_SIMD 0x0008 -#define VF_CACHE_LINE 0x0010 -#define VF_CFG_VLAN 0x0020 -#define VF_CFG_STATS_COALESCE 0x0040 -#define VF_CFG_EXT_BULLETIN 0x0080 +#define VF_CFG_STATS_COALESCE 0x1 +#define VF_CFG_EXT_BULLETIN 0x2 +#define VF_CFG_VLAN_FILTER 0x4 u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO * IFLA_VF_LINK_STATE_ENABLE * IFLA_VF_LINK_STATE_DISABLE @@ -140,9 +142,8 @@ struct bnx2x_virtf { bool flr_clnup_stage; /* true during flr cleanup */ /* dma */ - dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + dma_addr_t fw_stat_map; u16 stats_stride; - dma_addr_t spq_map; dma_addr_t bulletin_map; /* Allocated resources counters. Before the VF is acquired, the @@ -163,8 +164,6 @@ struct bnx2x_virtf { #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) - /* Hide a single vlan filter credit for the hypervisor */ -#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1) u8 sb_count; /* actual number of SBs */ u8 igu_base_id; /* base igu status block id */ @@ -207,6 +206,9 @@ struct bnx2x_virtf { enum channel_tlvs op_current; u8 fp_hsi; + + struct bnx2x_credit_pool_obj vf_vlans_pool; + struct bnx2x_credit_pool_obj vf_macs_pool; }; #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) @@ -230,6 +232,12 @@ struct bnx2x_virtf { #define FW_VF_HANDLE(abs_vfid) \ (abs_vfid + FW_PF_MAX_HANDLE) +#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */ +#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? 
(bp)->vfdb->sriov.total \ + : 0) +#define VF_MAC_CREDIT_CNT 1 +#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */ + /* locking and unlocking the channel mutex */ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv); @@ -274,6 +282,10 @@ struct bnx2x_vf_sp { } vlan_rdata; union { + struct eth_classify_rules_ramrod_data e2; + } vlan_mac_rdata; + + union { struct eth_filter_rules_ramrod_data e2; } rx_mode_rdata; @@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state); +int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add); #else /* CONFIG_BNX2X_SRIOV */ +#define GET_NUM_VFS_PER_PATH(bp) 0 +#define GET_NUM_VFS_PER_PF(bp) 0 +#define VF_MAC_CREDIT_CNT 0 +#define VF_VLAN_CREDIT_CNT 0 + static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, struct bnx2x_queue_sp_obj **q_obj) {} static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} @@ -604,5 +622,7 @@ struct pf_vf_bulletin_content; static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, bool support_long) {} +static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; } + #endif /* CONFIG_BNX2X_SRIOV */ #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 69d699f0730a..7e0919aa450e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1,6 +1,8 @@ -/* bnx2x_stats.c: Broadcom Everest network driver. +/* bnx2x_stats.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 965539a9dabe..b2644ed13d06 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -1,6 +1,8 @@ -/* bnx2x_stats.h: Broadcom Everest network driver. +/* bnx2x_stats.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 06b8c0d8fd3b..1374e5394a79 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1,15 +1,17 @@ -/* bnx2x_vfpf.c: Broadcom Everest network driver. +/* bnx2x_vfpf.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) req->resc_request.num_sbs = bp->igu_sb_cnt; req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; + req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS; /* pf 2 vf bulletin board address */ req->bulletin_addr = bp->pf2vf_bulletin_mapping; @@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* Bulletin support for bulletin board with length > legacy length */ req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN; + /* vlan filtering is supported */ + req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER; /* add list termination tlv */ bnx2x_add_tlv(bp, req, @@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; + bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters; + strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, sizeof(bp->fw_ver)); @@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, BNX2X_FILTER_MAC_PENDING, &vf->filter_state, BNX2X_OBJ_TYPE_RX_TX, - &bp->macs_pool); + &vf->vf_macs_pool); /* vlan */ bnx2x_init_vlan_obj(bp, &q->vlan_obj, cl_id, q->cid, func_id, @@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, BNX2X_FILTER_VLAN_PENDING, &vf->filter_state, BNX2X_OBJ_TYPE_RX_TX, - &bp->vlans_pool); - + &vf->vf_vlans_pool); + /* vlan-mac */ + bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_mac_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata), + BNX2X_FILTER_VLAN_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &vf->vf_macs_pool, + &vf->vf_vlans_pool); /* mcast */ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, q->cid, func_id, func_id, @@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID; if (set) - req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC; + req->filters[0].flags |= VFPF_Q_FILTER_SET; /* sample bulletin board for new mac */ bnx2x_sample_bulletin(bp); @@ -911,6 +927,67 @@ out: return 0; } +/* request pf to add a vlan for the vf */ +int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) +{ + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc = 0; + + if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) { + DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n"); + return 0; + } + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; + req->vf_qid = vf_qid; + req->n_mac_vlan_filters = 1; + + req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID; + + if (add) + 
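/* VFPF_Q_FILTER_SET (the renamed VFPF_Q_FILTER_SET_MAC) doubles as the
 * set/clear selector for VLAN filters: with the bit set the PF installs
 * the filter, without it the PF removes it. An illustrative call adding
 * VLAN 100 on the VF's leading queue (qid 0) would be
 *	bnx2x_vfpf_update_vlan(bp, 100, 0, true);
 */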
req->filters[0].flags |= VFPF_Q_FILTER_SET; + + /* sample bulletin board for hypervisor vlan */ + bnx2x_sample_bulletin(bp); + + if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { + BNX2X_ERR("Hypervisor will decline the request, aborting\n"); + rc = -EINVAL; + goto out; + } + + req->filters[0].vlan_tag = vid; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del", + vid); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) { int mode = bp->rx_mode; @@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + if (mode == BNX2X_RX_MODE_PROMISC) + req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN; } + + if (bp->accept_any_vlan) + req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN; + req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; req->vf_qid = 0; @@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | PFVF_CAP_TPA | - PFVF_CAP_TPA_UPDATE); + PFVF_CAP_TPA_UPDATE | + PFVF_CAP_VLAN_FILTER); bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, sizeof(resp->pfdev_info.fw_ver)); @@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_max_queue_cnt(bp, vf); resc->num_sbs = vf_sb_count(vf); resc->num_mac_filters = vf_mac_rules_cnt(vf); - resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); + resc->num_vlan_filters = vf_vlan_rules_cnt(vf); resc->num_mc_filters = 0; if (status == PFVF_STATUS_SUCCESS) { @@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN; } + if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) { + DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n", + vf->abs_vfid); + vf->cfg_flags |= VF_CFG_VLAN_FILTER; + } else { + vf->cfg_flags &= ~VF_CFG_VLAN_FILTER; + } + out: /* response */ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); @@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, int rc; /* record ghost addresses from vf message */ - vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; vf->stats_stride = init->stats_stride; rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); @@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, if ((msg_filter->flags & type_flag) != type_flag) continue; - if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { + memset(&fl->filters[j], 0, sizeof(fl->filters[j])); + if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) { fl->filters[j].mac = msg_filter->mac; - fl->filters[j].type = BNX2X_VF_FILTER_MAC; - } else { + fl->filters[j].type |= BNX2X_VF_FILTER_MAC; + } + if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) { fl->filters[j].vid = msg_filter->vlan_tag; +
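/* A request filter that carries both a valid MAC and a valid VLAN tag
 * leaves this loop with type == BNX2X_VF_FILTER_VLAN_MAC and is later
 * installed as a single paired vlan-mac classification rule rather
 * than as two independent MAC and VLAN rules.
 */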
fl->filters[j].type |= BNX2X_VF_FILTER_VLAN; } - fl->filters[j].add = - (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? - true : false; + fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET); fl->count++; + j++; } if (!fl->count) kfree(fl); @@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, return 0; } +static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters, + u32 flags) +{ + int i, cnt = 0; + + for (i = 0; i < filters->n_mac_vlan_filters; i++) + if ((filters->filters[i].flags & flags) == flags) + cnt++; + + return cnt; +} + static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, struct vfpf_q_mac_vlan_filter *filter) { @@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID +#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER) static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) { @@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) /* check for any mac/vlan changes */ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { - /* build mac list */ struct bnx2x_vf_mac_vlan_filters *fl = NULL; + /* build vlan-mac list */ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_MAC_FILTER); + VFPF_VLAN_MAC_FILTER); if (rc) goto op_err; if (fl) { - /* set mac list */ + /* set vlan-mac list */ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, msg->vf_qid, false); @@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) goto op_err; } - /* build vlan list */ + /* build mac list */ fl = NULL; rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, - VFPF_VLAN_FILTER); + VFPF_MAC_FILTER); if (rc) goto op_err; if (fl) { - /* set vlan list */ + /* set mac list */ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, msg->vf_qid, false); if (rc) goto op_err; } + } if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { @@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); } - /* A packet arriving the vf's mac should be accepted - * with any vlan, unless a vlan has already been - * configured. + /* any_vlan is not configured if HV is forcing VLAN + * any_vlan is configured if + * 1. VF does not support vlan filtering + * OR + * 2. VF supports vlan filtering and explicitly requested it */ - if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) && + (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) || + msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN)) __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); /* set rx-mode */ @@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp, * since queue was not set up. */ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { - /* once a mac was set by ndo can only accept a single mac... */ - if (filters->n_mac_vlan_filters > 1) { - BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", - vf->abs_vfid); - rc = -EPERM; - goto response; + struct vfpf_q_mac_vlan_filter *filter = NULL; + int i; + + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (!(filters->filters[i].flags & + VFPF_Q_FILTER_DEST_MAC_VALID)) + continue; + + /* once a mac was set by ndo can only accept + * a single mac... 
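 * (The ndo in question is ndo_set_vf_mac: once the hypervisor pins a
 * MAC through the PF, e.g. "ip link set <pf> vf 0 mac <addr>", the
 * loop above may find at most one MAC filter in the request, and the
 * check below insists that it matches the pinned address.)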
+ */ + if (filter) { + BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n", + vf->abs_vfid, + filters->n_mac_vlan_filters); + rc = -EPERM; + goto response; + } + + filter = &filters->filters[i]; } /* ...and only the mac set by the ndo */ - if (filters->n_mac_vlan_filters == 1 && - !ether_addr_equal(filters->filters->mac, bulletin->mac)) { + if (filter && + !ether_addr_equal(filter->mac, bulletin->mac)) { BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", vf->abs_vfid); @@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp, /* if vlan was set by hypervisor we don't allow guest to config vlan */ if (bulletin->valid_bitmap & 1 << VLAN_VALID) { - int i; - /* search for vlan filters */ - for (i = 0; i < filters->n_mac_vlan_filters; i++) { - if (filters->filters[i].flags & - VFPF_Q_FILTER_VLAN_TAG_VALID) { - BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", - vf->abs_vfid); - rc = -EPERM; - goto response; - } + + if (bnx2x_vf_filters_contain(filters, + VFPF_Q_FILTER_VLAN_TAG_VALID)) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", + vf->abs_vfid); + rc = -EPERM; + goto response; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index b86479fc0d2f..64f2b52c5829 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -1,16 +1,22 @@ -/* bnx2x_vfpf.h: Broadcom Everest network driver. +/* bnx2x_vfpf.h: Qlogic Everest network driver. * * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * under the terms of the GNU General Public License version 2 (the “GPL”), + * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following + * added to such license: * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is + * not derived from this software. The special exception does not apply to any + * modifications of the software. 
* * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Ariel Elior <ariel.elior@qlogic.com> @@ -64,6 +70,8 @@ struct hw_sb_info { #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 #define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 +#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020 + #define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) #define BULLETIN_CONTENT_LEGACY_SIZE (32) #define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ @@ -127,6 +135,7 @@ struct vfpf_acquire_tlv { u8 fp_hsi_ver; u8 caps; #define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) +#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1) } vfdev_info; struct vf_pf_resc_request resc_request; @@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv { struct pf_vf_pfdev_info { u32 chip_num; u32 pf_cap; -#define PFVF_CAP_RSS 0x00000001 -#define PFVF_CAP_DHC 0x00000002 -#define PFVF_CAP_TPA 0x00000004 -#define PFVF_CAP_TPA_UPDATE 0x00000008 +#define PFVF_CAP_RSS 0x00000001 +#define PFVF_CAP_DHC 0x00000002 +#define PFVF_CAP_TPA 0x00000004 +#define PFVF_CAP_TPA_UPDATE 0x00000008 +#define PFVF_CAP_VLAN_FILTER 0x00000010 + char fw_ver[32]; u16 db_size; u8 indices_per_sb; @@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter { u32 flags; #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 -#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ +#define VFPF_Q_FILTER_SET 0x100 /* set/clear */ u8 mac[ETH_ALEN]; u16 vlan_tag; }; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 64c1e9db6b0b..eb080ef8ee97 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -907,9 +907,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - if (mode == GENET_POWER_PASSIVE) - bcmgenet_mii_reset(priv->dev); + bcmgenet_phy_power_set(priv->dev, true); } /* ioctl handle special commands that are not present in ethtool. 
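 * MII register access (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG), for
 * instance, is forwarded to phy_mii_ioctl() on the attached PHY.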
*/ @@ -1725,7 +1724,7 @@ static int init_umac(struct bcmgenet_priv *priv) int0_enable |= UMAC_IRQ_TXDMA_DONE; /* Monitor cable plug/unplugged event for internal PHY */ - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->ext_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; @@ -2389,6 +2388,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bcmgenet_poll_controller(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Invoke the main RX/TX interrupt handler */ + disable_irq(priv->irq0); + bcmgenet_isr0(priv->irq0, priv); + enable_irq(priv->irq0); + + /* And the interrupt handler for RX/TX priority queues */ + disable_irq(priv->irq1); + bcmgenet_isr1(priv->irq1, priv); + enable_irq(priv->irq1); +} +#endif + static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) { u32 reg; @@ -2626,13 +2642,12 @@ static int bcmgenet_open(struct net_device *dev) netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); /* Turn on the clock */ - if (!IS_ERR(priv->clk)) - clk_prepare_enable(priv->clk); + clk_prepare_enable(priv->clk); /* If this is an internal GPHY, power it back on now, before UniMAC is * brought out of reset as absolutely no UniMAC activity is allowed */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); /* take MAC out of reset */ @@ -2651,7 +2666,7 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_ENERGY_DET_MASK; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); @@ -2687,23 +2702,24 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } - /* Re-configure the port multiplexer towards the PHY device */ - bcmgenet_mii_config(priv->dev, false); - - phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, - priv->phy_interface); + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } bcmgenet_netif_start(dev); return 0; +err_irq1: + free_irq(priv->irq1, priv); err_irq0: - free_irq(priv->irq0, dev); + free_irq(priv->irq0, priv); err_fini_dma: bcmgenet_fini_dma(priv); err_clk_disable: - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); return ret; } @@ -2757,11 +2773,10 @@ static int bcmgenet_close(struct net_device *dev) free_irq(priv->irq0, priv); free_irq(priv->irq1, priv); - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); return ret; } @@ -2941,6 +2956,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_set_mac_address = bcmgenet_set_mac_addr, .ndo_do_ioctl = bcmgenet_ioctl, .ndo_set_features = bcmgenet_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcmgenet_poll_controller, +#endif }; /* Array of GENET hardware parameters/characteristics */ @@ -3214,11 +3232,12 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->version = pd->genet_version; priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); - if (IS_ERR(priv->clk)) + if (IS_ERR(priv->clk)) { dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); + priv->clk = NULL; + } - if (!IS_ERR(priv->clk)) - 
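/* The IS_ERR() guards around the clk calls can go away because probe
 * now cooks a failed devm_clk_get() result down to NULL, and the common
 * clock framework accepts a NULL struct clk as a valid dummy clock:
 * clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is
 * a no-op.
 */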
clk_prepare_enable(priv->clk); + clk_prepare_enable(priv->clk); bcmgenet_set_hw_params(priv); @@ -3229,8 +3248,10 @@ static int bcmgenet_probe(struct platform_device *pdev) INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); - if (IS_ERR(priv->clk_wol)) + if (IS_ERR(priv->clk_wol)) { dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + priv->clk_wol = NULL; + } priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); if (IS_ERR(priv->clk_eee)) { @@ -3256,8 +3277,7 @@ static int bcmgenet_probe(struct platform_device *pdev) netif_carrier_off(dev); /* Turn off the main clock, WOL clock is handled separately */ - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); err = register_netdev(dev); if (err) @@ -3266,8 +3286,7 @@ static int bcmgenet_probe(struct platform_device *pdev) return err; err_clk_disable: - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); err: free_netdev(dev); return err; @@ -3319,7 +3338,7 @@ static int bcmgenet_suspend(struct device *d) if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); clk_prepare_enable(priv->clk_wol); - } else if (phy_is_internal(priv->phydev)) { + } else if (priv->internal_phy) { ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); } @@ -3348,7 +3367,7 @@ static int bcmgenet_resume(struct device *d) /* If this is an internal GPHY, power it back on now, before UniMAC is * brought out of reset as absolutely no UniMAC activity is allowed */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); bcmgenet_umac_reset(priv); @@ -3363,14 +3382,14 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev, false); + bcmgenet_mii_config(priv->dev); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_ENERGY_DET_MASK; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 6159deab8c98..7299d1075422 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -593,6 +593,7 @@ struct bcmgenet_priv { /* MDIO bus variables */ wait_queue_head_t wq; struct phy_device *phydev; + bool internal_phy; struct device_node *phy_dn; struct device_node *mdio_dn; struct mii_bus *mii_bus; @@ -670,9 +671,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); -void bcmgenet_mii_reset(struct net_device *dev); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index adf23d2ac488..b3679ad1c1c7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -163,14 +163,13 @@ 
void bcmgenet_mii_setup(struct net_device *dev) phy_print_status(phydev); } -void bcmgenet_mii_reset(struct net_device *dev) +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) { - struct bcmgenet_priv *priv = netdev_priv(dev); + if (dev && dev->phydev && status) + status->link = dev->phydev->link; - if (priv->phydev) { - phy_init_hw(priv->phydev); - phy_start_aneg(priv->phydev); - } + return 0; } void bcmgenet_phy_power_set(struct net_device *dev, bool enable) @@ -215,7 +214,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev) reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_PWR_DN_EN_LD; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - bcmgenet_mii_reset(dev); } static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) @@ -226,9 +224,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); reg |= LED_ACT_SOURCE_MAC; bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + fixed_phy_set_link_update(priv->phydev, + bcmgenet_fixed_phy_link_update); } -int bcmgenet_mii_config(struct net_device *dev, bool init) +int bcmgenet_mii_config(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -238,10 +240,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) u32 port_ctrl; u32 reg; - priv->ext_phy = !phy_is_internal(priv->phydev) && + priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) priv->phy_interface = PHY_INTERFACE_MODE_NA; switch (priv->phy_interface) { @@ -259,7 +261,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { phy_name = "internal PHY"; bcmgenet_internal_phy_setup(dev); } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { @@ -321,13 +323,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } - if (init) - dev_info(kdev, "configuring instance for %s\n", phy_name); + dev_info_once(kdev, "configuring instance for %s\n", phy_name); return 0; } -static int bcmgenet_mii_probe(struct net_device *dev) +int bcmgenet_mii_probe(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; @@ -345,22 +346,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->old_pause = -1; if (dn) { - if (priv->phydev) { - pr_info("PHY already attached\n"); - return 0; - } - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { - ret = of_phy_register_fixed_link(dn); - if (ret) - return ret; - - priv->phy_dn = of_node_get(dn); - } - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, phy_flags, priv->phy_interface); if (!phydev) { @@ -386,7 +371,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. 
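 * (bcmgenet_mii_probe() is now exported and called from bcmgenet_open()
 * instead of once from bcmgenet_mii_init(), so this connect-then-
 * configure sequence runs each time the interface is brought up.)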
*/ - ret = bcmgenet_mii_config(dev, true); + ret = bcmgenet_mii_config(dev); if (ret) { phy_disconnect(priv->phydev); return ret; @@ -397,14 +382,11 @@ static int bcmgenet_mii_probe(struct net_device *dev) /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; else priv->mii_bus->irq[phydev->addr] = PHY_POLL; - pr_info("attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); - return 0; } @@ -490,7 +472,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; + const char *phy_mode_str = NULL; + struct phy_device *phydev = NULL; char *compat; + int phy_mode; int ret; compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); @@ -513,17 +498,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + /* Get the link mode */ - priv->phy_interface = of_get_phy_mode(dn); + phy_mode = of_get_phy_mode(dn); + priv->phy_interface = phy_mode; - return 0; -} + /* We need to specifically look up whether this PHY interface is internal + * or not *before* we even try to probe the PHY driver over MDIO as we + * may have shut down the internal PHY for power saving purposes. + */ + if (phy_mode < 0) { + ret = of_property_read_string(dn, "phy-mode", &phy_mode_str); + if (ret < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return ret; + } -static int bcmgenet_fixed_phy_link_update(struct net_device *dev, - struct fixed_phy_status *status) -{ - if (dev && dev->phydev && status) - status->link = dev->phydev->link; + priv->phy_interface = PHY_INTERFACE_MODE_NA; + if (!strcasecmp(phy_mode_str, "internal")) + priv->internal_phy = true; + } + + /* Make sure we initialize MoCA PHYs with a link down */ + if (phy_mode == PHY_INTERFACE_MODE_MOCA) { + phydev = of_phy_find_device(dn); + if (phydev) + phydev->link = 0; + } return 0; } @@ -580,12 +591,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) return -ENODEV; } - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) { - ret = fixed_phy_set_link_update( - phydev, bcmgenet_fixed_phy_link_update); - if (!ret) - phydev->link = 0; - } + /* Make sure we initialize MoCA PHYs with a link down */ + phydev->link = 0; + } priv->phydev = phydev; @@ -615,10 +623,6 @@ int bcmgenet_mii_init(struct net_device *dev) ret = bcmgenet_mii_bus_init(priv); if (ret) - goto out_free; - - ret = bcmgenet_mii_probe(dev); - if (ret) goto out; return 0; @@ -626,7 +630,6 @@ int bcmgenet_mii_init(struct net_device *dev) out: of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); -out_free: kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return ret; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index bf9eb2ecf960..88c1e1a834f8 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2774,8 +2774,7 @@ static const struct macb_config emac_config = { static const struct macb_config zynqmp_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | - 
MACB_CAPS_JUMBO, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -2783,8 +2782,7 @@ static const struct macb_config zynqmp_config = { }; static const struct macb_config zynq_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | - MACB_CAPS_NO_GIGABIT_HALF, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 1895b6b2addd..6e1faea00ca8 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -399,7 +399,7 @@ #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 -#define MACB_CAPS_JUMBO 0x00000008 +#define MACB_CAPS_JUMBO 0x00000010 /* Bit manipulation macros */ #define MACB_BIT(name) \ diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index c4d6bbe9458d..358442087878 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -37,6 +37,8 @@ config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT default ARCH_THUNDER + select PHYLIB + select MDIO_OCTEON ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 629f75d70353..58de4443eac0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -767,6 +767,7 @@ struct adapter { bool tid_release_task_busy; struct dentry *debugfs_root; + u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */ spinlock_t stats_lock; spinlock_t win0_lock ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6074680bc985..052c660aca80 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = { "Auto Negotiated" }; +static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state) +{ + if (state == CXGB4_DCB_STATE_FW_ALLSYNCED || + state == CXGB4_DCB_STATE_HOST) + return true; + else + return false; +} + /* Initialize a port's Data Center Bridging state. Typically used after a * Link Down event. 
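 * With cxgb4_dcb_state_synced() above, the dcbnl callbacks now treat
 * both CXGB4_DCB_STATE_FW_ALLSYNCED (firmware-negotiated) and
 * CXGB4_DCB_STATE_HOST (host-managed) as fully configured states.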
*/ @@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; - if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED || + if (!cxgb4_dcb_state_synced(dcb->state) || priority >= CXGB4_MAX_PRIORITY) *pfccfg = 0; else @@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) struct adapter *adap = pi->adapter; int err; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED || + if (!cxgb4_dcb_state_synced(pi->dcb.state) || priority >= CXGB4_MAX_PRIORITY) return; @@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return false; return pi->dcb.pfcen != 0; @@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, struct adapter *adap = pi->adapter; int i; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 0; for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { @@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, */ static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id) { - return __cxgb4_getapp(dev, app_idtype, app_id, 0); + /* Convert app_idtype to firmware format before querying */ + return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ? + app_idtype : 3, app_id, 0); } /* Write a new Application User Priority Map for the specified Application ID @@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, int i, err; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return -EINVAL; /* DCB info gets thrown away on link up */ @@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; - if (dcb_subtype && !(dcb->msgs & dcb_subtype)) - return 0; + if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED) + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; - return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && + return (cxgb4_dcb_state_synced(dcb->state) && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request) /* Can't enable DCB if we haven't successfully negotiated it. 
*/ - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; /* There's currently no mechanism to allow for the firmware DCBX @@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev, struct adapter *adap = pi->adapter; int i, err = 0; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; info->willing = 0; @@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) struct adapter *adap = pi->adapter; int i, err = 0; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { @@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) if (!pcmd.u.dcb.app_priority.protocolid) break; - table[i].selector = pcmd.u.dcb.app_priority.sel_field; + table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1); table[i].protocol = be16_to_cpu(pcmd.u.dcb.app_priority.protocolid); table[i].priority = @@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) for (i = 0; i < CXGB4_MAX_PRIORITY; i++) pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i]; + pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported; + return 0; } @@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc) */ pfc->pfc_en = bitswap_1(pi->dcb.pfcen); + pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported; + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index a11485fbb33f..f701a6f20c6a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx) return 0; } +static int cim_la_show_t6(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Inst Data PC LS0Stat " + "LS0Addr LS0Data LS1Stat LS1Addr LS1Data\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n", + (p[9] >> 16) & 0xff, /* Status */ + p[9] & 0xffff, p[8] >> 16, /* Inst */ + p[8] & 0xffff, p[7] >> 16, /* Data */ + p[7] & 0xffff, p[6] >> 16, /* PC */ + p[2], p[1], p[0], /* LS0 Stat, Addr and Data */ + p[5], p[4], p[3]); /* LS1 Stat, Addr and Data */ + } + return 0; +} + +static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Inst Data PC\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %08x %08x %08x\n", + p[3] & 0xff, p[2], p[1], p[0]); + seq_printf(seq, " %02x %02x%06x %02x%06x %02x%06x\n", + (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, + p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); + seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x\n", + (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, + p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, + p[6] >> 16); + } + return 0; +} + static int cim_la_open(struct inode *inode, struct file *file) { int ret; @@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file) if (ret) return ret; - p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1, - cfg & UPDBGLACAPTPCONLY_F ? 
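/* On T4/T5 each CIM logic-analyzer record is 8 32-bit words, so the
 * table holds cim_la_size / 8 rows; the T6 branch below switches to
 * 10-word records (cim_la_show_t6() prints p[0]..p[9]) and rounds the
 * integer division up by one extra row.
 */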
- cim_la_show_3in1 : cim_la_show); + if (is_t6(adap->params.chip)) { + /* +1 to account for integer division of CIMLA_SIZE/10 */ + p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1, + 10 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? + cim_la_show_pc_t6 : cim_la_show_t6); + } else { + p = seq_open_tab(file, adap->params.cim_la_size / 8, + 8 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 : + cim_la_show); + } if (!p) return -ENOMEM; @@ -2340,6 +2388,8 @@ int t4_setup_debugfs(struct adapter *adap) de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, &flash_debugfs_fops, adap->params.sf_size); + debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR, + adap->debugfs_root, &adap->use_bd); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 687acf71fa15..5eedb98ff581 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -925,6 +925,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); unsigned int mbox = PCIE_FW_MASTER_M + 1; + u32 pcie_fw; + unsigned int master; + u8 master_vld = 0; + + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + master = PCIE_FW_MASTER_G(pcie_fw); + if (pcie_fw & PCIE_FW_MASTER_VLD_F) + master_vld = 1; + /* if csiostor is the master return */ + if (master_vld && (master != adap->pf)) { + dev_warn(adap->pdev_dev, + "cxgb4 driver needs to be loaded as MASTER to support FW flash\n"); + return -EOPNOTSUPP; + } ef->data[sizeof(ef->data) - 1] = '\0'; ret = request_firmware(&fw, ef->data, adap->pdev_dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 351f3b1bf800..d582e175dfb6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4757,7 +4757,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ cfg_queues(adapter); - adapter->l2t = t4_init_l2t(); + adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); if (!adapter->l2t) { /* We tolerate a lack of L2T, giving up some functionality */ dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 252efc29321f..ac27898c6ab0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -51,24 +51,17 @@ #define VLAN_NONE 0xfff /* identifies sync vs async L2T_WRITE_REQs */ -#define F_SYNC_WR (1 << 12) - -enum { - L2T_STATE_VALID, /* entry is up to date */ - L2T_STATE_STALE, /* entry may be used but needs revalidation */ - L2T_STATE_RESOLVING, /* entry needs address resolution */ - L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ - - /* when state is one of the below the entry is not hashed */ - L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ - L2T_STATE_UNUSED /* entry not in use */ -}; +#define SYNC_WR_S 12 +#define SYNC_WR_V(x) ((x) << SYNC_WR_S) +#define SYNC_WR_F SYNC_WR_V(1) struct l2t_data { + unsigned int l2t_start; /* start index of our piece of the L2T */ + unsigned int l2t_size; /* number of entries in l2tab */ rwlock_t lock; atomic_t nfree; /* number of free entries */ struct l2t_entry *rover; /* starting point for next allocation */ - struct l2t_entry l2tab[L2T_SIZE]; + struct 
l2t_entry l2tab[0]; /* MUST BE LAST */ }; static inline unsigned int vlan_prio(const struct l2t_entry *e) @@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) /* * To avoid having to check address families we do not allow v4 and v6 * neighbors to be on the same hash chain. We keep v4 entries in the first - * half of available hash buckets and v6 in the second. + * half of available hash buckets and v6 in the second. We need at least two + * entries in our L2T for this scheme to work. */ enum { - L2T_SZ_HALF = L2T_SIZE / 2, - L2T_HASH_MASK = L2T_SZ_HALF - 1 + L2T_MIN_HASH_BUCKETS = 2, }; -static inline unsigned int arp_hash(const u32 *key, int ifindex) +static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key, + int ifindex) { - return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; + unsigned int l2t_size_half = d->l2t_size / 2; + + return jhash_2words(*key, ifindex, 0) % l2t_size_half; } -static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key, + int ifindex) { + unsigned int l2t_size_half = d->l2t_size / 2; u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; - return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); + return (l2t_size_half + + (jhash_2words(xor, ifindex, 0) % l2t_size_half)); } -static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +static unsigned int addr_hash(struct l2t_data *d, const u32 *addr, + int addr_len, int ifindex) { - return addr_len == 4 ? arp_hash(addr, ifindex) : - ipv6_hash(addr, ifindex); + return addr_len == 4 ? arp_hash(d, addr, ifindex) : + ipv6_hash(d, addr, ifindex); } /* @@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n) */ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) { + struct l2t_data *d = adap->l2t; + unsigned int l2t_idx = e->idx + d->l2t_start; struct sk_buff *skb; struct cpl_l2t_write_req *req; @@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, - e->idx | (sync ? F_SYNC_WR : 0) | + l2t_idx | (sync ? 
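/* The L2T index occupies the low 12 bits of the TID (L2T_SIZE is 4096),
 * so the synchronous-write marker rides one bit above it at
 * SYNC_WR_S == 12 and is recovered in do_l2t_write_rpl() with
 * "tid & SYNC_WR_F".
 */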
SYNC_WR_F : 0) | TID_QID_V(adap->sge.fw_evtq.abs_id))); req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync)); - req->l2t_idx = htons(e->idx); + req->l2t_idx = htons(l2t_idx); req->vlan = htons(e->vlan); if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK)) memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); @@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e) */ void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) { + struct l2t_data *d = adap->l2t; unsigned int tid = GET_TID(rpl); - unsigned int idx = tid & (L2T_SIZE - 1); + unsigned int l2t_idx = tid % L2T_SIZE; if (unlikely(rpl->status != CPL_ERR_NONE)) { dev_err(adap->pdev_dev, "Unexpected L2T_WRITE_RPL status %u for entry %u\n", - rpl->status, idx); + rpl->status, l2t_idx); return; } - if (tid & F_SYNC_WR) { - struct l2t_entry *e = &adap->l2t->l2tab[idx]; + if (tid & SYNC_WR_F) { + struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start]; spin_lock(&e->lock); if (e->state != L2T_STATE_SWITCHING) { @@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d) return NULL; /* there's definitely a free entry */ - for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e) if (atomic_read(&e->refcnt) == 0) goto found; @@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *)neigh->primary_key; int ifidx = neigh->dev->ifindex; - int hash = addr_hash(addr, addr_len, ifidx); + int hash = addr_hash(d, addr, addr_len, ifidx); if (neigh->dev->flags & IFF_LOOPBACK) lport = netdev2pinfo(physdev)->tx_chan + 4; @@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; int ifidx = neigh->dev->ifindex; - int hash = addr_hash(addr, addr_len, ifidx); + int hash = addr_hash(d, addr, addr_len, ifidx); read_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) @@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, return write_l2e(adap, e, 0); } -struct l2t_data *t4_init_l2t(void) +struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) { + unsigned int l2t_size; int i; struct l2t_data *d; - d = t4_alloc_mem(sizeof(*d)); + if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE) + return NULL; + l2t_size = l2t_end - l2t_start + 1; + if (l2t_size < L2T_MIN_HASH_BUCKETS) + return NULL; + + d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry)); if (!d) return NULL; + d->l2t_start = l2t_start; + d->l2t_size = l2t_size; + d->rover = d->l2tab; - atomic_set(&d->nfree, L2T_SIZE); + atomic_set(&d->nfree, l2t_size); rwlock_init(&d->lock); - for (i = 0; i < L2T_SIZE; ++i) { + for (i = 0; i < d->l2t_size; ++i) { d->l2tab[i].idx = i; d->l2tab[i].state = L2T_STATE_UNUSED; spin_lock_init(&d->l2tab[i].lock); @@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void) static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) { - struct l2t_entry *l2tab = seq->private; + struct l2t_data *d = seq->private; - return pos >= L2T_SIZE ? NULL : &l2tab[pos]; + return pos >= d->l2t_size ? 
NULL : &d->l2tab[pos]; } static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) @@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v) "Ethernet address VLAN/P LP State Users Port\n"); else { char ip[60]; + struct l2t_data *d = seq->private; struct l2t_entry *e = v; spin_lock_bh(&e->lock); @@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v) else sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", - e->idx, ip, e->dmac, + e->idx + d->l2t_start, ip, e->dmac, e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, l2e_state(e), atomic_read(&e->refcnt), e->neigh ? e->neigh->dev->name : ""); @@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file) struct adapter *adap = inode->i_private; struct seq_file *seq = file->private_data; - seq->private = adap->l2t->l2tab; + seq->private = adap->l2t; } return rc; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index a30126ce90cb..b38dc526aad5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -39,6 +39,20 @@ #include <linux/if_ether.h> #include <linux/atomic.h> +enum { L2T_SIZE = 4096 }; /* # of L2T entries */ + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + L2T_STATE_NOARP, /* Netdev down or removed*/ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + struct adapter; struct l2t_data; struct neighbour; @@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl; */ struct l2t_entry { u16 state; /* entry state */ - u16 idx; /* entry index */ + u16 idx; /* entry index within in-memory table */ u32 addr[4]; /* next hop IP or IPv6 address */ int ifindex; /* neighbor's net_device's ifindex */ struct neighbour *neigh; /* associated neighbour */ @@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, u8 port, u8 *eth_addr); -struct l2t_data *t4_init_l2t(void); +struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end); void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); extern const struct file_operations t4_l2t_fops; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 942db078f33a..d4248d74f560 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, */ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 wr_mid; + u32 wr_mid, ctrl0; u64 cntrl, *end; int qidx, credits; unsigned int flits, ndesc; @@ -1274,9 +1274,15 @@ out_free: dev_kfree_skb_any(skb); #endif /* CONFIG_CHELSIO_T4_FCOE */ } - cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | - TXPKT_INTF_V(pi->tx_chan) | - TXPKT_PF_V(adap->pf)); + ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | + TXPKT_PF_V(adap->pf); +#ifdef CONFIG_CHELSIO_T4_DCB + if (is_t4(adap->params.chip)) + ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); + else + ctrl0 |= 
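/* The queue's DCB priority is carried in the CPL_TX_PKT ctrl0
 * outer-VLAN index field, which presumably sits at a different offset
 * on T5 and later chips, hence the chip-specific TXPKT_OVLAN_IDX_V vs.
 * TXPKT_T5_OVLAN_IDX_V encodings.
 */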
TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); +#endif + cpl->ctrl0 = htonl(ctrl0); cpl->pack = htons(0); cpl->len = htons(skb->len); cpl->ctrl1 = cpu_to_be64(cntrl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2b52aae7ec86..800bd489dd75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1345,9 +1345,9 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5a80, 0x5a9c, 0x5b94, 0x5bfc, 0x5c10, 0x5ec0, - 0x5ec8, 0x5ec8, + 0x5ec8, 0x5ecc, 0x6000, 0x6040, - 0x6058, 0x6154, + 0x6058, 0x615c, 0x7700, 0x7798, 0x77c0, 0x7880, 0x78cc, 0x78fc, @@ -1371,20 +1371,22 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x9f00, 0x9f6c, 0x9f80, 0xa020, 0xd004, 0xd03c, + 0xd100, 0xd118, + 0xd200, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, 0x11000, 0x11014, 0x11048, 0x11110, 0x11118, 0x1117c, - 0x11190, 0x11260, + 0x11190, 0x11264, 0x11300, 0x1130c, - 0x12000, 0x1205c, + 0x12000, 0x1206c, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x19124, 0x19150, 0x191b0, 0x191d0, 0x191e8, - 0x19238, 0x192b8, + 0x19238, 0x192bc, 0x193f8, 0x19474, 0x19490, 0x194cc, 0x194f0, 0x194f8, @@ -1466,7 +1468,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x30200, 0x30318, 0x30400, 0x3052c, 0x30540, 0x3061c, - 0x30800, 0x3088c, + 0x30800, 0x30890, 0x308c0, 0x30908, 0x30910, 0x309b8, 0x30a00, 0x30a04, @@ -1544,7 +1546,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x34200, 0x34318, 0x34400, 0x3452c, 0x34540, 0x3461c, - 0x34800, 0x3488c, + 0x34800, 0x34890, 0x348c0, 0x34908, 0x34910, 0x349b8, 0x34a00, 0x34a04, @@ -3687,6 +3689,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map) return 0; } +static unsigned int t4_use_ldst(struct adapter *adap) +{ + return (adap->flags & FW_OK) || !adap->use_bd; +} + /** * t4_fw_tp_pio_rw - Access TP PIO through LDST * @adap: the adapter @@ -3730,7 +3737,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, */ void t4_read_rss_key(struct adapter *adap, u32 *key) { - if (adap->flags & FW_OK) + if (t4_use_ldst(adap)) t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1); else t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, @@ -3760,7 +3767,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3)) rss_key_addr_cnt = 32; - if (adap->flags & FW_OK) + if (t4_use_ldst(adap)) t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0); else t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, @@ -3789,7 +3796,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp) { - if (adapter->flags & FW_OK) + if (t4_use_ldst(adapter)) t4_fw_tp_pio_rw(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, 1); else @@ -3829,7 +3836,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, /* Grab the VFL/VFH values ... 
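 * via t4_use_ldst(), which picks between the firmware LDST mailbox path
 * and direct backdoor indirect-register reads; the new "use_backdoor"
 * debugfs knob (adap->use_bd) requests the backdoor path when the
 * firmware path cannot be used.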
*/ - if (adapter->flags & FW_OK) { + if (t4_use_ldst(adapter)) { t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1); t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1); } else { @@ -3850,7 +3857,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter) { u32 pfmap; - if (adapter->flags & FW_OK) + if (t4_use_ldst(adapter)) t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1); else t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, @@ -3868,7 +3875,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter) { u32 pfmask; - if (adapter->flags & FW_OK) + if (t4_use_ldst(adapter)) t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1); else t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, @@ -3924,43 +3931,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, */ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) { - /* T6 and later has 2 channels */ - if (adap->params.arch.nchan == NCHAN) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_cong_drops, 8, - TP_MIB_TNL_CNG_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_tx_drops, 4, - TP_MIB_TNL_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_vlan_drops, 4, - TP_MIB_OFD_VLN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp6_in_errs, 4, - TP_MIB_TCP_V6IN_ERR_0_A); - } else { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_cong_drops, 2, - TP_MIB_TNL_CNG_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_chan_drops, 2, - TP_MIB_OFD_CHN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_vlan_drops, 2, - TP_MIB_OFD_VLN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A); - } + int nchan = adap->params.arch.nchan; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A); } @@ -3974,16 +3963,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) */ void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) { - /* T6 and later 
has 2 channels */ - if (adap->params.arch.nchan == NCHAN) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, - 8, TP_MIB_CPL_IN_REQ_0_A); - } else { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, - 2, TP_MIB_CPL_IN_REQ_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, - 2, TP_MIB_CPL_OUT_RSP_0_A); - } + int nchan = adap->params.arch.nchan; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, + nchan, TP_MIB_CPL_IN_REQ_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, + nchan, TP_MIB_CPL_OUT_RSP_0_A); + } /** @@ -6294,7 +6280,7 @@ int t4_init_tp_params(struct adapter *adap) /* Cache the adapter's Compressed Filter Mode and global Incress * Configuration. */ - if (adap->flags & FW_OK) { + if (t4_use_ldst(adap)) { t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1, TP_VLAN_PRI_MAP_A, 1); t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index c8488f430d19..640369df8b3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -47,7 +47,6 @@ enum { TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ - L2T_SIZE = 4096, /* # of L2T entries */ PM_NSTATS = 5, /* # of PM stats */ MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 132cb8fc0bf7..b99144afd4ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -660,6 +660,9 @@ struct cpl_tx_pkt { #define TXPKT_OVLAN_IDX_S 12 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S) +#define TXPKT_T5_OVLAN_IDX_S 12 +#define TXPKT_T5_OVLAN_IDX_V(x) ((x) << TXPKT_T5_OVLAN_IDX_S) + #define TXPKT_INTF_S 16 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index d7ca106927b0..8353a6cbfcc2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ @@ -155,6 +157,22 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ + + /* T6 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x6001), + CH_PCI_ID_TABLE_FENTRY(0x6002), + CH_PCI_ID_TABLE_FENTRY(0x6003), + CH_PCI_ID_TABLE_FENTRY(0x6004), + CH_PCI_ID_TABLE_FENTRY(0x6005), + CH_PCI_ID_TABLE_FENTRY(0x6006), + CH_PCI_ID_TABLE_FENTRY(0x6007), + CH_PCI_ID_TABLE_FENTRY(0x6009), + CH_PCI_ID_TABLE_FENTRY(0x600d), + CH_PCI_ID_TABLE_FENTRY(0x6010), + CH_PCI_ID_TABLE_FENTRY(0x6011), + CH_PCI_ID_TABLE_FENTRY(0x6014), + CH_PCI_ID_TABLE_FENTRY(0x6015), CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 375a825573b0..ed8a8f350113 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -338,6 +338,11 @@ #define EGRTHRESHOLDPACKING_G(x) \ (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M) +#define T6_EGRTHRESHOLDPACKING_S 16 +#define T6_EGRTHRESHOLDPACKING_M 0xffU +#define T6_EGRTHRESHOLDPACKING_G(x) \ + (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M) + #define SGE_TIMESTAMP_LO_A 0x1098 #define SGE_TIMESTAMP_HI_A 0x109c diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index ad53e5ad2acd..fa3786a9d30e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) rspq->unhandled_irqs++; val = CIDXINC_V(work_done) | SEINTARM_V(intr_params); - if (is_t4(rspq->adapter->params.chip)) { + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!rspq->bar2_addr)) { t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val | INGRESSQID_V((u32)rspq->cntxt_id)); @@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter) } val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params); - if (is_t4(adapter->params.chip)) + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!intrq->bar2_addr)) { t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val | INGRESSQID_V(intrq->cntxt_id)); - else { + } else { writel(val | INGRESSQID_V(intrq->bar2_qid), intrq->bar2_addr + SGE_UDB_GTS); wmb(); @@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter) * give it more Free List entries. (Note that the SGE's Egress * Congestion Threshold is in units of 2 Free List pointers.) */ - s->fl_starve_thres - = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1; + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { + case CHELSIO_T4: + s->fl_starve_thres = + EGRTHRESHOLD_G(sge_params->sge_congestion_control); + break; + case CHELSIO_T5: + s->fl_starve_thres = + EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); + break; + case CHELSIO_T6: + default: + s->fl_starve_thres = + T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control); + break; + } + s->fl_starve_thres = s->fl_starve_thres * 2 + 1; /* * Set up tasklet timers. 
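Editorial sketch (not part of the patch): the cxgb4vf hunk above reads the free-list starvation threshold out of the SGE congestion-control register, where the field location differs per chip revision — EGRTHRESHOLD_G() on T4, EGRTHRESHOLDPACKING_G() on T5, and the new T6_EGRTHRESHOLDPACKING_G() (bits 23:16, per the t4_regs.h hunk above) on T6 — and then scales the result by *2 + 1 because the hardware counts the threshold in units of 2 free-list pointers. A minimal standalone illustration of that shift-and-mask pattern, using the T6 constants from the hunk; the register value below is a made-up example, not read from hardware:

#include <stdio.h>

#define T6_EGRTHRESHOLDPACKING_S 16
#define T6_EGRTHRESHOLDPACKING_M 0xffU
#define T6_EGRTHRESHOLDPACKING_G(x) \
	(((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)

int main(void)
{
	unsigned int reg_val = 0x00240000;	/* hypothetical SGE_CONG image */
	unsigned int thres = T6_EGRTHRESHOLDPACKING_G(reg_val);

	/* The threshold is in units of 2 free-list pointers, hence the
	 * *2 + 1 starvation watermark, as in t4vf_sge_init() above.
	 */
	printf("fl_starve_thres = %u\n", thres * 2 + 1);
	return 0;
}

Compiled standalone, this prints fl_starve_thres = 73 for the example value: the 8-bit field is 0x24 (36), doubled plus one.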
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f3f1601a76f3..f44a39c40642 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; - ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; if (rxcoal->use_adaptive_rx_coalesce) ecmd->use_adaptive_rx_coalesce = 1; @@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev, return 0; } +static int enic_coalesce_valid(struct enic *enic, + struct ethtool_coalesce *ec) +{ + u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); + u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_high); + u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_low); + + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -EINVAL; + + if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) && + ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_low > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_high > coalesce_usecs_max)) + netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. 
Setting max value.\n", + coalesce_usecs_max); + + if (ec->rx_coalesce_usecs_high && + (rx_coalesce_usecs_high < + rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) + return -EINVAL; + + return 0; +} + static int enic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) { @@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev, u32 rx_coalesce_usecs_high; u32 coalesce_usecs_max; unsigned int i, intr; + int ret; struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; + ret = enic_coalesce_valid(enic, ecmd); + if (ret) + return ret; coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, coalesce_usecs_max); @@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev, rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high, coalesce_usecs_max); - switch (vnic_dev_get_intr_mode(enic->vdev)) { - case VNIC_DEV_INTR_MODE_INTX: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - intr = enic_legacy_io_intr(); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSI: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - vnic_intr_coalescing_timer_set(&enic->intr[0], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSIX: - if (ecmd->rx_coalesce_usecs_high && - (rx_coalesce_usecs_high < - rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) - return -EINVAL; - + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - } - - rxcoal->use_adaptive_rx_coalesce = - !!ecmd->use_adaptive_rx_coalesce; - if (!rxcoal->use_adaptive_rx_coalesce) - enic_intr_coal_set_rx(enic, rx_coalesce_usecs); - - if (ecmd->rx_coalesce_usecs_high) { - rxcoal->range_end = rx_coalesce_usecs_high; - rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; - rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + - ENIC_AIC_LARGE_PKT_DIFF; + tx_coalesce_usecs); } - break; - default: - break; + enic->tx_coalesce_usecs = tx_coalesce_usecs; + } + rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce; + if (!rxcoal->use_adaptive_rx_coalesce) + enic_intr_coal_set_rx(enic, rx_coalesce_usecs); + if (ecmd->rx_coalesce_usecs_high) { + rxcoal->range_end = rx_coalesce_usecs_high; + rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; + rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + + ENIC_AIC_LARGE_PKT_DIFF; } - enic->tx_coalesce_usecs = tx_coalesce_usecs; enic->rx_coalesce_usecs = rx_coalesce_usecs; return 0; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 918a8e42139b..8f646e4e968b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, return 0; } +static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + unsigned int intr = enic_msix_rq_intr(enic, rq->index); + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + u32 timer = cq->tobe_rx_coal_timeval; + + if 
(cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { + vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); + cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; + } +} + +static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; + int index; + u32 timer; + u32 range_start; + u32 traffic; + u64 delta; + ktime_t now = ktime_get(); + + delta = ktime_us_delta(now, cq->prev_ts); + if (delta < ENIC_AIC_TS_BREAK) + return; + cq->prev_ts = now; + + traffic = pkt_size_counter->large_pkt_bytes_cnt + + pkt_size_counter->small_pkt_bytes_cnt; + /* The table takes Mbps + * traffic *= 8 => bits + * traffic *= (10^6 / delta) => bps + * traffic /= 10^6 => Mbps + * + * Combining, traffic *= (8 / delta) + */ + + traffic <<= 3; + traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; + + for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) + if (traffic < mod_table[index].rx_rate) + break; + range_start = (pkt_size_counter->small_pkt_bytes_cnt > + pkt_size_counter->large_pkt_bytes_cnt << 1) ? + rx_coal->small_pkt_range_start : + rx_coal->large_pkt_range_start; + timer = range_start + ((rx_coal->range_end - range_start) * + mod_table[index].range_percent / 100); + /* Damping */ + cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; + + pkt_size_counter->large_pkt_bytes_cnt = 0; + pkt_size_counter->small_pkt_bytes_cnt = 0; +} + static int enic_poll(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1199,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget) if (err) rq_work_done = rq_work_to_do; + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. + */ + enic_calc_int_moderation(enic, &enic->rq[0]); if (rq_work_done < rq_work_to_do) { @@ -1207,70 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget) */ napi_complete(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); } return rq_work_done; } -static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - unsigned int intr = enic_msix_rq_intr(enic, rq->index); - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - u32 timer = cq->tobe_rx_coal_timeval; - - if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { - vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); - cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; - } -} - -static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; - int index; - u32 timer; - u32 range_start; - u32 traffic; - u64 delta; - ktime_t now = ktime_get(); - - delta = ktime_us_delta(now, cq->prev_ts); - if (delta < ENIC_AIC_TS_BREAK) - return; - cq->prev_ts = now; - - traffic = pkt_size_counter->large_pkt_bytes_cnt + - pkt_size_counter->small_pkt_bytes_cnt; - /* The table takes Mbps - * traffic *= 8 => bits - * traffic *= (10^6 / delta) => bps - * traffic /= 10^6 => Mbps - * - * Combining, traffic *= (8 / delta) - */ - - traffic <<= 3; - traffic = delta > UINT_MAX ? 
0 : traffic / (u32)delta; - - for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) - if (traffic < mod_table[index].rx_rate) - break; - range_start = (pkt_size_counter->small_pkt_bytes_cnt > - pkt_size_counter->large_pkt_bytes_cnt << 1) ? - rx_coal->small_pkt_range_start : - rx_coal->large_pkt_range_start; - timer = range_start + ((rx_coal->range_end - range_start) * - mod_table[index].range_percent / 100); - /* Damping */ - cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; - - pkt_size_counter->large_pkt_bytes_cnt = 0; - pkt_size_counter->small_pkt_bytes_cnt = 0; -} - #ifdef CONFIG_RFS_ACCEL static void enic_free_rx_cpu_rmap(struct enic *enic) { @@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) if (err) work_done = work_to_do; if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - /* Call the function which refreshes - * the intr coalescing timer value based on - * the traffic. This is supported only in - * the case of MSI-x mode + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. */ enic_calc_int_moderation(enic, &enic->rq[rq]); @@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic) int index = -1; struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - /* If intr mode is not MSIX, do not do adaptive coalescing */ - if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) { - netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing"); - return; - } - /* 1. Read the link speed from fw * 2. Pick the default range for the speed * 3. Update it in enic->rx_coalesce_setting diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index d1017509b08a..f7b42483921c 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -604,19 +604,7 @@ static struct pci_driver pci_driver = { .probe = ec_bhf_probe, .remove = ec_bhf_remove, }; - -static int __init ec_bhf_init(void) -{ - return pci_register_driver(&pci_driver); -} - -static void __exit ec_bhf_exit(void) -{ - pci_unregister_driver(&pci_driver); -} - -module_init(ec_bhf_init); -module_exit(ec_bhf_exit); +module_pci_driver(pci_driver); module_param(polling_frequency, long, S_IRUGO); MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8d12b41b3b19..0a27805cbbbd 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.6.0.2" +#define DRV_VER "10.6.0.3" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -105,6 +105,8 @@ #define MAX_VFS 30 /* Max VFs supported by BE3 FW */ #define FW_VER_LEN 32 +#define CNTL_SERIAL_NUM_WORDS 8 /* Controller serial number words */ +#define CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */ #define RSS_INDIR_TABLE_LEN 128 #define RSS_HASH_KEY_LEN 40 @@ -228,6 +230,7 @@ struct be_mcc_obj { struct be_tx_stats { u64 tx_bytes; u64 tx_pkts; + u64 tx_vxlan_offload_pkts; u64 tx_reqs; u64 tx_compl; ulong tx_jiffies; @@ -275,6 +278,7 @@ struct be_rx_page_info { struct be_rx_stats { u64 rx_bytes; u64 rx_pkts; + u64 rx_vxlan_offload_pkts; u32 rx_drops_no_skbs; /* skb allocation errors */ u32 rx_drops_no_frags; /* HW has no fetched frags */ u32 rx_post_fail; /* page post alloc failures */ @@ -590,6 +594,7 @@ 
struct be_adapter { struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; + u16 serial_num[CNTL_SERIAL_NUM_WORDS]; }; #define be_physfn(adapter) (!adapter->virtfn) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 9eac3227d2ca..3be1fbdcdd02 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb) return wrb->payload.embedded_payload; } -static void be_mcc_notify(struct be_adapter *adapter) +static int be_mcc_notify(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; if (be_check_error(adapter, BE_ERROR_ANY)) - return; + return -EIO; val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; wmb(); iowrite32(val, adapter->db + DB_MCCQ_OFFSET); + + return 0; } /* To check if valid bit is set, check the entire word as we don't know @@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter, return; } + if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE && + subsystem == CMD_SUBSYSTEM_LOWLEVEL) { + complete(&adapter->et_cmd_compl); + return; + } + if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || opcode == OPCODE_COMMON_WRITE_OBJECT) && subsystem == CMD_SUBSYSTEM_COMMON) { @@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto out; status = be_mcc_wait_compl(adapter); if (status == -EIO) @@ -1547,7 +1557,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) else hdr->version = 2; - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; + adapter->stats_cmd_sent = true; err: @@ -1583,7 +1596,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); req->cmd_params.params.reset_stats = 0; - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; + adapter->stats_cmd_sent = true; err: @@ -1687,8 +1703,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), wrb, NULL); - be_mcc_notify(adapter); - + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1860,7 +1875,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, cpu_to_le32(set_eqd[i].delay_multiplier); } - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1953,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } - status = be_mcc_notify_wait(adapter); + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -2320,7 +2335,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + sizeof(struct lancer_cmd_req_write_object))); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, @@ -2491,7 +2509,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, req->params.op_code 
= cpu_to_le32(flash_opcode); req->params.data_buf_size = cpu_to_le32(buf_size); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, @@ -2585,7 +2606,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; - goto err; + goto err_unlock; } req = embedded_payload(wrb); @@ -2599,8 +2620,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, req->loopback_type = loopback_type; req->loopback_state = enable; - status = be_mcc_notify_wait(adapter); -err: + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + + spin_unlock_bh(&adapter->mcc_lock); + + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) + status = -ETIMEDOUT; + + return status; + +err_unlock: spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2636,7 +2668,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; spin_unlock_bh(&adapter->mcc_lock); @@ -2818,10 +2852,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) struct be_mcc_wrb *wrb; struct be_cmd_req_cntl_attribs *req; struct be_cmd_resp_cntl_attribs *resp; - int status; + int status, i; int payload_len = max(sizeof(*req), sizeof(*resp)); struct mgmt_controller_attrib *attribs; struct be_dma_mem attribs_cmd; + u32 *serial_num; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -2852,6 +2887,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; + serial_num = attribs->hba_attribs.controller_serial_number; + for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) + adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & + (BIT_MASK(16) - 1); } err: diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 2716e6f30d9a..36d835bd5f3c 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1495,6 +1495,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 { #define BE_PME_D3COLD_CAP 0x80 /********************** LoopBack test *********************/ +#define SET_LB_MODE_TIMEOUT 12000 + struct be_cmd_req_loopback_test { struct be_cmd_req_hdr hdr; u32 loopback_type; @@ -1635,10 +1637,12 @@ struct be_cmd_req_set_qos { struct mgmt_hba_attribs { u32 rsvd0[24]; u8 controller_model_number[32]; - u32 rsvd1[79]; - u8 rsvd2[3]; + u32 rsvd1[16]; + u32 controller_serial_number[8]; + u32 rsvd2[55]; + u8 rsvd3[3]; u8 phy_port; - u32 rsvd3[13]; + u32 rsvd4[13]; } __packed; struct mgmt_controller_attrib { @@ -1758,6 +1762,7 @@ struct be_cmd_req_set_mac_list { /*********************** HSW Config ***********************/ #define PORT_FWD_TYPE_VEPA 0x3 #define PORT_FWD_TYPE_VEB 0x2 +#define PORT_FWD_TYPE_PASSTHRU 0x1 #define ENABLE_MAC_SPOOFCHK 0x2 #define DISABLE_MAC_SPOOFCHK 0x3 diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b2476dbfd103..2c9ed1710ba6 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -138,6 +138,7 @@ static const struct 
be_ethtool_stat et_stats[] = { static const struct be_ethtool_stat et_rx_stats[] = { {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ + {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)}, {DRVSTAT_RX_INFO(rx_compl)}, {DRVSTAT_RX_INFO(rx_compl_err)}, {DRVSTAT_RX_INFO(rx_mcast_pkts)}, @@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = { {DRVSTAT_TX_INFO(tx_internal_parity_err)}, {DRVSTAT_TX_INFO(tx_bytes)}, {DRVSTAT_TX_INFO(tx_pkts)}, + {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)}, /* Number of skbs queued for transmission by the driver */ {DRVSTAT_TX_INFO(tx_reqs)}, /* Number of times the TX queue was stopped due to lack @@ -847,10 +849,21 @@ err: static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, u64 *status) { - be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); + int ret; + + ret = be_cmd_set_loopback(adapter, adapter->hba_port_num, + loopback_type, 1); + if (ret) + return ret; + *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, loopback_type, 1500, 2, 0xabc); - be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); + + ret = be_cmd_set_loopback(adapter, adapter->hba_port_num, + BE_NO_LOOPBACK, 1); + if (ret) + return ret; + return *status; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f642426308c..d86bc5d52246 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -677,11 +677,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status) static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb) { struct be_tx_stats *stats = tx_stats(txo); + u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1; u64_stats_update_begin(&stats->sync); stats->tx_reqs++; stats->tx_bytes += skb->len; - stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? 
: 1); + stats->tx_pkts += tx_pkts; + if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) + stats->tx_vxlan_offload_pkts += tx_pkts; u64_stats_update_end(&stats->sync); } @@ -1254,7 +1257,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter, if (is_udp_pkt((*skb))) { struct udphdr *udp = udp_hdr((*skb)); - switch (udp->dest) { + switch (ntohs(udp->dest)) { case DHCP_CLIENT_PORT: os2bmc = is_dhcp_client_filt_enabled(adapter); goto done; @@ -1957,6 +1960,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo, stats->rx_compl++; stats->rx_bytes += rxcp->pkt_size; stats->rx_pkts++; + if (rxcp->tunneled) + stats->rx_vxlan_offload_pkts++; if (rxcp->pkt_type == BE_MULTICAST_PACKET) stats->rx_mcast_pkts++; if (rxcp->err) @@ -3529,15 +3534,15 @@ err: static int be_setup_wol(struct be_adapter *adapter, bool enable) { + struct device *dev = &adapter->pdev->dev; struct be_dma_mem cmd; - int status = 0; u8 mac[ETH_ALEN]; + int status; eth_zero_addr(mac); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL); + cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -3546,24 +3551,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); if (status) { - dev_err(&adapter->pdev->dev, - "Could not enable Wake-on-lan\n"); - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, - cmd.dma); - return status; + dev_err(dev, "Could not enable Wake-on-lan\n"); + goto err; } - status = be_cmd_enable_magic_wol(adapter, - adapter->netdev->dev_addr, - &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 1); - pci_enable_wake(adapter->pdev, PCI_D3cold, 1); } else { - status = be_cmd_enable_magic_wol(adapter, mac, &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 0); - pci_enable_wake(adapter->pdev, PCI_D3cold, 0); + ether_addr_copy(mac, adapter->netdev->dev_addr); } - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + status = be_cmd_enable_magic_wol(adapter, mac, &cmd); + pci_enable_wake(adapter->pdev, PCI_D3hot, enable); + pci_enable_wake(adapter->pdev, PCI_D3cold, enable); +err: + dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -4924,7 +4923,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, { if (!fhdr) { dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); - return -1; + return false; } /* First letter of the build version is used to identify @@ -5079,9 +5078,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, int status = 0; u8 hsw_mode; - if (!sriov_enabled(adapter)) - return 0; - /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { hsw_mode = PORT_FWD_TYPE_VEB; @@ -5091,6 +5087,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, NULL); if (status) return 0; + + if (hsw_mode == PORT_FWD_TYPE_PASSTHRU) + return 0; } return ndo_dflt_bridge_getlink(skb, pid, seq, dev, @@ -5225,6 +5224,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb, } #endif +static int be_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1; + struct be_adapter *adapter = netdev_priv(dev); + u8 *id; + + if (MAX_PHYS_ITEM_ID_LEN < id_len) + return -ENOSPC; + + ppid->id[0] = adapter->hba_port_num + 1; + id = &ppid->id[1]; + for (i = CNTL_SERIAL_NUM_WORDS - 1; i 
>= 0; + i--, id += CNTL_SERIAL_NUM_WORD_SZ) + memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ); + + ppid->id_len = id_len; + + return 0; +} + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@ -5255,6 +5275,7 @@ static const struct net_device_ops be_netdev_ops = { .ndo_del_vxlan_port = be_del_vxlan_port, .ndo_features_check = be_features_check, #endif + .ndo_get_phys_port_id = be_get_phys_port_id, }; static void be_netdev_init(struct net_device *netdev) @@ -5813,7 +5834,6 @@ static int be_pci_resume(struct pci_dev *pdev) if (status) return status; - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); status = be_resume(adapter); @@ -5893,7 +5913,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* Check if card is ok and fw is ready */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index f457a23d0bfb..1543cf0e8ef6 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) break; default: - /* - * register RXMTRL must be set in order to do V1 packets, - * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages and hardware does not support - * timestamping all packets => return error - */ fep->hwts_rx_en = 1; config.rx_filter = HWTSTAMP_FILTER_ALL; break; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2b7610f341b0..087ffcdc48a3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -109,15 +109,15 @@ #define TX_TIMEOUT (1*HZ) -const char gfar_driver_version[] = "1.3"; +const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); -static struct sk_buff *gfar_new_skb(struct net_device *dev, - dma_addr_t *bufaddr); +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi); +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, @@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, bdp->lstatus = cpu_to_be32(lstatus); } -static int gfar_init_bds(struct net_device *ndev) +static void gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; 
struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; - struct rxbd8 *rxbdp; u32 __iomem *rfbptr; int i, j; - dma_addr_t bufaddr; for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev) rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->cur_rx = rx_queue->rx_bd_base; - rx_queue->skb_currx = 0; - rxbdp = rx_queue->rx_bd_base; - for (j = 0; j < rx_queue->rx_ring_size; j++) { - struct sk_buff *skb = rx_queue->rx_skbuff[j]; + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0; - if (skb) { - bufaddr = be32_to_cpu(rxbdp->bufPtr); - } else { - skb = gfar_new_skb(ndev, &bufaddr); - if (!skb) { - netdev_err(ndev, "Can't allocate RX buffers\n"); - return -ENOMEM; - } - rx_queue->rx_skbuff[j] = skb; - } - - gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); - rxbdp++; - } + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); rx_queue->rfbptr = rfbptr; rfbptr += 2; } - - return 0; } static int gfar_alloc_skb_resources(struct net_device *ndev) { void *vaddr; dma_addr_t addr; - int i, j, k; + int i, j; struct gfar_private *priv = netdev_priv(ndev); struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; @@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) rx_queue = priv->rx_queue[i]; rx_queue->rx_bd_base = vaddr; rx_queue->rx_bd_dma_base = addr; - rx_queue->dev = ndev; + rx_queue->ndev = ndev; + rx_queue->dev = dev; addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; } @@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) if (!tx_queue->tx_skbuff) goto cleanup; - for (k = 0; k < tx_queue->tx_ring_size; k++) - tx_queue->tx_skbuff[k] = NULL; + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = - kmalloc_array(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_skbuff), - GFP_KERNEL); - if (!rx_queue->rx_skbuff) + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) goto cleanup; - - for (j = 0; j < rx_queue->rx_ring_size; j++) - rx_queue->rx_skbuff[j] = NULL; } - if (gfar_init_bds(ndev)) - goto cleanup; + gfar_init_bds(ndev); return 0; @@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv) } } -static void gfar_rx_buff_size_config(struct gfar_private *priv) +static void gfar_rx_offload_en(struct gfar_private *priv) { - int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; - /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; @@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv) if (priv->hwts_rx_en) priv->uses_rxfcb = 1; - - if (priv->uses_rxfcb) - frame_size += GMAC_FCB_LEN; - - frame_size += priv->padding; - - frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; - - priv->rx_buffer_size = frame_size; } static void gfar_mac_rx_config(struct gfar_private *priv) @@ -593,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv) if (!priv->rx_queue[i]) return -ENOMEM; - priv->rx_queue[i]->rx_skbuff = NULL; priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = 
priv->ndev; + priv->rx_queue[i]->ndev = priv->ndev; } return 0; } @@ -1187,12 +1153,11 @@ void gfar_mac_reset(struct gfar_private *priv) udelay(3); - /* Compute rx_buff_size based on config flags */ - gfar_rx_buff_size_config(priv); + gfar_rx_offload_en(priv); /* Initialize the max receive frame/buffer lengths */ - gfar_write(®s->maxfrm, priv->rx_buffer_size); - gfar_write(®s->mrblr, priv->rx_buffer_size); + gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); + gfar_write(®s->mrblr, GFAR_RXB_SIZE); /* Initialize the Minimum Frame Length Register */ gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); @@ -1200,12 +1165,11 @@ void gfar_mac_reset(struct gfar_private *priv) /* Initialize MACCFG2. */ tempval = MACCFG2_INIT_SETTINGS; - /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length + /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 + * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, + * and by checking RxBD[LG] and discarding larger than MAXFRM. */ - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) + if (gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; gfar_write(®s->maccfg2, tempval); @@ -1415,8 +1379,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->needed_headroom = GMAC_FCB_LEN; - priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; - /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; @@ -1599,10 +1561,7 @@ static int gfar_restore(struct device *dev) return 0; } - if (gfar_init_bds(ndev)) { - free_skb_resources(priv); - return -ENOMEM; - } + gfar_init_bds(ndev); gfar_mac_reset(priv); @@ -1893,26 +1852,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) { - struct rxbd8 *rxbdp; - struct gfar_private *priv = netdev_priv(rx_queue->dev); int i; - rxbdp = rx_queue->rx_bd_base; + struct rxbd8 *rxbdp = rx_queue->rx_bd_base; + + if (rx_queue->skb) + dev_kfree_skb(rx_queue->skb); for (i = 0; i < rx_queue->rx_ring_size; i++) { - if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), - priv->rx_buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_queue->rx_skbuff[i]); - rx_queue->rx_skbuff[i] = NULL; - } + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; + rxbdp->lstatus = 0; rxbdp->bufPtr = 0; rxbdp++; + + if (!rxb->page) + continue; + + dma_unmap_single(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; } - kfree(rx_queue->rx_skbuff); - rx_queue->rx_skbuff = NULL; + + kfree(rx_queue->rx_buff); + rx_queue->rx_buff = NULL; } /* If there are any tx skbs or rx skbs still around, free them. 
@@ -1937,7 +1902,7 @@ static void free_skb_resources(struct gfar_private *priv) for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if (rx_queue->rx_skbuff) + if (rx_queue->rx_buff) free_skb_rx_queue(rx_queue); } @@ -2495,7 +2460,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) struct gfar_private *priv = netdev_priv(dev); int frame_size = new_mtu + ETH_HLEN; - if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } @@ -2549,15 +2514,6 @@ static void gfar_timeout(struct net_device *dev) schedule_work(&priv->reset_task); } -static void gfar_align_skb(struct sk_buff *skb) -{ - /* We need the data buffer to be aligned properly. We will reserve - * as many bytes as needed to align the data properly - */ - skb_reserve(skb, RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); -} - /* Interrupt Handler for Transmit complete */ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@ -2615,7 +2571,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { struct skb_shared_hwtstamps shhwtstamps; - u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & + ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(*ns); @@ -2664,49 +2621,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static struct sk_buff *gfar_alloc_skb(struct net_device *dev) +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; + struct page *page; + dma_addr_t addr; - skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); - if (!skb) - return NULL; + page = dev_alloc_page(); + if (unlikely(!page)) + return false; - gfar_align_skb(skb); + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page); - return skb; + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; } -static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; - dma_addr_t addr; + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats; - skb = gfar_alloc_skb(dev); - if (!skb) - return NULL; + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); +} - addr = dma_map_single(priv->dev, skb->data, - priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, addr))) { - dev_kfree_skb_any(skb); - return NULL; +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) +{ + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; + + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } + } + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + 
RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = rx_queue->rx_buff; + } } - *bufaddr = addr; - return skb; + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; } -static inline void count_errors(unsigned short status, struct net_device *dev) +static void count_errors(u32 lstatus, struct net_device *ndev) { - struct gfar_private *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct gfar_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; /* If the packet was truncated, none of the other errors matter */ - if (status & RXBD_TRUNCATED) { + if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { stats->rx_length_errors++; atomic64_inc(&estats->rx_trunc); @@ -2714,25 +2707,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev) return; } /* Count the errors, if there were any */ - if (status & (RXBD_LARGE | RXBD_SHORT)) { + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { stats->rx_length_errors++; - if (status & RXBD_LARGE) + if (lstatus & BD_LFLAG(RXBD_LARGE)) atomic64_inc(&estats->rx_large); else atomic64_inc(&estats->rx_short); } - if (status & RXBD_NONOCTET) { + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { stats->rx_frame_errors++; atomic64_inc(&estats->rx_nonoctet); } - if (status & RXBD_CRCERR) { + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } - if (status & RXBD_OVERRUN) { + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { atomic64_inc(&estats->rx_overrun); - stats->rx_crc_errors++; + stats->rx_over_errors++; } } @@ -2783,6 +2776,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id) return IRQ_HANDLED; } +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + struct sk_buff *skb, bool first) +{ + unsigned int size = lstatus & BD_LENGTH_MASK; + struct page *page = rxb->page; + + /* Remove the FCS from the packet length */ + if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + size -= ETH_FCS_LEN; + + if (likely(first)) + skb_put(skb, size); + else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + + /* try reuse page */ + if (unlikely(page_count(page) != 1)) + return false; + + /* change offset to the other half */ + rxb->page_offset ^= GFAR_RXB_TRUESIZE; + + atomic_inc(&page->_count); + + return true; +} + +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, + struct gfar_rx_buff *old_rxb) +{ + struct gfar_rx_buff *new_rxb; + u16 nta = rxq->next_to_alloc; + + new_rxb = &rxq->rx_buff[nta]; + + /* find next buf that can reuse a page */ + nta++; + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? 
nta : 0; + + /* copy page reference */ + *new_rxb = *old_rxb; + + /* sync for use by the device */ + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, + old_rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, + u32 lstatus, struct sk_buff *skb) +{ + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; + struct page *page = rxb->page; + bool first = false; + + if (likely(!skb)) { + void *buff_addr = page_address(page) + rxb->page_offset; + + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); + if (unlikely(!skb)) { + gfar_rx_alloc_err(rx_queue); + return NULL; + } + skb_reserve(skb, RXBUF_ALIGNMENT); + first = true; + } + + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { + /* reuse the free half of the page */ + gfar_reuse_rx_page(rx_queue, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear rxb content */ + rxb->page = NULL; + + return skb; +} + static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums @@ -2797,10 +2877,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) } /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi) +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) { - struct gfar_private *priv = netdev_priv(dev); + struct gfar_private *priv = netdev_priv(ndev); struct rxfcb *fcb = NULL; /* fcb is at the beginning if exists */ @@ -2809,10 +2888,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Remove the FCB from the skb * Remove the padded bytes, if there are any */ - if (amount_pull) { - skb_record_rx_queue(skb, fcb->rq); - skb_pull(skb, amount_pull); - } + if (priv->uses_rxfcb) + skb_pull(skb, GMAC_FCB_LEN); /* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { @@ -2826,24 +2903,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (priv->padding) skb_pull(skb, priv->padding); - if (dev->features & NETIF_F_RXCSUM) + if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev); /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. 
*/ - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && be16_to_cpu(fcb->flags) & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(fcb->vlctl)); - - /* Send the packet up the stack */ - napi_gro_receive(napi, skb); - } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@ -2852,91 +2925,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, */ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) { - struct net_device *dev = rx_queue->dev; - struct rxbd8 *bdp, *base; - struct sk_buff *skb; - int pkt_len; - int amount_pull; - int howmany = 0; - struct gfar_private *priv = netdev_priv(dev); + struct net_device *ndev = rx_queue->ndev; + struct gfar_private *priv = netdev_priv(ndev); + struct rxbd8 *bdp; + int i, howmany = 0; + struct sk_buff *skb = rx_queue->skb; + int cleaned_cnt = gfar_rxbd_unused(rx_queue); + unsigned int total_bytes = 0, total_pkts = 0; /* Get the first full descriptor */ - bdp = rx_queue->cur_rx; - base = rx_queue->rx_bd_base; + i = rx_queue->next_to_clean; - amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; + while (rx_work_limit--) { + u32 lstatus; + + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + cleaned_cnt = 0; + } - while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { - struct sk_buff *newskb; - dma_addr_t bufaddr; + bdp = &rx_queue->rx_bd_base[i]; + lstatus = be32_to_cpu(bdp->lstatus); + if (lstatus & BD_LFLAG(RXBD_EMPTY)) + break; + /* order rx buffer descriptor reads */ rmb(); - /* Add another skb for the future */ - newskb = gfar_new_skb(dev, &bufaddr); + /* fetch next to clean buffer from the ring */ + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); + if (unlikely(!skb)) + break; - skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + cleaned_cnt++; + howmany++; - dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), - priv->rx_buffer_size, DMA_FROM_DEVICE); - - if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && - be16_to_cpu(bdp->length) > priv->rx_buffer_size)) - bdp->status = cpu_to_be16(RXBD_LARGE); - - /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || - !(be16_to_cpu(bdp->status) & RXBD_LAST) || - be16_to_cpu(bdp->status) & RXBD_ERR)) { - count_errors(be16_to_cpu(bdp->status), dev); - - if (unlikely(!newskb)) { - newskb = skb; - bufaddr = be32_to_cpu(bdp->bufPtr); - } else if (skb) - dev_kfree_skb(skb); - } else { - /* Increment the number of packets */ - rx_queue->stats.rx_packets++; - howmany++; - - if (likely(skb)) { - pkt_len = be16_to_cpu(bdp->length) - - ETH_FCS_LEN; - /* Remove the FCS from the packet length */ - skb_put(skb, pkt_len); - rx_queue->stats.rx_bytes += pkt_len; - skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull, - &rx_queue->grp->napi_rx); + if (unlikely(++i == rx_queue->rx_ring_size)) + i = 0; - } else { - netif_warn(priv, rx_err, dev, "Missing skb!\n"); - rx_queue->stats.rx_dropped++; - atomic64_inc(&priv->extra_stats.rx_skbmissing); - } + rx_queue->next_to_clean = i; + + /* fetch next buffer if not the last in frame */ + if (!(lstatus & BD_LFLAG(RXBD_LAST))) + continue; + + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { + count_errors(lstatus, ndev); + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + continue; } - rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + /* Increment the number of packets */ + total_pkts++; + 
total_bytes += skb->len; - /* Setup the new bdp */ - gfar_init_rxbdp(rx_queue, bdp, bufaddr); + skb_record_rx_queue(skb, rx_queue->qindex); - /* Update Last Free RxBD pointer for LFC */ - if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) - gfar_write(rx_queue->rfbptr, (u32)bdp); + gfar_process_frame(ndev, skb); - /* Update to the next pointer */ - bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb); - /* update to point at the next skb */ - rx_queue->skb_currx = (rx_queue->skb_currx + 1) & - RX_RING_MOD_MASK(rx_queue->rx_ring_size); + skb = NULL; } - /* Update the current rxbd pointer to be the next one */ - rx_queue->cur_rx = bdp; + /* Store incomplete frames for completion */ + rx_queue->skb = skb; + + rx_queue->stats.rx_packets += total_pkts; + rx_queue->stats.rx_bytes += total_bytes; + + if (cleaned_cnt) + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(priv->tx_actual_en)) { + u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + + gfar_write(rx_queue->rfbptr, bdp_dma); + } return howmany; } @@ -3454,7 +3525,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) struct phy_device *phydev = priv->phydev; struct gfar_priv_rx_q *rx_queue = NULL; int i; - struct rxbd8 *bdp; if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) return; @@ -3511,15 +3581,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) /* Turn last free buffer recording on */ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { for (i = 0; i < priv->num_rx_queues; i++) { + u32 bdp_dma; + rx_queue = priv->rx_queue[i]; - bdp = rx_queue->cur_rx; - /* skip to previous bd */ - bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, - rx_queue->rx_bd_base, - rx_queue->rx_ring_size); - - if (rx_queue->rfbptr) - gfar_write(rx_queue->rfbptr, (u32)bdp); + bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, bdp_dma); } priv->tx_actual_en = 1; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 5545e4103368..8c1994856e93 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -71,11 +71,6 @@ struct ethtool_rx_list { /* Number of bytes to align the rx bufs to */ #define RXBUF_ALIGNMENT 64 -/* The number of bytes which composes a unit for the purpose of - * allocating data buffers. ie-for any given MTU, the data buffer - * will be the next highest multiple of 512 bytes. 
*/ -#define INCREMENTAL_BUFFER_SIZE 512 - #define PHY_INIT_TIMEOUT 100000 #define DRV_NAME "gfar-enet" @@ -92,6 +87,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_TX_RING_SIZE 256 #define DEFAULT_RX_RING_SIZE 256 +#define GFAR_RX_BUFF_ALLOC 16 + #define GFAR_RX_MAX_RING_SIZE 256 #define GFAR_TX_MAX_RING_SIZE 256 @@ -103,11 +100,14 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -#define DEFAULT_RX_BUFFER_SIZE 1536 +#define GFAR_RXB_SIZE 1536 +#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define GFAR_RXB_TRUESIZE 2048 + #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) -#define JUMBO_BUFFER_SIZE 9728 -#define JUMBO_FRAME_SIZE 9600 +#define GFAR_JUMBO_FRAME_SIZE 9600 #define DEFAULT_FIFO_TX_THR 0x100 #define DEFAULT_FIFO_TX_STARVE 0x40 @@ -640,6 +640,7 @@ struct rmon_mib }; struct gfar_extra_stats { + atomic64_t rx_alloc_err; atomic64_t rx_large; atomic64_t rx_short; atomic64_t rx_nonoctet; @@ -651,7 +652,6 @@ struct gfar_extra_stats { atomic64_t eberr; atomic64_t tx_babt; atomic64_t tx_underrun; - atomic64_t rx_skbmissing; atomic64_t tx_timeout; }; @@ -1012,34 +1012,42 @@ struct rx_q_stats { unsigned long rx_dropped; }; +struct gfar_rx_buff { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + /** * struct gfar_priv_rx_q - per rx queue structure - * @rx_skbuff: skb pointers - * @skb_currx: currently use skb pointer + * @rx_buff: Array of buffer info metadata structs * @rx_bd_base: First rx buffer descriptor - * @cur_rx: Next free rx ring entry + * @next_to_use: index of the next buffer to be alloc'd + * @next_to_clean: index of the next buffer to be cleaned * @qindex: index of this queue - * @dev: back pointer to the dev structure + * @ndev: back pointer to net_device * @rx_ring_size: Rx ring size * @rxcoalescing: enable/disable rx-coalescing * @rxic: receive interrupt coalescing vlaue */ struct gfar_priv_rx_q { - struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); - dma_addr_t rx_bd_dma_base; + struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES); struct rxbd8 *rx_bd_base; - struct rxbd8 *cur_rx; - struct net_device *dev; - struct gfar_priv_grp *grp; + struct net_device *ndev; + struct device *dev; + u16 rx_ring_size; + u16 qindex; + struct gfar_priv_grp *grp; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + struct sk_buff *skb; struct rx_q_stats stats; - u16 skb_currx; - u16 qindex; - unsigned int rx_ring_size; - /* RX Coalescing values */ + u32 __iomem *rfbptr; unsigned char rxcoalescing; unsigned long rxic; - u32 __iomem *rfbptr; + dma_addr_t rx_bd_dma_base; }; enum gfar_irqinfo_id { @@ -1109,7 +1117,6 @@ struct gfar_private { struct device *dev; struct net_device *ndev; enum gfar_errata errata; - unsigned int rx_buffer_size; u16 uses_rxfcb; u16 padding; @@ -1292,6 +1299,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp) bdp->lstatus = cpu_to_be32(lstatus); } +static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq) +{ + if (rxq->next_to_clean > rxq->next_to_use) + return rxq->next_to_clean - rxq->next_to_use - 1; + + return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1; +} + +static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq) +{ + struct rxbd8 *bdp; + u32 bdp_dma; + int i; + + i = rxq->next_to_use ? 
rxq->next_to_use - 1 : rxq->rx_ring_size - 1; + bdp = &rxq->rx_bd_base[i]; + bdp_dma = lower_32_bits(rxq->rx_bd_dma_base); + bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base; + + return bdp_dma; +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3c0a8f825b63..555e461b0cfe 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); static const char stat_gstrings[][ETH_GSTRING_LEN] = { + /* extra stats */ + "rx-allocation-errors", "rx-large-frame-errors", "rx-short-frame-errors", "rx-non-octet-errors", @@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = { "ethernet-bus-error", "tx-babbling-errors", "tx-underrun-errors", - "rx-skb-missing-errors", "tx-timeout-errors", + /* rmon stats */ "tx-rx-64-frames", "tx-rx-65-127-frames", "tx-rx-128-255-frames", diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index d49bee38cd31..cc2d8b4b18e3 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = { .remove = hip04_remove, .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, .of_match_table = hip04_mac_match, }, }; diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c index b3bac25db99c..fca0a5be1f0f 100644 --- a/drivers/net/ethernet/hisilicon/hip04_mdio.c +++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c @@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = { .remove = hip04_mdio_remove, .driver = { .name = "hip04-mdio", - .owner = THIS_MODULE, .of_match_table = hip04_mdio_match, }, }; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 29bbb628d712..7af870a3c549 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0; module_param(rx_flush, uint, 0644); MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); +static bool old_large_send __read_mostly; +module_param(old_large_send, bool, S_IRUGO); +MODULE_PARM_DESC(old_large_send, + "Use old large send method on firmware that supports the new method"); + struct ibmveth_stat { char name[ETH_GSTRING_LEN]; int offset; @@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = { { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) }, { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) }, { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) }, - { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) } + { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }, + { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) } }; /* simple methods of getting data from the current rxq entry */ @@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) return rc1 ? 
rc1 : rc2; } +static int ibmveth_set_tso(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + unsigned long set_attr, clr_attr, ret_attr; + long ret1, ret2; + int rc1 = 0, rc2 = 0; + int restart = 0; + + if (netif_running(dev)) { + restart = 1; + adapter->pool_config = 1; + ibmveth_close(dev); + adapter->pool_config = 0; + } + + set_attr = 0; + clr_attr = 0; + + if (data) + set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; + else + clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; + + ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && + !old_large_send) { + ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + set_attr, &ret_attr); + + if (ret2 != H_SUCCESS) { + netdev_err(dev, "unable to change tso settings. %d rc=%ld\n", + data, ret2); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + + if (data == 1) + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + rc1 = -EIO; + + } else { + adapter->fw_large_send_support = data; + adapter->large_send = data; + } + } else { + /* Older firmware version of large send offload does not + * support tcp6/ipv6 + */ + if (data == 1) { + dev->features &= ~NETIF_F_TSO6; + netdev_info(dev, "TSO feature requires all partitions to have updated driver"); + } + adapter->large_send = data; + } + + if (restart) + rc2 = ibmveth_open(dev); + + return rc1 ? rc1 : rc2; +} + static int ibmveth_set_features(struct net_device *dev, netdev_features_t features) { struct ibmveth_adapter *adapter = netdev_priv(dev); int rx_csum = !!(features & NETIF_F_RXCSUM); - int rc; - netdev_features_t changed = features ^ dev->features; - - if (features & NETIF_F_TSO & changed) - netdev_info(dev, "TSO feature requires all partitions to have updated driver"); + int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6)); + int rc1 = 0, rc2 = 0; - if (rx_csum == adapter->rx_csum) - return 0; + if (rx_csum != adapter->rx_csum) { + rc1 = ibmveth_set_csum_offload(dev, rx_csum); + if (rc1 && !adapter->rx_csum) + dev->features = + features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + } - rc = ibmveth_set_csum_offload(dev, rx_csum); - if (rc && !adapter->rx_csum) - dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + if (large_send != adapter->large_send) { + rc2 = ibmveth_set_tso(dev, large_send); + if (rc2 && !adapter->large_send) + dev->features = + features & ~(NETIF_F_TSO | NETIF_F_TSO6); + } - return rc; + return rc1 ? 
rc1 : rc2; } static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) static int ibmveth_send(struct ibmveth_adapter *adapter, - union ibmveth_buf_desc *descs) + union ibmveth_buf_desc *descs, unsigned long mss) { unsigned long correlator; unsigned int retry_count; @@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter, descs[0].desc, descs[1].desc, descs[2].desc, descs[3].desc, descs[4].desc, descs[5].desc, - correlator, &correlator); + correlator, &correlator, mss, + adapter->fw_large_send_support); } while ((ret == H_BUSY) && (retry_count--)); if (ret != H_SUCCESS && ret != H_DROPPED) { @@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, int last, i; int force_bounce = 0; dma_addr_t dma_addr; + unsigned long mss = 0; /* * veth handles a maximum of 6 segments including the header, so @@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, desc_flags = IBMVETH_BUF_VALID; + if (skb_is_gso(skb) && adapter->fw_large_send_support) + desc_flags |= IBMVETH_BUF_LRG_SND; + if (skb->ip_summed == CHECKSUM_PARTIAL) { unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; @@ -1007,7 +1084,7 @@ retry_bounce: descs[0].fields.flags_len = desc_flags | skb->len; descs[0].fields.address = adapter->bounce_buffer_dma; - if (ibmveth_send(adapter, descs)) { + if (ibmveth_send(adapter, descs, 0)) { adapter->tx_send_failed++; netdev->stats.tx_dropped++; } else { @@ -1041,16 +1118,23 @@ retry_bounce: descs[i+1].fields.address = dma_addr; } - if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) { - /* Put -1 in the IP checksum to tell phyp it - * is a largesend packet and put the mss in the TCP checksum. - */ - ip_hdr(skb)->check = 0xffff; - tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size); - adapter->tx_large_packets++; + if (skb_is_gso(skb)) { + if (adapter->fw_large_send_support) { + mss = (unsigned long)skb_shinfo(skb)->gso_size; + adapter->tx_large_packets++; + } else if (!skb_is_gso_v6(skb)) { + /* Put -1 in the IP checksum to tell phyp it + * is a largesend packet. Put the mss in + * the TCP checksum. 
+ */ + ip_hdr(skb)->check = 0xffff; + tcp_hdr(skb)->check = + cpu_to_be16(skb_shinfo(skb)->gso_size); + adapter->tx_large_packets++; + } } - if (ibmveth_send(adapter, descs)) { + if (ibmveth_send(adapter, descs, mss)) { adapter->tx_send_failed++; netdev->stats.tx_dropped++; } else { @@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; unsigned int *mcastFilterSize_p; + long ret; + unsigned long ret_attr; dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", dev->unit_address); @@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) SET_NETDEV_DEV(netdev, &dev->dev); netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= netdev->hw_features; - /* TSO is disabled by default */ - netdev->hw_features |= NETIF_F_TSO; + ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + /* If running older firmware, TSO should not be enabled by default */ + if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && + !old_large_send) { + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= netdev->hw_features; + } else { + netdev->hw_features |= NETIF_F_TSO; + } memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 41dedb1fb2ae..4eade67fe30c 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -40,6 +40,8 @@ #define IbmVethMcastRemoveFilter 0x2UL #define IbmVethMcastClearFilterTable 0x3UL +#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL +#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL #define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL #define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL #define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL @@ -59,13 +61,20 @@ static inline long h_send_logical_lan(unsigned long unit_address, unsigned long desc1, unsigned long desc2, unsigned long desc3, unsigned long desc4, unsigned long desc5, unsigned long desc6, - unsigned long corellator_in, unsigned long *corellator_out) + unsigned long corellator_in, unsigned long *corellator_out, + unsigned long mss, unsigned long large_send_support) { long rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; - rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1, - desc2, desc3, desc4, desc5, desc6, corellator_in); + if (large_send_support) + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, + desc1, desc2, desc3, desc4, desc5, desc6, + corellator_in, mss); + else + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, + desc1, desc2, desc3, desc4, desc5, desc6, + corellator_in); *corellator_out = retbuf[0]; @@ -147,11 +156,13 @@ struct ibmveth_adapter { struct ibmveth_rx_q rx_queue; int pool_config; int rx_csum; + int large_send; void *bounce_buffer; dma_addr_t bounce_buffer_dma; u64 fw_ipv6_csum_support; u64 fw_ipv4_csum_support; + u64 fw_large_send_support; /* adapter specific stats */ u64 replenish_task_cycles; u64 replenish_no_mem; @@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields { #endif #define IBMVETH_BUF_VALID 0x80000000 #define IBMVETH_BUF_TOGGLE 0x40000000 +#define IBMVETH_BUF_LRG_SND 0x04000000 #define IBMVETH_BUF_NO_CSUM 0x02000000 #define IBMVETH_BUF_CSUM_GOOD 0x01000000 #define IBMVETH_BUF_LEN_MASK 0x00FFFFFF diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c 
b/drivers/net/ethernet/intel/e1000e/netdev.c index 89d788d8f263..fea1601f32a3 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4588,6 +4588,7 @@ static int e1000_open(struct net_device *netdev) return 0; err_req_irq: + pm_qos_remove_request(&adapter->pm_qos_req); e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter->rx_ring); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ec76c3fa3a04..281fd8456146 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -98,7 +98,7 @@ #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) +#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -289,35 +289,35 @@ struct i40e_pf { struct work_struct service_task; u64 flags; -#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1) -#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2) -#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3) -#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) -#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) -#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) -#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) -#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) -#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) +#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) +#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) +#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) +#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4) +#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5) +#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) +#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) +#define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9) #ifdef I40E_FCOE -#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11) +#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) -#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) -#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) -#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) -#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) -#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) -#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) -#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) -#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) -#define I40E_FLAG_PTP (u64)(1 << 25) -#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) +#define I40E_FLAG_IN_NETPOLL BIT_ULL(12) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) +#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) +#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) +#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17) +#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18) +#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) +#define I40E_FLAG_DCB_ENABLED BIT_ULL(20) +#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21) +#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) +#define I40E_FLAG_PTP BIT_ULL(25) +#define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #ifdef CONFIG_I40E_VXLAN -#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) +#define I40E_FLAG_VXLAN_FILTER_SYNC BIT_ULL(27) #endif -#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) -#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) +#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) /* tracks features that get auto disabled by errors */ @@ -443,8 +443,8 @@ struct 
i40e_vsi { u32 current_netdev_flags; unsigned long state; -#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0) -#define I40E_VSI_FLAG_VEB_OWNER (1<<1) +#define I40E_VSI_FLAG_FILTER_CHANGED BIT(0) +#define I40E_VSI_FLAG_VEB_OWNER BIT(1) unsigned long flags; struct list_head mac_filter_list; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 929e3d72a01e..9101f5c00f37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MINOR 0x0004 struct i40e_aq_desc { __le16 flags; @@ -132,12 +132,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -262,7 +257,6 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -274,8 +268,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -509,7 +501,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -532,7 +525,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -1068,6 +1063,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -2064,6 +2060,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) +#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA +#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) +#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 0bae22da014d..167ca0d752ea 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -72,6 +72,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) } /** + * i40e_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40e_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return 
"I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + +/** * i40e_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask @@ -1187,9 +1393,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) blink = false; if (blink) - gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else - gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); wr32(hw, 
I40E_GLGEN_GPIO_CTL(i), gpio_val); break; @@ -2391,7 +2597,7 @@ i40e_aq_erase_nvm_exit: #define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 #define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 +#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 #define I40E_DEV_FUNC_CAP_CEM 0xF2 #define I40E_DEV_FUNC_CAP_IWARP 0x51 #define I40E_DEV_FUNC_CAP_LED 0x61 @@ -2416,6 +2622,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; + u8 major_rev; u32 i = 0; u16 id; @@ -2433,6 +2640,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, number = le32_to_cpu(cap->number); logical_id = le32_to_cpu(cap->logical_id); phys_id = le32_to_cpu(cap->phys_id); + major_rev = cap->major_rev; switch (id) { case I40E_DEV_FUNC_CAP_SWITCH_MODE: @@ -2507,9 +2715,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, case I40E_DEV_FUNC_CAP_MSIX_VF: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_MFP_MODE_1: - if (number == 1) - p->mfp_mode_1 = true; + case I40E_DEV_FUNC_CAP_FLEX10: + if (major_rev == 1) { + if (number == 1) { + p->flex10_enable = true; + p->flex10_capable = true; + } + } else { + /* Capability revision >= 2 */ + if (number & 1) + p->flex10_enable = true; + if (number & 2) + p->flex10_capable = true; + } + p->flex10_mode = logical_id; + p->flex10_status = phys_id; break; case I40E_DEV_FUNC_CAP_CEM: if (number == 1) @@ -2557,7 +2777,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, /* Software override ensuring FCoE is disabled if npar or mfp * mode because it is not supported in these modes. 
*/ - if (p->npar_enable || p->mfp_mode_1) + if (p->npar_enable || p->flex10_enable) p->fcoe = false; /* count the enabled ports (aka the "not disabled" ports) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index e137e3fac8ee..50fc894a4cde 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -58,9 +58,9 @@ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 #define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) #define I40E_IEEE_ETS_CBS_SHIFT 6 -#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT) #define I40E_IEEE_ETS_WILLING_SHIFT 7 -#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT) #define I40E_IEEE_ETS_PRIO_0_SHIFT 0 #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 @@ -79,9 +79,9 @@ #define I40E_IEEE_PFC_CAP_SHIFT 0 #define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) #define I40E_IEEE_PFC_MBC_SHIFT 6 -#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT) #define I40E_IEEE_PFC_WILLING_SHIFT 7 -#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) +#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT) /* Defines for IEEE APP TLV */ #define I40E_IEEE_APP_SEL_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index bd5079d5c1b6..1c51f736a8d0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) /* Set up all the App TLVs if DCBx is negotiated */ for (i = 0; i < dcbxcfg->numapps; i++) { prio = dcbxcfg->app[i].priority; - tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]); /* Add APP only if the TC is enabled for this VSI */ if (tc_map & vsi->tc_config.enabled_tc) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index da0faf478af0..d7c15d17faa6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) pf->auto_disable_flags |= flag; } dev_info(&pf->pdev->dev, "requesting a PF reset\n"); - i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); } #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) @@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "corer", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED)); } else if (strncmp(cmd_buf, 
"empr", 4) == 0) { dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); - i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); + i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index 56438bd579e6..f141e78d409e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); if (!ret_code && ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == - (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { - ret_code = i40e_validate_nvm_checksum(hw, NULL); - } else { - ret_code = I40E_ERR_DIAG_TEST_FAILED; - } - - return ret_code; + BIT(I40E_SR_CONTROL_WORD_1_SHIFT))) + return i40e_validate_nvm_checksum(hw, NULL); + else + return I40E_ERR_DIAG_TEST_FAILED; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9a68c65b17ea..83d41c2cb02d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), + I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status), /* LPI stats */ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), @@ -679,15 +681,17 @@ static int i40e_set_settings(struct net_device *netdev, /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { - netdev_info(netdev, "Set phy config failed with error %d.\n", - status); + netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); return -EAGAIN; } status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) - netdev_info(netdev, "Updating link info failed with error %d\n", - status); + netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); } else { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -707,8 +711,9 @@ static int i40e_nway_reset(struct net_device *netdev) ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); if (ret) { - netdev_info(netdev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + netdev_info(netdev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); return -EIO; } @@ -820,18 +825,21 @@ static int i40e_set_pauseparam(struct net_device *netdev, status = i40e_set_fc(hw, &aq_failures, link_up); if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { - netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { - netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n", - status, hw->aq.asq_last_status); + 
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", - status, hw->aq.asq_last_status); + netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; } @@ -1009,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; /* register returns value in power of 2, 64Kbyte chunks. */ - val = (64 * 1024) * (1 << val); + val = (64 * 1024) * BIT(val); return val; } @@ -1462,20 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev, else info->phc_index = -1; - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } @@ -1591,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev, /* indicate we're in test mode */ dev_close(netdev); else - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); /* Link test performed before hardware reset * so autoneg doesn't interfere with test result @@ -1613,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev, eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__I40E_TESTING, &pf->state); - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); if (if_running) dev_open(netdev); @@ -1646,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev, /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) { + if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { wol->supported = 0; wol->wolopts = 0; } else { @@ -1679,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); - if (((1 << hw->port) & wol_nvm_bits)) + if (BIT(hw->port) & wol_nvm_bits) return -EOPNOTSUPP; /* only magic packet is supported */ @@ -2025,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: return -EINVAL; 
@@ -2037,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: return -EINVAL; @@ -2049,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; default: return -EINVAL; @@ -2063,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; @@ -2081,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -2090,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case IPV6_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; default: return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index c8b621e0e7cd..5ea75dd537d6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) /* enable FCoE hash filter */ val = rd32(hw, I40E_PFQF_HENA(1)); - val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32); - val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32); + val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); + val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; wr32(hw, I40E_PFQF_HENA(1), val); @@ -308,10 
+308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) pf->num_fcoe_qps = I40E_DEFAULT_FCOE; /* Reserve 4K DDP contexts and 20K filter size for FCoE */ - pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * - I40E_DMA_CNTX_BASE_SIZE; + pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) * + I40E_DMA_CNTX_BASE_SIZE; pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + - (1 << I40E_HASH_FILTER_SIZE_16K) * + BIT(I40E_HASH_FILTER_SIZE_16K) * I40E_HASH_FILTER_BASE_SIZE; /* FCoE object: max 16K filter buckets and 4K DMA contexts */ @@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && app.protocolid == ETH_P_FCOE) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= (1 << tc); + enabled_tc |= BIT(tc); break; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h index 0d49e2d15d40..a93174ddeaba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -59,9 +59,9 @@ (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ - (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) + BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ - (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) + BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 9b987ccc9e82..5ebe12d56ebf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -116,6 +116,7 @@ exit: * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @pd_index: which page descriptor index to manipulate + * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. * * This function: * 1. 
Initializes the pd entry @@ -129,12 +130,14 @@ exit: **/ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index) + u32 pd_index, + struct i40e_dma_mem *rsrc_pg) { i40e_status ret_code = 0; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; + struct i40e_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; u64 *pd_addr; u64 page_desc; @@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (!pd_entry->valid) { - /* allocate a 4K backing page */ - ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp, - I40E_HMC_PAGED_BP_SIZE, - I40E_HMC_PD_BP_BUF_ALIGNMENT); - if (ret_code) - goto exit; + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + pd_entry->rsrc_pg = false; + } - pd_entry->bp.addr = mem; + pd_entry->bp.addr = *page; pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ - page_desc = mem.pa | 0x1; + page_desc = page->pa | 0x1; pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; @@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); /* free memory here */ - ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (!pd_entry->rsrc_pg) + ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr); if (ret_code) goto exit; if (!pd_table->ref_cnt) @@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, u32 idx, bool is_pf) { struct i40e_hmc_sd_entry *sd_entry; - i40e_status ret_code = 0; + + if (!is_pf) + return I40E_NOT_SUPPORTED; /* get the entry and decrease its ref counter */ sd_entry = &hmc_info->sd_table.sd_entry[idx]; - if (is_pf) { - I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); - } else { - ret_code = I40E_NOT_SUPPORTED; - goto exit; - } - ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); - if (ret_code) - goto exit; -exit: - return ret_code; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); + + return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr); } /** @@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx, bool is_pf) { - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; + if (!is_pf) + return I40E_NOT_SUPPORTED; + sd_entry = &hmc_info->sd_table.sd_entry[idx]; - if (is_pf) { - I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); - } else { - ret_code = I40E_NOT_SUPPORTED; - goto exit; - } - /* free memory here */ - ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); - if (ret_code) - goto exit; -exit: - return ret_code; + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); + + return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 732a02660330..d90669211392 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -126,8 +127,8 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ 
((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -146,7 +147,7 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 0079ad7bcd0e..fa371a2a40c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, obj->cnt = txq_num; obj->base = 0; size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (txq_num > obj->max_cnt) { @@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (rxq_num > obj->max_cnt) { @@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_cntx_num > obj->max_cnt) { @@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); obj->base = i40e_align_l2obj_base(obj->base); size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); - obj->size = (u64)1 << size_exp; + obj->size = BIT_ULL(size_exp); /* validate values requested by driver don't exceed HMC capacity */ if (fcoe_filt_num > obj->max_cnt) { @@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, /* update the pd table entry */ ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, - i); + i, NULL); if (ret_code) { pd_error = true; break; @@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = ((u8)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; src_byte = *from; src_byte &= mask; @@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits, /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = ((u16)1 << ce_info->width) - 1; + mask = 
BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines @@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits, * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) - mask = ((u32)1 << ce_info->width) - 1; + mask = BIT(ce_info->width) - 1; else mask = ~(u32)0; @@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits, * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) - mask = ((u64)1 << ce_info->width) - 1; + mask = BIT_ULL(ce_info->width) - 1; else mask = ~(u64)0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 48a52b35b614..857d294d2a45 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 4 +#define DRV_VERSION_BUILD 6 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -520,7 +520,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, if (likely(new_data >= *offset)) *stat = new_data - *offset; else - *stat = (new_data + ((u64)1 << 48)) - *offset; + *stat = (new_data + BIT_ULL(48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } @@ -543,7 +543,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, if (likely(new_data >= *offset)) *stat = (u32)(new_data - *offset); else - *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); + *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } /** @@ -1123,6 +1123,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + nsd->fd_sb_status = true; + else + nsd->fd_sb_status = false; + + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + nsd->fd_atr_status = true; + else + nsd->fd_atr_status = false; + pf->stat_offsets_loaded = true; } @@ -1264,7 +1276,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; - i40e_status aq_ret; + i40e_status ret; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) @@ -1275,8 +1287,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - if (aq_ret) + ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + if (ret) return -ENOENT; return 0; @@ -1514,7 +1526,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) /* TC is enabled */ + if (enabled_tc & BIT_ULL(i)) /* TC is enabled */ numtc++; } if (!numtc) { @@ -1540,7 +1552,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { /* See if the given TC is enabled for the given VSI */ - if 
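[Editorial sketch, not part of the patch: the i40e_stat_update48() hunk above swaps the open-coded (u64)1 << 48 for BIT_ULL(48); the wraparound arithmetic it implements, restated standalone with a hypothetical name:]

/* Hardware keeps 48-bit free-running counters; when a new reading is
 * smaller than the saved offset the counter has wrapped, so add 2^48
 * before subtracting, then mask the result back to 48 bits.
 */
static u64 example_stat_delta48(u64 new_data, u64 offset)
{
        u64 stat;

        if (new_data >= offset)
                stat = new_data - offset;
        else
                stat = (new_data + BIT_ULL(48)) - offset;
        return stat & 0xFFFFFFFFFFFFULL;
}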
(vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ + if (vsi->tc_config.enabled_tc & BIT_ULL(i)) { + /* TC is enabled */ int pow, num_qps; switch (vsi->type) { @@ -1566,7 +1579,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; - while (num_qps && ((1 << pow) < qcount)) { + while (num_qps && (BIT_ULL(pow) < qcount)) { pow++; num_qps >>= 1; } @@ -1716,10 +1729,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) bool add_happened = false; int filter_list_len = 0; u32 changed_flags = 0; - i40e_status aq_ret = 0; + i40e_status ret = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; + int aq_err = 0; u16 cmd_flags; /* empty array typed pointers, kcalloc later */ @@ -1771,31 +1785,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, del_list, num_del, - NULL); + ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, del_list, num_del, + NULL); + aq_err = pf->hw.aq.asq_last_status; num_del = 0; memset(del_list, 0, sizeof(*del_list)); - if (aq_ret && - pf->hw.aq.asq_last_status != - I40E_AQ_RC_ENOENT) + if (ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", - aq_ret, - pf->hw.aq.asq_last_status); + "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } } if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, del_list, num_del, NULL); + aq_err = pf->hw.aq.asq_last_status; num_del = 0; - if (aq_ret && - pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) + if (ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "ignoring delete macvlan error, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); } kfree(del_list); @@ -1833,29 +1847,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, - NULL); + ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); + aq_err = pf->hw.aq.asq_last_status; num_add = 0; - if (aq_ret) + if (ret) break; memset(add_list, 0, sizeof(*add_list)); } } if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, - add_list, num_add, NULL); + ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); + aq_err = pf->hw.aq.asq_last_status; num_add = 0; } kfree(add_list); add_list = NULL; - if (add_happened && aq_ret && - pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { + if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) { dev_info(&pf->pdev->dev, - "add filter failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "add filter failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, aq_err)); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { @@ -1871,34 +1887,40 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, - 
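[Editorial sketch, not part of the patch: the rounding loop above in i40e_vsi_setup_queue_map() finds the order of the next power of two of qcount, e.g. qcount = 5 exits with pow = 3, i.e. 8 queue pairs. Standalone equivalent with a hypothetical name (the kernel's roundup_pow_of_two() computes the corresponding value directly):]

static int example_qp_order(int qcount)
{
        int pow = 0, num_qps = qcount;

        while (num_qps && (BIT_ULL(pow) < qcount)) {
                pow++;
                num_qps >>= 1;
        }
        return pow;     /* queue-pair count used is BIT(pow) */
}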
vsi->seid, - cur_multipromisc, - NULL); - if (aq_ret) + ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (ret) dev_info(&pf->pdev->dev, - "set multi promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multi promisc failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); - aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) + ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) dev_info(&pf->pdev->dev, - "set uni promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); - aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) + "set uni promisc failed, err %s, aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (ret) dev_info(&pf->pdev->dev, - "set brdcast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set brdcast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } clear_bit(__I40E_CONFIG_BUSY, &vsi->state); @@ -1994,8 +2016,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vlan stripping failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); } } @@ -2023,8 +2047,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vlan stripping failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); } } @@ -2294,7 +2320,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) { struct i40e_vsi_context ctxt; - i40e_status aq_ret; + i40e_status ret; vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); @@ -2304,11 +2330,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) ctxt.seid = vsi->seid; ctxt.info = vsi->info; - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&vsi->back->pdev->dev, - "%s: update vsi failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "add pvid failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); return -ENOENT; } @@ -2696,9 +2724,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) #endif /* I40E_FCOE */ /* round up for the chip's needs */ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, - (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + 
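[Editorial sketch, not part of the patch: the ALIGN() calls here round the Rx header/data buffer lengths up to the power-of-two granularity implied by the HBUFF/DBUFF shifts; for instance ALIGN(1500, 128) == 1536. In isolation, with a hypothetical name:]

/* Round a requested length up to a power-of-two unit derived from a
 * context-field shift (whatever shift the hardware defines).
 */
static u32 example_align_len(u32 len, u32 unit_shift)
{
        return ALIGN(len, BIT(unit_shift));
}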
BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, - (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); /* set up individual rings */ for (i = 0; i < vsi->num_queue_pairs && !err; i++) @@ -2728,7 +2756,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { - if (!(vsi->tc_config.enabled_tc & (1 << n))) + if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) continue; qoffset = vsi->tc_config.tc_info[n].qoffset; @@ -4073,7 +4101,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) if (app.selector == I40E_APP_SEL_TCPIP && app.protocolid == I40E_APP_PROTOID_ISCSI) { tc = dcbcfg->etscfg.prioritytable[app.priority]; - enabled_tc |= (1 << tc); + enabled_tc |= BIT_ULL(tc); break; } } @@ -4122,7 +4150,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) u8 i; for (i = 0; i < num_tc; i++) - enabled_tc |= 1 << i; + enabled_tc |= BIT(i); return enabled_tc; } @@ -4157,7 +4185,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) num_tc++; } return num_tc; @@ -4179,11 +4207,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) /* Find the first enabled TC */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) break; } - return 1 << i; + return BIT(i); } /** @@ -4221,26 +4249,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; - i40e_status aq_ret; + i40e_status ret; u32 tc_bw_max; int i; /* Get the VSI level BW configuration */ - aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); - if (aq_ret) { + ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi bw config, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } /* Get the VSI level BW configuration per TC */ - aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, - NULL); - if (aq_ret) { + ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, + NULL); + if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi ets bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EINVAL; } @@ -4279,16 +4309,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, u8 *bw_share) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; - i40e_status aq_ret; + i40e_status ret; int i; bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; - aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, - NULL); - if (aq_ret) { + ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, + NULL); + if (ret) { dev_info(&vsi->back->pdev->dev, "AQ command Config VSI BW allocation per TC failed = %d\n", vsi->back->hw.aq.asq_last_status); @@ -4337,7 +4367,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) * will 
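[Editorial sketch, not part of the patch: i40e_pf_get_num_tc() and i40e_pf_get_default_tc() above open-code a population count and a find-first-set over the TC bitmap; illustrative equivalents using the generic kernel helpers, with hypothetical names:]

static u8 example_num_tc(u8 enabled_tc)
{
        return hweight8(enabled_tc);            /* number of set TC bits */
}

static u8 example_default_tc(u8 enabled_tc)
{
        if (!enabled_tc)
                enabled_tc = 0x1;               /* at least TC0 */
        return BIT(ffs(enabled_tc) - 1);        /* lowest enabled TC, one-hot */
}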
set the numtc for netdev as 2 that will be * referenced by the netdev layer as TC 0 and 1. */ - if (vsi->tc_config.enabled_tc & (1 << i)) + if (vsi->tc_config.enabled_tc & BIT_ULL(i)) netdev_set_tc_queue(netdev, vsi->tc_config.tc_info[i].netdev_tc, vsi->tc_config.tc_info[i].qcount, @@ -4399,7 +4429,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now across all VSIs */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) bw_share[i] = 1; } @@ -4423,8 +4453,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "update vsi failed, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "Update vsi tc config failed, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); goto out; } /* update the local VSI info with updated queue map */ @@ -4435,8 +4467,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&vsi->back->pdev->dev, - "Failed updating vsi bw info, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "Failed updating vsi bw info, err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); goto out; } @@ -4469,7 +4503,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) /* Enable ETS TCs with equal BW Share for now */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (enabled_tc & (1 << i)) + if (enabled_tc & BIT_ULL(i)) bw_data.tc_bw_share_credits[i] = 1; } @@ -4477,8 +4511,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "veb bw config failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "VEB bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto out; } @@ -4486,8 +4521,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) ret = i40e_veb_get_bw_info(veb); if (ret) { dev_info(&pf->pdev->dev, - "Failed getting veb bw config, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "Failed getting veb bw config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: @@ -4574,8 +4610,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf) ret = i40e_aq_resume_port_tx(hw, NULL); if (ret) { dev_info(&pf->pdev->dev, - "AQ command Resume Port Tx failed = %d\n", - pf->hw.aq.asq_last_status); + "Resume Port Tx failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_service_event_schedule(pf); @@ -4627,8 +4664,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) } } else { dev_info(&pf->pdev->dev, - "AQ Querying DCB configuration failed: aq_err %d\n", - pf->hw.aq.asq_last_status); + "Query for DCB configuration failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } out: @@ -4859,7 +4897,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) /* Generate TC map for number of tc requested */ for (i = 0; i < tc; i++) - enabled_tc |= (1 << i); + enabled_tc |= BIT_ULL(i); /* Requesting same TC configuration as already enabled */ if 
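[Editorial sketch, not part of the patch: the map-building loop above in i40e_setup_tc() sets the low tc bits one at a time; for a contiguous request this reduces to BIT(tc) - 1, e.g. tc = 3 yields 0x7 (TCs 0-2). Hypothetical equivalent:]

static u8 example_tc_map(u8 num_tc)
{
        return (u8)(BIT(num_tc) - 1);   /* low num_tc bits set */
}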
(enabled_tc == vsi->tc_config.enabled_tc) @@ -4998,7 +5036,7 @@ err_setup_rx: err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return err; } @@ -5066,7 +5104,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) i40e_vc_notify_reset(pf); /* do the biggest reset indicated */ - if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { + if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { /* Request a Global Reset * @@ -5081,7 +5119,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) { /* Request a Core Reset * @@ -5093,7 +5131,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset * @@ -5106,7 +5144,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf); - } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { int v; /* Find the VSI(s) that requested a re-init */ @@ -5123,7 +5161,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) /* no further action needed, so return now */ return; - } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) { + } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { int v; /* Find the VSI(s) that needs to be brought down */ @@ -5253,7 +5291,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); + dev_info(&pf->pdev->dev, + "Failed querying DCB configuration data from firmware, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto exit; } @@ -5761,23 +5802,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf) rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_REINIT_REQUESTED); + reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); } if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED); clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED); clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); + reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED); clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); } if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { - reset_flags |= (1 << __I40E_DOWN_REQUESTED); + reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED); clear_bit(__I40E_DOWN_REQUESTED, &pf->state); } @@ -5983,27 +6024,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct 
i40e_vsi_context ctxt; - int aq_ret; + int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s couldn't get PF vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vsi switch failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -6017,27 +6060,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; - int aq_ret; + int ret; ctxt.seid = pf->main_vsi_seid; ctxt.pf_num = pf->hw.pf_id; ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s couldn't get PF vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return; } ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "update vsi switch failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); } } @@ -6097,7 +6142,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) ret = i40e_add_vsi(ctl_vsi); if (ret) { dev_info(&pf->pdev->dev, - "rebuild of owner VSI failed: %d\n", ret); + "rebuild of veb_idx %d owner VSI failed: %d\n", + veb->idx, ret); goto end_reconstitute; } i40e_vsi_reset_stats(ctl_vsi); @@ -6176,8 +6222,10 @@ static int i40e_get_capabilities(struct i40e_pf *pf) buf_len = data_size; } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { dev_info(&pf->pdev->dev, - "capability discovery failed: aq=%d\n", - pf->hw.aq.asq_last_status); + "capability discovery failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); return -ENODEV; } } while (err); @@ -6363,7 +6411,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); goto 
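[Editorial sketch, not part of the patch: both loopback helpers above follow the same VSI-context read-modify-write; a condensed, hypothetical merged form, with the error logging the real code performs omitted:]

static void example_set_allow_lb(struct i40e_pf *pf, bool enable)
{
        struct i40e_vsi_context ctxt = {};
        __le16 lb = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        if (i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL))
                return;

        /* mark only the switch section valid, flip one flag, write back */
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        if (enable)
                ctxt.info.switch_id |= lb;
        else
                ctxt.info.switch_id &= ~lb;
        i40e_aq_update_vsi_params(&pf->hw, &ctxt, NULL);
}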
clear_recovery; } @@ -6373,11 +6423,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); - if (ret) { - dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", - ret); + if (ret) goto end_core_reset; - } ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, @@ -6418,12 +6465,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (ret) - dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* make sure our flow control settings are restored */ ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); if (ret) - dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only @@ -6484,8 +6535,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -6647,8 +6700,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->pending_vxlan_bitmap & (1 << i)) { - pf->pending_vxlan_bitmap &= ~(1 << i); + if (pf->pending_vxlan_bitmap & BIT_ULL(i)) { + pf->pending_vxlan_bitmap &= ~BIT_ULL(i); port = pf->vxlan_ports[i]; if (port) ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), @@ -6659,10 +6712,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) if (ret) { dev_info(&pf->pdev->dev, - "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + "%s vxlan port %d, index %d failed, err %s aq_err %s\n", port ? 
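[Editorial sketch, not part of the patch: all of these logging hunks converge on one convention, built on the i40e_stat_str()/i40e_aq_str() decoders this series adds (declared in the i40e_prototype.h hunk further down) -- decode both the driver status and the last admin-queue status into strings at the call site. The pattern, factored into a hypothetical helper:]

static void example_log_aq_failure(struct i40e_pf *pf, const char *what,
                                   i40e_status ret)
{
        dev_info(&pf->pdev->dev, "%s failed, err %s aq_err %s\n", what,
                 i40e_stat_str(&pf->hw, ret),
                 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}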
"add" : "delete", - ntohs(port), i, ret, - pf->hw.aq.asq_last_status); + ntohs(port), i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->vxlan_ports[i] = 0; } } @@ -7459,7 +7514,7 @@ static int i40e_config_rss(struct i40e_pf *pf) j = 0; /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (j & - ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); + (BIT(pf->hw.func_caps.rss_table_entry_width) - 1)); /* On i = 3, we have 4 entries in lut; write to the register */ if ((i & 3) == 3) wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); @@ -7533,7 +7588,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) i40e_status status; /* Set the valid bit for this PF */ - bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; @@ -7567,8 +7622,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for read access, err %d: aq_err %d\n", - ret, last_aq_status); + "Cannot acquire NVM for read access, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -7583,8 +7639,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; i40e_release_nvm(&pf->hw); if (ret) { - dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", - ret, last_aq_status); + dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } @@ -7596,8 +7653,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) last_aq_status = pf->hw.aq.asq_last_status; if (ret) { dev_info(&pf->pdev->dev, - "Cannot acquire NVM for write access, err %d: aq_err %d\n", - ret, last_aq_status); + "Cannot acquire NVM for write access, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); goto bw_commit_out; } /* Write it back out unchanged to initiate update NVM, @@ -7615,8 +7673,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) i40e_release_nvm(&pf->hw); if (ret) dev_info(&pf->pdev->dev, - "BW settings NOT SAVED, err %d aq_err %d\n", - ret, last_aq_status); + "BW settings NOT SAVED, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); bw_commit_out: return ret; @@ -7662,7 +7721,7 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ - pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; + pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); pf->rss_size = 1; pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, @@ -7673,7 +7732,7 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* MFP mode enabled */ - if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_npar_bw_setting(pf)) @@ -7812,7 +7871,7 @@ static int i40e_set_features(struct net_device *netdev, need_reset = i40e_set_ntuple(pf, features); if (need_reset) - 
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); return 0; } @@ -7875,7 +7934,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->vxlan_ports[next_idx] = port; - pf->pending_vxlan_bitmap |= (1 << next_idx); + pf->pending_vxlan_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); @@ -7906,7 +7965,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev, * and make it pending */ pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= (1 << idx); + pf->pending_vxlan_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", @@ -7981,7 +8040,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -#ifdef HAVE_BRIDGE_ATTRIBS /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured @@ -7995,7 +8053,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * bridge mode enabled. **/ static int i40e_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + u16 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8066,14 +8125,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. **/ -#ifdef HAVE_BRIDGE_FILTER static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) -#else -static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, int nlflags) -#endif /* HAVE_BRIDGE_FILTER */ { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8097,7 +8151,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, nlflags, 0, 0, filter_mask, NULL); } -#endif /* HAVE_BRIDGE_ATTRIBS */ + +#define I40E_MAX_TUNNEL_HDR_LEN 80 +/** + * i40e_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @netdev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t i40e_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation && + (skb_inner_mac_header(skb) - skb_transport_header(skb) > + I40E_MAX_TUNNEL_HDR_LEN)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; +} static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, @@ -8133,10 +8205,9 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, -#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, -#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -8304,8 +8375,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get PF vsi config, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't get PF vsi config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); return -ENOENT; 
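[Editorial sketch, not part of the patch: i40e_features_check() above caps how much encapsulation header the hardware is asked to parse; when the inner MAC header sits more than I40E_MAX_TUNNEL_HDR_LEN (80) bytes past the outer transport header, checksum and GSO offloads are withdrawn and the stack falls back to software. The test in isolation, with a hypothetical name:]

static bool example_tunnel_hdr_too_long(const struct sk_buff *skb)
{
        /* distance from outer L4 header to inner Ethernet header */
        return skb->encapsulation &&
               skb_inner_mac_header(skb) - skb_transport_header(skb) >
               I40E_MAX_TUNNEL_HDR_LEN;
}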
} vsi->info = ctxt.info; @@ -8327,8 +8400,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_info(&pf->pdev->dev, - "update vsi failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + "update vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -8345,9 +8420,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { dev_info(&pf->pdev->dev, - "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", - enabled_tc, ret, - pf->hw.aq.asq_last_status); + "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", + enabled_tc, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; } } @@ -8438,8 +8515,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_aq_add_vsi(hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, - "add vsi failed, aq_err=%d\n", - vsi->back->hw.aq.asq_last_status); + "add vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); ret = -ENOENT; goto err; } @@ -8484,8 +8563,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = i40e_vsi_get_bw_info(vsi); if (ret) { dev_info(&pf->pdev->dev, - "couldn't get vsi bw info, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't get vsi bw info, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); /* VSI is already added so not tearing that up */ ret = 0; } @@ -8658,7 +8738,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, - "failed to get tracking for %d queues for VSI %d err=%d\n", + "failed to get tracking for %d queues for VSI %d err %d\n", vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } @@ -8896,8 +8976,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw config failed, aq_err=%d\n", - hw->aq.asq_last_status); + "query veb bw config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -8905,8 +8986,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) &ets_data, NULL); if (ret) { dev_info(&pf->pdev->dev, - "query veb bw ets config failed, aq_err=%d\n", - hw->aq.asq_last_status); + "query veb bw ets config failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); goto out; } @@ -9090,36 +9172,40 @@ void i40e_veb_release(struct i40e_veb *veb) **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { + struct i40e_pf *pf = veb->pf; bool is_default = false; bool is_cloud = false; int ret; /* get a VEB from the hardware */ - ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, + ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, is_cloud, &veb->seid, NULL); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't add VEB, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "couldn't add VEB, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } /* get statistics counter */ - ret = 
i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, + ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, &veb->stats_idx, NULL, NULL, NULL); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't get VEB statistics idx, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "couldn't get VEB statistics idx, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return -EPERM; } ret = i40e_veb_get_bw_info(veb); if (ret) { - dev_info(&veb->pf->pdev->dev, - "couldn't get VEB bw info, err %d, aq_err %d\n", - ret, veb->pf->hw.aq.asq_last_status); - i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); + dev_info(&pf->pdev->dev, + "couldn't get VEB bw info, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + i40e_aq_delete_element(&pf->hw, veb->seid, NULL); return -ENOENT; } @@ -9325,8 +9411,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) &next_seid, NULL); if (ret) { dev_info(&pf->pdev->dev, - "get switch config failed %d aq_err=%x\n", - ret, pf->hw.aq.asq_last_status); + "get switch config failed err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); kfree(aq_buf); return -ENOENT; } @@ -9367,8 +9455,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) ret = i40e_fetch_switch_configuration(pf, false); if (ret) { dev_info(&pf->pdev->dev, - "couldn't fetch switch config, err %d, aq_err %d\n", - ret, pf->hw.aq.asq_last_status); + "couldn't fetch switch config, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); return ret; } i40e_pf_reset_stats(pf); @@ -9743,7 +9832,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_shared_code(hw); if (err) { - dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); goto err_pf_reset; } @@ -9910,15 +10000,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); if (err) - dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4)) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); } /* The main driver is (mostly) up and happy. 
We need to set this state * before setting up the misc vector or we get a race and the vector @@ -10006,8 +10100,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* get the requested speeds from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); if (err) - dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", - err); + dev_info(&pf->pdev->dev, + "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n", + i40e_stat_str(&pf->hw, err), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; /* print a string summarizing features */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 554e49d02683..ce986af213d2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); /* Switching to words (sr_size contains power of 2KB) */ - nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; + nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); @@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { /* Write the address and start reading */ - sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | - (1 << I40E_GLNVM_SRCTL_START_SHIFT); + sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + BIT(I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); /* Poll I40E_GLNVM_SRCTL until the done bit is set */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 7b34f1e660ea..d52a9f7873b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -58,6 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index a92b7725dec3..8c40d6ea15fd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -43,9 +43,8 @@ #define I40E_PTP_10GB_INCVAL 0x0333333333ULL #define I40E_PTP_1GB_INCVAL 0x2000000000ULL -#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ - I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) -#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) /** @@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); - if (!(prttsyn_stat & (1 << index))) + if (!(prttsyn_stat & BIT(index))) return; lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9a4f2bc70cd2..330e4ef43cd8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; - if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", @@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, dev_info(&pdev->dev, "FD filter programming failed due to incorrect filter parameters\n"); } - } else if (error == - (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", rx_desc->wb.qword0.hi_dword.fd_id); @@ -892,7 +891,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) * 20-1249MB/s bulk (8000 ints/s) */ bytes_per_int = rc->total_bytes / rc->itr; - switch (rc->itr) { + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) new_latency_range = I40E_LOW_LATENCY; @@ -905,9 +904,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) break; case I40E_BULK_LATENCY: if (bytes_per_int <= 20) - rc->latency_range = I40E_LOW_LATENCY; + new_latency_range = I40E_LOW_LATENCY; + break; + default: + if (bytes_per_int <= 20) + new_latency_range = I40E_LOW_LATENCY; break; } + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: @@ -923,42 +927,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) break; } - if (new_itr != rc->itr) { - /* do an exponential smoothing */ - new_itr = (10 * new_itr * rc->itr) / - ((9 * new_itr) + rc->itr); - rc->itr = new_itr & I40E_MAX_ITR; - } + if (new_itr != rc->itr) + rc->itr = new_itr; rc->total_bytes = 0; rc->total_packets = 0; } /** - * i40e_update_dynamic_itr - Adjust ITR based on bytes per int - * @q_vector: the vector to adjust - **/ -static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) -{ - u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; - struct i40e_hw *hw = &q_vector->vsi->back->hw; - u32 reg_addr; - u16 old_itr; - - reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1); - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) - wr32(hw, reg_addr, q_vector->rx.itr); - - reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1); - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) - wr32(hw, reg_addr, q_vector->tx.itr); -} - -/** * i40e_clean_programming_status - clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @rx_desc: the rx descriptor written back by HW @@ -1386,7 +1362,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, return; /* did the hardware decode the packet and checksum? 
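[Editorial sketch, not part of the patch: the i40e_set_new_dynamic_itr() hunk below fixes a real bug -- the switch was keyed on rc->itr (an interval in usecs) rather than the latency classification, so the intended state machine never ran. The band transitions it implements, restated with a hypothetical name:]

/* bytes_per_int thresholds move a ring between latency bands; the band
 * then selects a fixed ITR (100K/20K/8K ints/s in the driver code).
 */
static int example_next_band(int band, u32 bytes_per_int)
{
        switch (band) {
        case I40E_LOWEST_LATENCY:
                return bytes_per_int > 10 ? I40E_LOW_LATENCY : band;
        case I40E_LOW_LATENCY:
                if (bytes_per_int > 20)
                        return I40E_BULK_LATENCY;
                return bytes_per_int <= 10 ? I40E_LOWEST_LATENCY : band;
        default:        /* bulk and above */
                return bytes_per_int <= 20 ? I40E_LOW_LATENCY : band;
        }
}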
*/ - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ @@ -1401,25 +1377,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6 = true; if (ipv4 && - (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | - (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | + BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && - rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; /* If VXLAN traffic has an outer UDPv4 checksum we need to check @@ -1543,7 +1519,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1584,8 +1560,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1637,7 +1613,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; @@ -1647,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); continue; } @@ -1669,7 +1645,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
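[Editorial sketch, not part of the patch: every descriptor-status test in these Rx hunks has the same extract-then-test shape -- mask and shift the status field out of qword1, then probe single bits with BIT(). In isolation, with a hypothetical name:]

static bool example_desc_done(u64 qword)
{
        u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;

        return rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT);
}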
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE @@ -1730,7 +1706,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1753,7 +1729,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1771,13 +1747,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { rx_ring->rx_stats.non_eop_descs++; continue; } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); /* TODO: shouldn't we increment a counter indicating the * drop? @@ -1802,7 +1778,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE @@ -1827,6 +1803,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) } /** + * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) +{ + struct i40e_hw *hw = &vsi->back->hw; + u16 old_itr; + int vector; + u32 val; + + vector = (q_vector->v_idx + vsi->base_vector); + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { + old_itr = q_vector->rx.itr; + i40e_set_new_dynamic_itr(&q_vector->rx); + if (old_itr != q_vector->rx.itr) { + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_RX_ITR << + I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (q_vector->rx.itr << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + } else { + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << + I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + } + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); + } else { + i40e_irq_dynamic_enable(vsi, + q_vector->v_idx + vsi->base_vector); + } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { + old_itr = q_vector->tx.itr; + i40e_set_new_dynamic_itr(&q_vector->tx); + if (old_itr != q_vector->tx.itr) { + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_TX_ITR << + I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (q_vector->tx.itr << + I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); + } else { + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << + I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + } + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx + + vsi->base_vector - 1), val); + } else { + 
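[Editorial sketch, not part of the patch: i40e_update_enable_itr() above composes the same register value four times; factored out, the rearm word packs the enable and clear-PBA flags with the ITR index being updated and its new interval. Hypothetical helper:]

static u32 example_dyn_ctln(u8 itr_indx, u16 interval)
{
        return I40E_PFINT_DYN_CTLN_INTENA_MASK |
               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
               ((u32)itr_indx << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
               ((u32)interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
}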
i40e_irq_dynamic_enable(vsi, + q_vector->v_idx + vsi->base_vector); + } +} + +/** * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets @@ -1882,33 +1920,24 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete(napi); - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || - ITR_IS_DYNAMIC(vsi->tx_itr_setting)) - i40e_update_dynamic_itr(q_vector); - - if (!test_bit(__I40E_DOWN, &vsi->state)) { - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { - i40e_irq_dynamic_enable(vsi, - q_vector->v_idx + vsi->base_vector); - } else { - struct i40e_hw *hw = &vsi->back->hw; - /* We re-enable the queue 0 cause, but - * don't worry about dynamic_enable - * because we left it on for the other - * possible interrupts during napi - */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); - qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_RQCTL(0), qval); - - qval = rd32(hw, I40E_QINT_TQCTL(0)); - qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - - i40e_irq_dynamic_enable_icr0(vsi->back); - } + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_update_enable_itr(vsi, q_vector); + } else { /* Legacy mode */ + struct i40e_hw *hw = &vsi->back->hw; + /* We re-enable the queue 0 cause, but + * don't worry about dynamic_enable + * because we left it on for the other + * possible interrupts during napi + */ + u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) | + I40E_QINT_RQCTL_CAUSE_ENA_MASK; + + wr32(hw, I40E_QINT_RQCTL(0), qval); + qval = rd32(hw, I40E_QINT_TQCTL(0)) | + I40E_QINT_TQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_TQCTL(0), qval); + i40e_irq_dynamic_enable_icr0(vsi->back); } - return 0; } @@ -2616,6 +2645,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index))) writel(i, tx_ring->tail); + else + prefetchw(tx_desc + 1); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 0dc48dc9ca61..429833c47245 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -66,17 +66,17 @@ enum i40e_dyn_idx_t { /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) /* Supported Rx Buffer Sizes */ #define 
I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -129,17 +129,17 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM (u32)(1) -#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define I40E_TX_FLAGS_TSO (u32)(1 << 3) -#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) -#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) -#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) -#define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) -#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) +#define I40E_TX_FLAGS_CSUM BIT(0) +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_TSYN BIT(8) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 9a5a75b1e2bc..a20128b82b62 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -213,7 +213,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -487,6 +497,7 @@ struct i40e_hw { /* debug mask */ u32 debug_mask; + char err_str[16]; }; static inline bool i40e_is_vf(struct i40e_hw *hw) @@ -600,7 +611,7 @@ enum i40e_rx_desc_status_bits { }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ << I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT @@ -608,8 +619,8 @@ enum i40e_rx_desc_status_bits { I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ - I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -743,8 +754,7 @@ enum i40e_rx_ptype_payload_layer { I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ - I40E_RXD_QW1_LENGTH_SPH_SHIFT) +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ @@ -920,12 +930,12 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define 
I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ - I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -990,8 +1000,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ + BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1009,8 +1019,7 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ - I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) @@ -1134,6 +1143,8 @@ struct i40e_hw_port_stats { u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 2d20af290fbf..a7ab463b4474 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -110,7 +110,9 @@ struct i40e_virtchnl_msg { * error regardless of version mismatch. */ #define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 0 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + struct i40e_virtchnl_version_info { u32 major; u32 minor; @@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info { */ /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * VF sends this request to PF with no parameters + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * i40e_virtchnl_vf_resource and one or more * i40e_virtchnl_vsi_resource structures. 
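The comment above captures the 1.0/1.1 negotiation that the rest of this series implements: a v1.1 VF announces its API level, and the PF must keep answering legacy v1.0 VFs with minor 0 (see the VF_IS_V10 handling in i40e_vc_get_version_msg later in this diff). A minimal PF-side sketch of that decision, using only the version struct from this header; the helper name pf_reply_minor is hypothetical, not part of the patch:

    #include <linux/types.h>

    struct i40e_virtchnl_version_info {
            u32 major;
            u32 minor;
    };

    /* PF side: VFs running the 1.0 API must be told 1.0 back; newer VFs
     * may be given the PF's real minor so they know they can send
     * capability flags with their resource request.
     */
    static u32 pf_reply_minor(const struct i40e_virtchnl_version_info *vf_ver)
    {
            if (vf_ver->major == 1 && vf_ver->minor == 0)
                    return 0;   /* I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS */
            return 1;           /* I40E_VIRTCHNL_VERSION_MINOR */
    }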
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 23f95cdbdfcc..d29d4062addf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) **/ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) { - struct i40e_hw *hw = &pf->hw; - u32 reg; - - reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); - reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - i40e_flush(hw); + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); } /** @@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, } tempmap = vecmap->rxq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - linklistmap |= (1 << - (I40E_VIRTCHNL_SUPPORTED_QTYPES * - vsi_queue_id)); + linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id)); } tempmap = vecmap->txq_map; for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { - linklistmap |= (1 << - (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id - + 1)); + linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id + 1)); } next_q = find_first_bit(&linklistmap, @@ -337,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, reg = (vector_id) | (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | - (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); wr32(hw, reg_idx, reg); } @@ -542,11 +535,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id, true, false); + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF MAC addr\n"); - f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + f = i40e_add_filter(vsi, brdcast, + vf->port_vlan_id ? 
vf->port_vlan_id : -1, true, false); if (!f) dev_info(&pf->pdev->dev, @@ -835,6 +830,7 @@ complete_reset: i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -899,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf) for (vf_id = 0; vf_id < tmp; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } } clear_bit(__I40E_VF_DISABLE, &pf->state); @@ -1123,12 +1119,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, * * called from the VF to request the API version used by the PF **/ -static int i40e_vc_get_version_msg(struct i40e_vf *vf) +static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_virtchnl_version_info info = { I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR }; + vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg; + /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ + if (VF_IS_V10(vf)) + info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, (u8 *)&info, sizeof(struct @@ -1143,7 +1143,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf) * * called from the VF to request its resources **/ -static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) +static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { struct i40e_virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; @@ -1167,11 +1167,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) len = 0; goto err; } + if (VF_IS_V11(vf)) + vf->driver_caps = *(u32 *)msg; + else + vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; @@ -1773,9 +1780,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len = sizeof(struct i40e_virtchnl_version_info); break; case I40E_VIRTCHNL_OP_RESET_VF: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: valid_len = 0; break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(vf)) + valid_len = sizeof(u32); + else + valid_len = 0; + break; case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct i40e_virtchnl_txq_info); break; @@ -1888,10 +1900,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, switch (v_opcode) { case I40E_VIRTCHNL_OP_VERSION: - ret = i40e_vc_get_version_msg(vf); + ret = i40e_vc_get_version_msg(vf, msg); break; case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: - ret = i40e_vc_get_vf_resources_msg(vf); + ret = i40e_vc_get_vf_resources_msg(vf, msg); break; case I40E_VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); @@ -1969,9 +1981,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); - if (reg & (1 << bit_idx)) { + if (reg & BIT(bit_idx)) { /* clear the bit in GLGEN_VFLRSTAT */ - 
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); if (!test_bit(__I40E_DOWN, &pf->state)) i40e_reset_vf(vf, true); @@ -2023,7 +2035,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); /* Delete all the filters for this VSI - we're going to kill it @@ -2088,6 +2101,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, goto error_pvid; } + if (vsi->info.pvid == (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT))) + /* duplicate request, so just return success */ + goto error_pvid; + if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 09043c1aae54..736f6f08b4f2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -42,6 +42,9 @@ #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0x7000 +#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0)) +#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1)) + /* Various queue ctrls */ enum i40e_queue_ctrl { I40E_QUEUE_CTRL_UNKNOWN = 0, @@ -75,6 +78,8 @@ struct i40e_vf { u16 vf_id; /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; + struct i40e_virtchnl_version_info vf_ver; + u32 driver_caps; /* reported by VF driver */ /* VF Port Extender (PE) stag if used */ u16 stag; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e715bccfb5d2..d5bd6f066921 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MINOR 0x0004 #define I40E_FW_API_VERSION_A0_MINOR 0x0000 struct i40e_aq_desc { @@ -133,12 +133,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -260,7 +255,6 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -272,8 +266,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -507,7 +499,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 
0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -530,7 +523,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -1066,6 +1061,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 39fcb1dc4ea6..56c7e751149b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -72,6 +72,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) } /** + * i40evf_aq_str - convert AQ err code to a string + * @hw: pointer to the HW structure + * @aq_err: the AQ error code to convert + **/ +char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err) +{ + switch (aq_err) { + case I40E_AQ_RC_OK: + return "OK"; + case I40E_AQ_RC_EPERM: + return "I40E_AQ_RC_EPERM"; + case I40E_AQ_RC_ENOENT: + return "I40E_AQ_RC_ENOENT"; + case I40E_AQ_RC_ESRCH: + return "I40E_AQ_RC_ESRCH"; + case I40E_AQ_RC_EINTR: + return "I40E_AQ_RC_EINTR"; + case I40E_AQ_RC_EIO: + return "I40E_AQ_RC_EIO"; + case I40E_AQ_RC_ENXIO: + return "I40E_AQ_RC_ENXIO"; + case I40E_AQ_RC_E2BIG: + return "I40E_AQ_RC_E2BIG"; + case I40E_AQ_RC_EAGAIN: + return "I40E_AQ_RC_EAGAIN"; + case I40E_AQ_RC_ENOMEM: + return "I40E_AQ_RC_ENOMEM"; + case I40E_AQ_RC_EACCES: + return "I40E_AQ_RC_EACCES"; + case I40E_AQ_RC_EFAULT: + return "I40E_AQ_RC_EFAULT"; + case I40E_AQ_RC_EBUSY: + return "I40E_AQ_RC_EBUSY"; + case I40E_AQ_RC_EEXIST: + return "I40E_AQ_RC_EEXIST"; + case I40E_AQ_RC_EINVAL: + return "I40E_AQ_RC_EINVAL"; + case I40E_AQ_RC_ENOTTY: + return "I40E_AQ_RC_ENOTTY"; + case I40E_AQ_RC_ENOSPC: + return "I40E_AQ_RC_ENOSPC"; + case I40E_AQ_RC_ENOSYS: + return "I40E_AQ_RC_ENOSYS"; + case I40E_AQ_RC_ERANGE: + return "I40E_AQ_RC_ERANGE"; + case I40E_AQ_RC_EFLUSHED: + return "I40E_AQ_RC_EFLUSHED"; + case I40E_AQ_RC_BAD_ADDR: + return "I40E_AQ_RC_BAD_ADDR"; + case I40E_AQ_RC_EMODE: + return "I40E_AQ_RC_EMODE"; + case I40E_AQ_RC_EFBIG: + return "I40E_AQ_RC_EFBIG"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); + return hw->err_str; +} + +/** + * i40evf_stat_str - convert status err code to a string + * @hw: pointer to the HW structure + * @stat_err: the status error code to convert + **/ +char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err) +{ + switch (stat_err) { + case 0: + return "OK"; + case I40E_ERR_NVM: + return "I40E_ERR_NVM"; + case I40E_ERR_NVM_CHECKSUM: + return "I40E_ERR_NVM_CHECKSUM"; + case I40E_ERR_PHY: + return "I40E_ERR_PHY"; + case I40E_ERR_CONFIG: + return "I40E_ERR_CONFIG"; + case I40E_ERR_PARAM: + return "I40E_ERR_PARAM"; + case I40E_ERR_MAC_TYPE: + return "I40E_ERR_MAC_TYPE"; + case I40E_ERR_UNKNOWN_PHY: + return "I40E_ERR_UNKNOWN_PHY"; + case I40E_ERR_LINK_SETUP: + return "I40E_ERR_LINK_SETUP"; + case I40E_ERR_ADAPTER_STOPPED: + return "I40E_ERR_ADAPTER_STOPPED"; + case I40E_ERR_INVALID_MAC_ADDR: + return "I40E_ERR_INVALID_MAC_ADDR"; + 
case I40E_ERR_DEVICE_NOT_SUPPORTED: + return "I40E_ERR_DEVICE_NOT_SUPPORTED"; + case I40E_ERR_MASTER_REQUESTS_PENDING: + return "I40E_ERR_MASTER_REQUESTS_PENDING"; + case I40E_ERR_INVALID_LINK_SETTINGS: + return "I40E_ERR_INVALID_LINK_SETTINGS"; + case I40E_ERR_AUTONEG_NOT_COMPLETE: + return "I40E_ERR_AUTONEG_NOT_COMPLETE"; + case I40E_ERR_RESET_FAILED: + return "I40E_ERR_RESET_FAILED"; + case I40E_ERR_SWFW_SYNC: + return "I40E_ERR_SWFW_SYNC"; + case I40E_ERR_NO_AVAILABLE_VSI: + return "I40E_ERR_NO_AVAILABLE_VSI"; + case I40E_ERR_NO_MEMORY: + return "I40E_ERR_NO_MEMORY"; + case I40E_ERR_BAD_PTR: + return "I40E_ERR_BAD_PTR"; + case I40E_ERR_RING_FULL: + return "I40E_ERR_RING_FULL"; + case I40E_ERR_INVALID_PD_ID: + return "I40E_ERR_INVALID_PD_ID"; + case I40E_ERR_INVALID_QP_ID: + return "I40E_ERR_INVALID_QP_ID"; + case I40E_ERR_INVALID_CQ_ID: + return "I40E_ERR_INVALID_CQ_ID"; + case I40E_ERR_INVALID_CEQ_ID: + return "I40E_ERR_INVALID_CEQ_ID"; + case I40E_ERR_INVALID_AEQ_ID: + return "I40E_ERR_INVALID_AEQ_ID"; + case I40E_ERR_INVALID_SIZE: + return "I40E_ERR_INVALID_SIZE"; + case I40E_ERR_INVALID_ARP_INDEX: + return "I40E_ERR_INVALID_ARP_INDEX"; + case I40E_ERR_INVALID_FPM_FUNC_ID: + return "I40E_ERR_INVALID_FPM_FUNC_ID"; + case I40E_ERR_QP_INVALID_MSG_SIZE: + return "I40E_ERR_QP_INVALID_MSG_SIZE"; + case I40E_ERR_QP_TOOMANY_WRS_POSTED: + return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; + case I40E_ERR_INVALID_FRAG_COUNT: + return "I40E_ERR_INVALID_FRAG_COUNT"; + case I40E_ERR_QUEUE_EMPTY: + return "I40E_ERR_QUEUE_EMPTY"; + case I40E_ERR_INVALID_ALIGNMENT: + return "I40E_ERR_INVALID_ALIGNMENT"; + case I40E_ERR_FLUSHED_QUEUE: + return "I40E_ERR_FLUSHED_QUEUE"; + case I40E_ERR_INVALID_PUSH_PAGE_INDEX: + return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; + case I40E_ERR_INVALID_IMM_DATA_SIZE: + return "I40E_ERR_INVALID_IMM_DATA_SIZE"; + case I40E_ERR_TIMEOUT: + return "I40E_ERR_TIMEOUT"; + case I40E_ERR_OPCODE_MISMATCH: + return "I40E_ERR_OPCODE_MISMATCH"; + case I40E_ERR_CQP_COMPL_ERROR: + return "I40E_ERR_CQP_COMPL_ERROR"; + case I40E_ERR_INVALID_VF_ID: + return "I40E_ERR_INVALID_VF_ID"; + case I40E_ERR_INVALID_HMCFN_ID: + return "I40E_ERR_INVALID_HMCFN_ID"; + case I40E_ERR_BACKING_PAGE_ERROR: + return "I40E_ERR_BACKING_PAGE_ERROR"; + case I40E_ERR_NO_PBLCHUNKS_AVAILABLE: + return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE"; + case I40E_ERR_INVALID_PBLE_INDEX: + return "I40E_ERR_INVALID_PBLE_INDEX"; + case I40E_ERR_INVALID_SD_INDEX: + return "I40E_ERR_INVALID_SD_INDEX"; + case I40E_ERR_INVALID_PAGE_DESC_INDEX: + return "I40E_ERR_INVALID_PAGE_DESC_INDEX"; + case I40E_ERR_INVALID_SD_TYPE: + return "I40E_ERR_INVALID_SD_TYPE"; + case I40E_ERR_MEMCPY_FAILED: + return "I40E_ERR_MEMCPY_FAILED"; + case I40E_ERR_INVALID_HMC_OBJ_INDEX: + return "I40E_ERR_INVALID_HMC_OBJ_INDEX"; + case I40E_ERR_INVALID_HMC_OBJ_COUNT: + return "I40E_ERR_INVALID_HMC_OBJ_COUNT"; + case I40E_ERR_INVALID_SRQ_ARM_LIMIT: + return "I40E_ERR_INVALID_SRQ_ARM_LIMIT"; + case I40E_ERR_SRQ_ENABLED: + return "I40E_ERR_SRQ_ENABLED"; + case I40E_ERR_ADMIN_QUEUE_ERROR: + return "I40E_ERR_ADMIN_QUEUE_ERROR"; + case I40E_ERR_ADMIN_QUEUE_TIMEOUT: + return "I40E_ERR_ADMIN_QUEUE_TIMEOUT"; + case I40E_ERR_BUF_TOO_SHORT: + return "I40E_ERR_BUF_TOO_SHORT"; + case I40E_ERR_ADMIN_QUEUE_FULL: + return "I40E_ERR_ADMIN_QUEUE_FULL"; + case I40E_ERR_ADMIN_QUEUE_NO_WORK: + return "I40E_ERR_ADMIN_QUEUE_NO_WORK"; + case I40E_ERR_BAD_IWARP_CQE: + return "I40E_ERR_BAD_IWARP_CQE"; + case I40E_ERR_NVM_BLANK_MODE: + return "I40E_ERR_NVM_BLANK_MODE"; + case I40E_ERR_NOT_IMPLEMENTED: + 
return "I40E_ERR_NOT_IMPLEMENTED"; + case I40E_ERR_PE_DOORBELL_NOT_ENABLED: + return "I40E_ERR_PE_DOORBELL_NOT_ENABLED"; + case I40E_ERR_DIAG_TEST_FAILED: + return "I40E_ERR_DIAG_TEST_FAILED"; + case I40E_ERR_NOT_READY: + return "I40E_ERR_NOT_READY"; + case I40E_NOT_SUPPORTED: + return "I40E_NOT_SUPPORTED"; + case I40E_ERR_FIRMWARE_API_VERSION: + return "I40E_ERR_FIRMWARE_API_VERSION"; + } + + snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); + return hw->err_str; +} + +/** * i40evf_debug_aq * @hw: debug mask related to admin queue * @mask: debug mask diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h index 931c88044300..00ed24bfce13 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -126,8 +127,8 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -146,7 +147,7 @@ struct i40e_hmc_info { I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ wr32((hw), I40E_PFHMC_SDCMD, val3); \ @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 58e37a44b80a..856eb9d06595 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -60,6 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); i40e_status i40e_set_mac_type(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 395f32f226c0..60f88e4ad065 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -404,7 +404,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) * 20-1249MB/s bulk (8000 ints/s) */ bytes_per_int = rc->total_bytes / rc->itr; - switch (rc->itr) { + switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) new_latency_range = I40E_LOW_LATENCY; @@ -417,9 +417,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) 
break; case I40E_BULK_LATENCY: if (bytes_per_int <= 20) - rc->latency_range = I40E_LOW_LATENCY; + new_latency_range = I40E_LOW_LATENCY; + break; + default: + if (bytes_per_int <= 20) + new_latency_range = I40E_LOW_LATENCY; break; } + rc->latency_range = new_latency_range; switch (new_latency_range) { case I40E_LOWEST_LATENCY: @@ -435,42 +440,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) break; } - if (new_itr != rc->itr) { - /* do an exponential smoothing */ - new_itr = (10 * new_itr * rc->itr) / - ((9 * new_itr) + rc->itr); - rc->itr = new_itr & I40E_MAX_ITR; - } + if (new_itr != rc->itr) + rc->itr = new_itr; rc->total_bytes = 0; rc->total_packets = 0; } -/** - * i40e_update_dynamic_itr - Adjust ITR based on bytes per int - * @q_vector: the vector to adjust - **/ -static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) -{ - u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; - struct i40e_hw *hw = &q_vector->vsi->back->hw; - u32 reg_addr; - u16 old_itr; - - reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1); - old_itr = q_vector->rx.itr; - i40e_set_new_dynamic_itr(&q_vector->rx); - if (old_itr != q_vector->rx.itr) - wr32(hw, reg_addr, q_vector->rx.itr); - - reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1); - old_itr = q_vector->tx.itr; - i40e_set_new_dynamic_itr(&q_vector->tx); - if (old_itr != q_vector->tx.itr) - wr32(hw, reg_addr, q_vector->tx.itr); -} - -/** +/* * i40evf_setup_tx_descriptors - Allocate the Tx descriptors * @tx_ring: the tx ring to set up * @@ -873,7 +850,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, return; /* did the hardware decode the packet and checksum? */ - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; /* both known and outer_ip must be set for the below code to work */ @@ -888,25 +865,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6 = true; if (ipv4 && - (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | - (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | + BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT)))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ if (ipv6 && - rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) /* don't increment checksum err here, non-fatal err */ return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. 
*/ - if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; /* If VXLAN traffic has an outer UDPv4 checksum we need to check @@ -1027,7 +1004,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1063,8 +1040,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1116,7 +1093,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; @@ -1126,7 +1103,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); continue; } @@ -1141,7 +1118,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; #ifdef I40E_FCOE @@ -1202,7 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; - if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; /* This memory barrier is needed to keep us from reading @@ -1220,7 +1197,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; @@ -1238,13 +1215,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) I40E_RX_INCREMENT(rx_ring, i); if (unlikely( - !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { rx_ring->rx_stats.non_eop_descs++; continue; } /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); /* TODO: shouldn't we increment a counter indicating the * drop? @@ -1262,7 +1239,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; i40e_receive_skb(rx_ring, skb, vlan_tag); @@ -1281,6 +1258,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) } /** + * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) +{ + struct i40e_hw *hw = &vsi->back->hw; + u16 old_itr; + int vector; + u32 val; + + vector = (q_vector->v_idx + vsi->base_vector); + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { + old_itr = q_vector->rx.itr; + i40e_set_new_dynamic_itr(&q_vector->rx); + if (old_itr != q_vector->rx.itr) { + val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_RX_ITR << + I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (q_vector->rx.itr << + I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT); + } else { + val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << + I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT); + } + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); + } else { + i40evf_irq_enable_queues(vsi->back, 1 + << q_vector->v_idx); + } + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { + old_itr = q_vector->tx.itr; + i40e_set_new_dynamic_itr(&q_vector->tx); + if (old_itr != q_vector->tx.itr) { + val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_TX_ITR << + I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) | + (q_vector->tx.itr << + I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT); + + } else { + val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << + I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT); + } + if (!test_bit(__I40E_DOWN, &vsi->state)) + wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val); + } else { + i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx)); + } +} + +/** * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets @@ -1336,13 +1374,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete(napi); - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || - ITR_IS_DYNAMIC(vsi->tx_itr_setting)) - i40e_update_dynamic_itr(q_vector); - - if (!test_bit(__I40E_DOWN, &vsi->state)) - i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx); - + i40e_update_enable_itr(vsi, q_vector); return 0; } @@ -1841,6 +1873,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index))) writel(i, tx_ring->tail); + else + prefetchw(tx_desc + 1); return; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index e7a34f899f2c..6b47c818d1f0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -66,17 +66,17 @@ enum i40e_dyn_idx_t { /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ - ((u64)1 << 
I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ - ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ @@ -129,16 +129,16 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM (u32)(1) -#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) -#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) -#define I40E_TX_FLAGS_TSO (u32)(1 << 3) -#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) -#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) -#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) -#define I40E_TX_FLAGS_FSO (u32)(1 << 7) -#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) -#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) +#define I40E_TX_FLAGS_CSUM BIT(0) +#define I40E_TX_FLAGS_HW_VLAN BIT(1) +#define I40E_TX_FLAGS_SW_VLAN BIT(2) +#define I40E_TX_FLAGS_TSO BIT(3) +#define I40E_TX_FLAGS_IPV4 BIT(4) +#define I40E_TX_FLAGS_IPV6 BIT(5) +#define I40E_TX_FLAGS_FCCRC BIT(6) +#define I40E_TX_FLAGS_FSO BIT(7) +#define I40E_TX_FLAGS_FD_SB BIT(9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c463ec41579c..4ba9a012dcba 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -213,7 +213,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -481,6 +491,7 @@ struct i40e_hw { /* debug mask */ u32 debug_mask; + char err_str[16]; }; static inline bool i40e_is_vf(struct i40e_hw *hw) @@ -594,7 +605,7 @@ enum i40e_rx_desc_status_bits { }; #define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ << I40E_RXD_QW1_STATUS_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT @@ -602,8 +613,8 @@ enum i40e_rx_desc_status_bits { I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ - I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, @@ -737,8 +748,7 @@ enum 
i40e_rx_ptype_payload_layer { I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ - I40E_RXD_QW1_LENGTH_SPH_SHIFT) +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) enum i40e_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ @@ -914,12 +924,12 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 #define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ - I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK @@ -984,8 +994,8 @@ enum i40e_filter_program_desc_fd_status { }; #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \ + BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 #define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ @@ -1003,8 +1013,7 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ - I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ I40E_TXD_FLTR_QW1_CMD_SHIFT) @@ -1109,6 +1118,8 @@ struct i40e_hw_port_stats { u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 59f62f0e65dd..1e89dea0d529 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -110,7 +110,9 @@ struct i40e_virtchnl_msg { * error regardless of version mismatch. */ #define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 0 +#define I40E_VIRTCHNL_VERSION_MINOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + struct i40e_virtchnl_version_info { u32 major; u32 minor; @@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info { */ /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * VF sends this request to PF with no parameters + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * i40e_virtchnl_vf_resource and one or more * i40e_virtchnl_vsi_resource structures. 
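Complementing the PF-side sketch earlier, the VF half of the same handshake appears later in this series as i40evf_send_vf_config_msg: only a VF that negotiated v1.1 attaches the u32 capability bitmap to I40E_VIRTCHNL_OP_GET_VF_RESOURCES, while a v1.0 VF still sends an empty request. A condensed sketch under that assumption; send_msg_to_pf is a stand-in for the real admin-queue send, not a driver API:

    #include <linux/types.h>

    #define I40E_VIRTCHNL_VF_OFFLOAD_L2      0x00000001
    #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ  0x00000008
    #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
    #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN    0x00010000

    /* VF side: advertise capabilities only when the PF speaks v1.1 */
    static int request_vf_resources(bool pf_is_v11,
                                    int (*send_msg_to_pf)(u8 *msg, u16 len))
    {
            u32 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
                       I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

            if (pf_is_v11)
                    return send_msg_to_pf((u8 *)&caps, sizeof(caps));
            return send_msg_to_pf(NULL, 0);
    }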
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource { u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 struct i40e_virtchnl_vf_resource { u16 num_vsis; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index fea3b75a9a35..c33c7cce52fe 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -207,17 +207,17 @@ struct i40evf_adapter { struct msix_entry *msix_entries; u32 flags; -#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) -#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) -#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) -#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3) -#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4) -#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) -#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) -#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) -#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8) -#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9) -#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10) +#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) +#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) +#define I40EVF_FLAG_RX_PS_ENABLED BIT(3) +#define I40EVF_FLAG_IN_NETPOLL BIT(4) +#define I40EVF_FLAG_IMIR_ENABLED BIT(5) +#define I40EVF_FLAG_MQ_CAPABLE BIT(6) +#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) +#define I40EVF_FLAG_RESET_PENDING BIT(9) +#define I40EVF_FLAG_RESET_NEEDED BIT(10) /* duplcates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 @@ -225,15 +225,16 @@ struct i40evf_adapter { #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED /* flags for admin queue service task */ u32 aq_required; -#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) -#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) -#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) -#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) -#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) -#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) -#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) -#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) -#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) +#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0) +#define I40EVF_FLAG_AQ_DISABLE_QUEUES BIT(1) +#define I40EVF_FLAG_AQ_ADD_MAC_FILTER BIT(2) +#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3) +#define I40EVF_FLAG_AQ_DEL_MAC_FILTER BIT(4) +#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5) +#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) +#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) +#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) +#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) /* OS defined structs */ struct net_device *netdev; @@ -249,8 +250,17 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_virtchnl_ops current_op; +#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_IWARP) +#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & 
\ + I40E_VIRTCHNL_VF_OFFLOAD_VLAN) struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + struct i40e_virtchnl_version_info pf_version; +#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ + ((_a)->pf_version.minor == 1)) u16 msg_enable; struct i40e_eth_stats current_stats; struct i40e_vsi vsi; @@ -264,6 +274,7 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); +int i40evf_process_config(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 2b53c870e7f1..4790437a50ac 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, break; case TCP_V6_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: - if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; @@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: return -EINVAL; @@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: return -EINVAL; @@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; default: return -EINVAL; @@ -517,12 
+517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; @@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); break; case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, if ((nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) return -EINVAL; - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); break; case IPV4_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; case IPV6_FLOW: - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; default: return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4ab4ebba07a1..1503cad918d8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.25" +#define DRV_VERSION "1.3.2" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -240,7 +240,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) int i; for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & (1 << (i - 1))) { + if (mask & BIT(i - 1)) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | @@ -268,7 +268,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & (1 << i)) { + if (mask & BIT(i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | @@ -377,7 +377,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) q_vector->tx.count++; q_vector->tx.latency_range = I40E_LOW_LATENCY; q_vector->num_ringpairs++; - q_vector->ring_mask |= (1 << t_idx); + q_vector->ring_mask |= BIT(t_idx); } /** @@ -406,7 +406,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) /* The ideal configuration... * We have enough vectors to map one per queue. 
*/ - if (q_vectors == (rxr_remaining * 2)) { + if (q_vectors >= (rxr_remaining * 2)) { for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); @@ -892,8 +892,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev) break; } } + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) + found = true; } - if (found) { + if (!found) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; } @@ -1369,6 +1371,10 @@ static void i40evf_watchdog_task(struct work_struct *work) } goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) { + i40evf_send_vf_config_msg(adapter); + goto watchdog_done; + } if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { i40evf_disable_queues(adapter); @@ -1604,7 +1610,8 @@ continue_reset: dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); - i40evf_map_queues(adapter); + adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { @@ -1614,7 +1621,7 @@ continue_reset: list_for_each_entry(f, &adapter->vlan_filter_list, list) { f->add = true; } - adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); @@ -1856,6 +1863,7 @@ static int i40evf_open(struct net_device *netdev) if (err) goto err_req_irq; + i40evf_add_filter(adapter, adapter->hw.mac.addr); i40evf_configure(adapter); err = i40evf_up_complete(adapter); @@ -1979,6 +1987,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) } /** + * i40evf_process_config - Process the config information we got from the PF + * @adapter: board private structure + * + * Verify that we have a valid config struct, and set up our netdev features + * and our VSI struct.
+ **/ +int i40evf_process_config(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + /* got VF config message back from PF, now we can parse it */ + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (adapter->vf_res->vf_offload_flags + & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { + netdev->vlan_features = netdev->features; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + netdev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_GRO; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features &= ~NETIF_F_RXCSUM; + + adapter->vsi.id = adapter->vsi_res->vsi_id; + + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); + adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); + adapter->vsi.netdev = adapter->netdev; + return 0; +} + +/** * i40evf_init_task - worker thread to perform delayed initialization * @work: pointer to work_struct containing our data * @@ -1996,10 +2060,9 @@ static void i40evf_init_task(struct work_struct *work) struct i40evf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40evf_mac_filter *f; struct i40e_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - int i, err, bufsz; + int err, bufsz; switch (adapter->state) { case __I40EVF_STARTUP: @@ -2050,6 +2113,12 @@ static void i40evf_init_task(struct work_struct *work) if (err) { if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) err = i40evf_send_api_ver(adapter); + else + dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n", + adapter->pf_version.major, + adapter->pf_version.minor, + I40E_VIRTCHNL_VERSION_MAJOR, + I40E_VIRTCHNL_VERSION_MINOR); goto err; } err = i40evf_send_vf_config_msg(adapter); @@ -2085,42 +2154,15 @@ static void i40evf_init_task(struct work_struct *work) default: goto err_alloc; } - /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - adapter->vsi_res = &adapter->vf_res->vsi_res[i]; - } - if (!adapter->vsi_res) { - dev_err(&pdev->dev, "No LAN VSI found\n"); + if (i40evf_process_config(adapter)) goto err_alloc; - } + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - netdev->features |= NETIF_F_HIGHDMA | - NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_GRO; - - if (adapter->vf_res->vf_offload_flags - & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features; - netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; - } - - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - 
netdev->hw_features &= ~NETIF_F_RXCSUM; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@ -2130,16 +2172,6 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - goto err_sw_init; - - ether_addr_copy(f->macaddr, adapter->hw.mac.addr); - f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - - list_add(&f->list, &adapter->mac_filter_list); - init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &i40evf_watchdog_timer; adapter->watchdog_timer.data = (unsigned long)adapter; @@ -2161,17 +2193,6 @@ static void i40evf_init_task(struct work_struct *work) netif_carrier_off(netdev); - adapter->vsi.id = adapter->vsi_res->vsi_id; - adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ - adapter->vsi.back = adapter; - adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; - adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | - ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); - adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | - ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); - adapter->vsi.netdev = adapter->netdev; - if (!adapter->netdev_registered) { err = register_netdev(netdev); if (err) @@ -2299,7 +2320,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &adapter->hw; hw->back = adapter; - adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; adapter->state = __I40EVF_STARTUP; /* Call save state here because it relies on the adapter struct. */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 61e090558f31..d4eb1a5e7d42 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) - dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", - op, err, hw->aq.asq_last_status); + dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", + op, i40evf_stat_str(hw, err), + i40evf_aq_str(hw, hw->aq.asq_last_status)); return err; } @@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) } pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; - if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) || - (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR)) + adapter->pf_version = *pf_vvi; + + if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) || + ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) && + (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) err = -EIO; out_alloc: @@ -145,8 +149,24 @@ out: **/ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) { - return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); + u32 caps; + + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | + I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; + if (PF_IS_V11(adapter)) + return i40evf_send_pf_msg(adapter, + 
I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); + else + return i40evf_send_pf_msg(adapter, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); } /** @@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, @@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.tx_queues = BIT(adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, @@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, return; } if (v_retval) { - dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", - __func__, v_retval, v_opcode); + dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n", + __func__, v_retval, + i40evf_stat_str(&adapter->hw, v_retval), v_opcode); } switch (v_opcode) { case I40E_VIRTCHNL_OP_GET_STATS: { @@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->current_stats = *stats; } break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: { + u16 len = sizeof(struct i40e_virtchnl_vf_resource) + + I40E_MAX_VF_VSI * + sizeof(struct i40e_virtchnl_vsi_resource); + memcpy(adapter->vf_res, msg, min(msglen, len)); + i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + i40evf_process_config(adapter); + } + break; case I40E_VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ i40evf_irq_enable(adapter, true); @@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, i40evf_free_all_rx_resources(adapter); break; case I40E_VIRTCHNL_OP_VERSION: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: /* Don't display an error if we get these out of sequence. 
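The GET_VF_RESOURCES completion above copies a PF-supplied message with memcpy(adapter->vf_res, msg, min(msglen, len)), clamping the wire-reported length against the size that was actually allocated. A sketch of the same defensive copy, with made-up type names standing in for the i40e structures:

    #include <stddef.h>
    #include <string.h>

    #define MAX_VF_VSI 3    /* stand-in for I40E_MAX_VF_VSI */

    struct vsi_resource { unsigned short vsi_id; };
    struct vf_resource {
        unsigned short num_vsis;
        struct vsi_resource vsi_res[MAX_VF_VSI];
    };

    /* Clamp the wire length against the preallocated buffer before
     * copying; a malformed or oversized PF reply can then never
     * overrun vf_res, it is at worst truncated.
     */
    static void store_vf_config(struct vf_resource *dst,
                                const void *msg, size_t msglen)
    {
        size_t len = sizeof(*dst);

        memcpy(dst, msg, msglen < len ? msglen : len);
    }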
* If the firmware needed to get kicked, we'll get these and diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b0182dd31346..d19256994e5c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (ret_val) return ret_val; - /* reset page to 0 */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); - if (ret_val) - return ret_val; if (data & E1000_M88E1112_STATUS_LINK) port = E1000_MEDIA_PORT_OTHER; @@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (port && (hw->dev_spec._82575.media_port != port)) { hw->dev_spec._82575.media_port = port; hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); } else { - ret_val = igb_check_for_link_82575(hw); + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; } return 0; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index c1bb64d8366f..987c9de24764 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,5 +1,5 @@ /* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. + * Copyright(c) 2007-2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,9 +36,6 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; -#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_m88_cable_length_table) / \ - sizeof(e1000_m88_cable_length_table[0])) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, @@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = { 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; -#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ - (sizeof(e1000_igp_2_cable_length_table) / \ - sizeof(e1000_igp_2_cable_length_table[0])) /** * igb_check_reset_block - Check if PHY reset is blocked @@ -1700,7 +1694,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1796,7 +1790,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) { ret_val = -E1000_ERR_PHY; goto out; } @@ -1840,7 +1834,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) s32 ret_val = 0; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; - u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 min_agc_index = 
ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { IGP02E1000_PHY_AGC_A, IGP02E1000_PHY_AGC_B, @@ -1863,7 +1857,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) IGP02E1000_AGC_LENGTH_MASK; /* Array index bound check. */ - if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) || (cur_agc_index == 0)) { ret_val = -E1000_ERR_PHY; goto out; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index d5673eb90c54..b7b9c670bb3c 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); int i; + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 3) && (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || @@ -2396,10 +2417,6 @@ static int igb_get_ts_info(struct net_device *dev, info->rx_filters |= (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 2f70a9b152bd..41e274046896 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -57,8 +57,8 @@ #include "igb.h" #define MAJ 5 -#define MIN 2 -#define BUILD 18 +#define MIN 3 +#define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -6621,22 +6621,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif + unsigned int pull_len; - if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - va += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; - } + if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } + if (likely(size <= IGB_RX_HDR_LEN)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as-is */ @@ -6648,8 +6651,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); return igb_can_reuse_rx_page(rx_buffer, page, truesize); } @@ -6791,62 +6807,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, } /** - * igb_pull_tail - igb specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being adjusted - * - * This function is an igb specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. 
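The igb_add_rx_frag() rework above replaces the old pull-tail pass: protocol headers are copied into the skb's linear area once, up front, and the leftover payload is attached as a page fragment with va advanced past the pulled bytes. A toy model of that split, assuming a fixed header budget where the driver uses eth_get_headlen():

    #include <string.h>

    #define RX_HDR_LEN 256    /* stand-in for IGB_RX_HDR_LEN */

    struct toy_skb {
        unsigned char linear[RX_HDR_LEN];
        unsigned int linear_len;
        const unsigned char *frag;    /* payload stays in the DMA page */
        unsigned int frag_len;
    };

    /* Pull only the headers into the linear area; the rest of the
     * buffer is referenced in place as a fragment, so large payloads
     * are never copied twice.
     */
    static void add_rx_frag(struct toy_skb *skb, const unsigned char *va,
                            unsigned int size)
    {
        unsigned int pull_len = size < RX_HDR_LEN ? size : RX_HDR_LEN;

        memcpy(skb->linear, va, pull_len);
        skb->linear_len = pull_len;
        skb->frag = va + pull_len;    /* advance past the pulled bytes */
        skb->frag_len = size - pull_len;
    }

Doing the pull at frag-attach time is what lets the cleanup path above drop its igb_pull_tail() call entirely.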
- */ -static void igb_pull_tail(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - /* retrieve timestamp from buffer */ - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - - /* update pointers to remove timestamp header */ - skb_frag_size_sub(frag, IGB_TS_HDR_LEN); - frag->page_offset += IGB_TS_HDR_LEN; - skb->data_len -= IGB_TS_HDR_LEN; - skb->len -= IGB_TS_HDR_LEN; - - /* move va to start of packet data */ - va += IGB_TS_HDR_LEN; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * igb_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -6873,10 +6833,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - igb_pull_tail(rx_ring, rx_desc, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 6b87d9634614..b1e364d26aa7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1394,14 +1394,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor * Initialize the drop queue * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | - IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ec7b2324b77b..f7aeb560a504 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2938,14 +2938,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9aa6104e34ea..3e6a9319c718 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1360,14 +1360,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) } #endif /* CONFIG_IXGBE_DCA */ + +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - if (ring->netdev->features & NETIF_F_RXHASH) - skb_set_hash(skb, - le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - PKT_HASH_TYPE_L3); + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 770e21a64388..58434584b16d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 #define IXGBE_RXDADV_SPH 0x8000 +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ IXGBE_RXD_ERR_LE | \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index b2f5b161d792..d3e5f5b37999 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - /* We support this operation only for 82599 and x540 at the moment */ - if (adapter->hw.mac.type < ixgbe_mac_X550_vf) - return IXGBEVF_82599_RETA_SIZE; + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) + return IXGBEVF_X550_VFRETA_SIZE; - return 0; + return IXGBEVF_82599_RETA_SIZE; } static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) { - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - /* We support this operation only for 82599 and x540 at the moment */ - if (adapter->hw.mac.type < ixgbe_mac_X550_vf) - return IXGBEVF_RSS_HASH_KEY_SIZE; - - return 0; + return IXGBEVF_RSS_HASH_KEY_SIZE; } static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, @@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - /* If neither indirection table nor hash key was requested - just - * return a success avoiding taking any locks. 
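The new ixgbe_rx_hash() above decides between PKT_HASH_TYPE_L4 and PKT_HASH_TYPE_L3 by testing the descriptor's RSS type against a bitmask of the TCP/UDP type codes instead of branching per type. The same test in isolation (type values copied from the defines added for ixgbevf; the function name is illustrative):

    #include <stdbool.h>

    enum {
        RSSTYPE_NONE     = 0,
        RSSTYPE_IPV4_TCP = 1,
        RSSTYPE_IPV4     = 2,
        RSSTYPE_IPV6_TCP = 3,
        RSSTYPE_IPV4_UDP = 7,
        RSSTYPE_IPV6_UDP = 8,
    };

    #define RSS_L4_TYPES_MASK \
        ((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP) | \
         (1ul << RSSTYPE_IPV6_TCP) | (1ul << RSSTYPE_IPV6_UDP))

    /* One mask test classifies the hash: a set bit means the hardware
     * hashed over L4 ports too, so the stack may trust it for flow
     * steering; anything else is only an L3 hash.
     */
    static bool rss_hash_covers_l4(unsigned int rss_type)
    {
        return (RSS_L4_TYPES_MASK & (1ul << rss_type)) != 0;
    }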
- */ - if (!indir && !key) - return 0; + if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) { + if (key) + memcpy(key, adapter->rss_key, sizeof(adapter->rss_key)); - spin_lock_bh(&adapter->mbx_lock); - if (indir) - err = ixgbevf_get_reta_locked(&adapter->hw, indir, - adapter->num_rx_queues); + if (indir) { + int i; - if (!err && key) - err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + } + } else { + /* If neither indirection table nor hash key was requested + * - just return a success avoiding taking any locks. + */ + if (!indir && !key) + return 0; - spin_unlock_bh(&adapter->mbx_lock); + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + } return err; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 775d08900949..04c7ec8446e0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -144,9 +144,11 @@ struct ixgbevf_ring { #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES -#define IXGBEVF_MAX_RSS_QUEUES 2 -#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 /* 128 entries */ +#define IXGBEVF_X550_VFRETA_SIZE 64 /* 64 entries */ #define IXGBEVF_RSS_HASH_KEY_SIZE 40 +#define IXGBEVF_VFRSSRK_REGS 10 /* 10 registers for RSS key */ #define IXGBEVF_DEFAULT_TXD 1024 #define IXGBEVF_DEFAULT_RXD 512 @@ -447,6 +449,9 @@ struct ixgbevf_adapter { spinlock_t mbx_lock; unsigned long last_reset; + + u32 rss_key[IXGBEVF_VFRSSRK_REGS]; + u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; }; enum ixbgevf_state_t { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e71cdde9cb01..88298a3ef942 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + +static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data @@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { + ixgbevf_rx_hash(rx_ring, rx_desc, skb); ixgbevf_rx_checksum(rx_ring, rx_desc, skb); if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -649,46 +676,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } /** - * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an ixgbevf specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - **/ -static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ixgbevf_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IXGBEVF_RX_BUFSZ; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif + unsigned int pull_len; - if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; + if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as is */ @@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad.

+ */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); /* avoid re-using remote pages */ if (unlikely(ixgbevf_page_is_reserved(page))) @@ -1697,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 vfmrqc = 0, vfreta = 0; - u32 rss_key[10]; u16 rss_i = adapter->num_rx_queues; - int i, j; + u8 i, j; /* Fill out hash function seeds */ - netdev_rss_key_fill(rss_key, sizeof(rss_key)); - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); - /* Fill out redirection table */ - for (i = 0, j = 0; i < 64; i++, j++) { + for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | (j * 0x1); - if ((i & 3) == 3) + + adapter->rss_indir_tbl[i] = j; + + vfreta |= j << (i & 0x3) * 8; + if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); + vfreta = 0; + } } /* Perform hash on these packet types */ diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 62e48bc0cb23..fe2299ac4f5c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3027,8 +3027,8 @@ static int mvneta_probe(struct platform_device *pdev) const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; + const char *managed; int phy_mode; - int fixed_phy = 0; int err; /* Our multiqueue support is not complete, so for now, only @@ -3062,7 +3062,6 @@ static int mvneta_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot register fixed PHY\n"); goto err_free_irq; } - fixed_phy = 1; /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. 
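The ixgbevf_setup_vfmrqc() change above both caches the indirection table in adapter->rss_indir_tbl (so ethtool can report it without a mailbox round trip) and packs it four 8-bit entries per 32-bit VFRETA register. A standalone sketch of that packing loop, with printf standing in for the register write:

    #include <stdint.h>
    #include <stdio.h>

    #define RETA_SIZE 64    /* IXGBEVF_X550_VFRETA_SIZE */

    /* Pack four 8-bit table entries per 32-bit register, entry i
     * landing in byte (i & 3); every fourth entry flushes one
     * register, exactly as the vfmrqc setup does.
     */
    static void program_reta(unsigned int rss_i)
    {
        uint32_t vfreta = 0;
        unsigned int i, j;

        for (i = 0, j = 0; i < RETA_SIZE; i++, j++) {
            if (j == rss_i)
                j = 0;    /* wrap over the active queue count */
            vfreta |= (uint32_t)j << (i & 0x3) * 8;
            if ((i & 3) == 3) {
                printf("VFRETA(%u) = 0x%08x\n", i >> 2, vfreta);
                vfreta = 0;    /* next register starts clean */
            }
        }
    }

Note the accumulator is reset after each write; the old code shifted entries in from the top and never cleared it, which is part of what this hunk straightens out.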
@@ -3086,8 +3085,10 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); pp->phy_node = phy_node; pp->phy_interface = phy_mode; - pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && - fixed_phy; + + err = of_property_read_string(dn, "managed", &managed); + pp->use_inband_status = (err == 0 && + strcmp(managed, "in-band-status") == 0); pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index 52a6665b7abf..d54701047401 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX source "drivers/net/ethernet/mellanox/mlx4/Kconfig" source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig" +source "drivers/net/ethernet/mellanox/mlxsw/Kconfig" endif # NET_VENDOR_MELLANOX diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile index 38fe32ef5e5f..2e2a5ec509ac 100644 --- a/drivers/net/ethernet/mellanox/Makefile +++ b/drivers/net/ethernet/mellanox/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_MLX5_CORE) += mlx5/core/ +obj-$(CONFIG_MLXSW_CORE) += mlxsw/ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 99ba1c50e585..f79d8124321e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { "blueflame", + "phv-bit" }; static const char main_strings[][ETH_GSTRING_LEN] = { @@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev, static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) { struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV); + bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV); int i; + int ret = 0; - if (bf_enabled_new == bf_enabled_old) - return 0; /* Nothing to do */ + if (bf_enabled_new != bf_enabled_old) { + if (bf_enabled_new) { + bool bf_supported = true; - if (bf_enabled_new) { - bool bf_supported = true; + for (i = 0; i < priv->tx_ring_num; i++) + bf_supported &= priv->tx_ring[i]->bf_alloced; - for (i = 0; i < priv->tx_ring_num; i++) - bf_supported &= priv->tx_ring[i]->bf_alloced; + if (!bf_supported) { + en_err(priv, "BlueFlame is not supported\n"); + return -EINVAL; + } - if (!bf_supported) { - en_err(priv, "BlueFlame is not supported\n"); - return -EINVAL; + priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; } - priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; - } else { - priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; - } - - for (i = 0; i < priv->tx_ring_num; i++) - priv->tx_ring[i]->bf_enabled = bf_enabled_new; + for (i = 0; i < priv->tx_ring_num; i++) + priv->tx_ring[i]->bf_enabled = bf_enabled_new; - en_info(priv, "BlueFlame %s\n", - bf_enabled_new ? "Enabled" : "Disabled"); + en_info(priv, "BlueFlame %s\n", + bf_enabled_new ? 
"Enabled" : "Disabled"); + } + if (phv_enabled_new != phv_enabled_old) { + ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new); + if (ret) + return ret; + else if (phv_enabled_new) + priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; + else + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV; + en_info(priv, "PHV bit %s\n", + phv_enabled_new ? "Enabled" : "Disabled"); + } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e0de2fd1ce12..4726122ea76b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } +static netdev_features_t mlx4_en_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx4_en_priv *en_priv = netdev_priv(netdev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + /* Since there is no support for separate RX C-TAG/S-TAG vlan accel + * enable/disable make sure S-TAG flag is always in same state as + * C-TAG. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX && + !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) + features |= NETIF_F_HW_VLAN_STAG_RX; + else + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + return features; +} + static int mlx4_en_set_features(struct net_device *netdev, netdev_features_t features) { @@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev, en_info(priv, "Turn %s TX vlan strip offload\n", (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX)) + en_info(priv, "Turn %s TX S-VLAN strip offload\n", + (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF"); + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { en_info(priv, "Turn %s loopback\n", (features & NETIF_F_LOOPBACK) ? 
"ON" : "OFF"); @@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_poll_controller = mlx4_en_netpoll, #endif .ndo_set_features = mlx4_en_set_features, + .ndo_fix_features = mlx4_en_fix_features, .ndo_setup_tc = mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, @@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_poll_controller = mlx4_en_netpoll, #endif .ndo_set_features = mlx4_en_set_features, + .ndo_fix_features = mlx4_en_fix_features, .ndo_setup_tc = mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, @@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { + dev->features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; + dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX; + } + + if (mlx4_is_slave(mdev->dev)) { + int phv; + + err = get_phv_bit(mdev->dev, port, &phv); + if (!err && phv) { + dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV; + } + } else { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && + !(mdev->dev->caps.flags2 & + MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) + dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + } + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9c145dddd717..4402a1e48c9b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -725,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, hw_checksum = csum_unfold((__force __sum16)cqe->checksum); - if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); hdr += sizeof(struct vlan_hdr); @@ -906,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->csum_level = 1; if ((cqe->vlan_my_qpn & - cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u16 vid = be16_to_cpu(cqe->sl_vid); __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); + } else if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_SVLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_STAG_RX)) { + __vlan_hwaccel_put_tag(gro_skb, + htons(ETH_P_8021AD), + be16_to_cpu(cqe->sl_vid)); } if (dev->features & NETIF_F_RXHASH) skb_set_hash(gro_skb, be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + (ip_summed == CHECKSUM_UNNECESSARY) ? + PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, cq->ring); skb_mark_napi_id(gro_skb, &cq->napi); @@ -962,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + (ip_summed == CHECKSUM_UNNECESSARY) ? 
+ PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_L3); if ((be32_to_cpu(cqe->vlan_my_qpn) & - MLX4_CQE_VLAN_PRESENT_MASK) && + MLX4_CQE_CVLAN_PRESENT_MASK) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); + else if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_SVLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_STAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + be16_to_cpu(cqe->sl_vid)); if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { timestamp = mlx4_en_get_cqe_ts(cqe); @@ -1065,7 +1080,10 @@ static const int frag_sizes[] = { void mlx4_en_calc_rx_buf(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN; + /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple + * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). + */ + int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN); int buf_size = 0; int i = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c10d98f6ad96..494e7762fdb1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) u32 index, bf_index; __be32 op_own; u16 vlan_tag = 0; + u16 vlan_proto = 0; int i_frag; int lso_header_size; void *fragptr = NULL; @@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; } - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tag_present(skb)) { vlan_tag = skb_vlan_tag_get(skb); - + vlan_proto = be16_to_cpu(skb->vlan_proto); + } netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); @@ -958,8 +960,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->bf.offset ^= ring->bf.buf_size; } else { tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * - !!skb_vlan_tag_present(skb); + if (vlan_proto == ETH_P_8021AD) + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; + else if (vlan_proto == ETH_P_8021Q) + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; + tx_desc->ctrl.fence_size = real_size; /* Ensure new descriptor hits memory diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index e30bf57ad7a1..e8ec1dec5789 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [26] = "Port ETS Scheduler support", [27] = "Port beacon support", [28] = "RX-ALL support", + [29] = "802.1ad offload support", }; int i; @@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) +#define QUERY_FUNC_CAP_PHV_BIT 0x40 if (vhcr->op_modifier == 1) { struct mlx4_active_ports actv_ports = @@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], QUERY_FUNC_CAP_PHYS_PORT_ID); + if (dev->caps.phv_bit[port]) { + field = QUERY_FUNC_CAP_PHV_BIT; + MLX4_PUT(outbox->buf, field, + QUERY_FUNC_CAP_FLAGS0_OFFSET); + } + } else if (vhcr->op_modifier == 0) { struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); @@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox, QUERY_FUNC_CAP_PHYS_PORT_ID); + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); + func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT); + /* All other resources are allocated by the master, but we still report * 'num' and 'reserved' capabilities as follows: * - num remains the maximum resource index @@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 +#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c @@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; if (field & (1 << 2)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN; + if (field & 0x40) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN; + MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); @@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + /* phv_check enable */ + MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); + if (byte_field & 0x2) + param->phv_check_en = 1; out: mlx4_free_cmd_mailbox(dev, mailbox); @@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } + +static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit) +{ +#define SET_PORT_GEN_PHV_VALID 0x10 +#define SET_PORT_GEN_PHV_EN 0x80 + + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + + context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID; + if (phv_bit) + context->phv_en |= SET_PORT_GEN_PHV_EN; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv) +{ + int err; + struct mlx4_func_cap func_cap; + + memset(&func_cap, 0, sizeof(func_cap)); + err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap); + if (!err) + *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT; + return err; +} +EXPORT_SYMBOL(get_phv_bit); + +int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val) +{ + int ret; + + if (mlx4_is_slave(dev)) + return -EPERM; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { + ret = mlx4_SET_PORT_phv_bit(dev, port, new_val); + if (!ret) + dev->caps.phv_bit[port] = new_val; + return ret; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(set_phv_bit); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 07cb7c2461ad..08de5555c2f4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -204,6 +204,7 @@ struct 
mlx4_init_hca_param { u16 cqe_size; /* For use only when CQE stride feature enabled */ u16 eqe_size; /* For use only when EQE stride feature enabled */ u8 rss_ip_frags; + u8 phv_check_en; /* for QUERY_HCA */ }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 29c2a017a450..121c579888bb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { + struct mlx4_init_hca_param hca_param; + + memset(&hca_param, 0, sizeof(hca_param)); + err = mlx4_QUERY_HCA(dev, &hca_param); + /* Turn off the PHV_EN flag in case phv_check_en is set. + * phv_check_en is a HW check that parses the packet and verifies + * that the phv bit was reported correctly in the wqe. To allow QinQ, + * the PHV_EN flag should be set and phv_check_en must be cleared; + * otherwise QinQ packets will be dropped by the HW. + */ + if (err || hca_param.phv_check_en) + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; + } + /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; @@ -2912,6 +2927,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, { u64 dev_flags = dev->flags; int err = 0; + int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), + MLX4_MAX_NUM_VF); if (reset_flow) { dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), @@ -2937,6 +2954,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, } if (!(dev->flags & MLX4_FLAG_SRIOV)) { + if (total_vfs > fw_enabled_sriov_vfs) { + mlx4_err(dev, "requested vfs (%d) > available vfs (%d).
Continuing without SR_IOV\n", + total_vfs, fw_enabled_sriov_vfs); + err = -ENOMEM; + goto disable_sriov; + } mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); err = pci_enable_sriov(pdev, total_vfs); } @@ -3418,20 +3441,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, goto err_disable_pdev; } } - if (total_vfs >= MLX4_MAX_NUM_VF) { + if (total_vfs > MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, - "Requested more VF's (%d) than allowed (%d)\n", - total_vfs, MLX4_MAX_NUM_VF - 1); + "Requested more VF's (%d) than allowed by hw (%d)\n", + total_vfs, MLX4_MAX_NUM_VF); err = -EINVAL; goto err_disable_pdev; } for (i = 0; i < MLX4_MAX_PORTS; i++) { - if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { dev_err(&pdev->dev, - "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", nvfs[i] + nvfs[2], i + 1, - MLX4_MAX_NUM_VF_P_PORT - 1); + MLX4_MAX_NUM_VF_P_PORT); err = -EINVAL; goto err_disable_pdev; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index a092c5c34d43..232b2b55f23b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -787,6 +787,9 @@ struct mlx4_set_port_general_context { u8 pprx; u8 pfcrx; u16 reserved4; + u32 reserved5; + u8 phv_en; + u8 reserved6[3]; }; struct mlx4_set_port_rqp_calc_context { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 666d1669eb52..defcf8c395bf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -95,6 +95,7 @@ */ #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1 +#define MLX4_EN_PRIV_FLAGS_PHV 2 #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 0715b497511f..6cb38304669f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -45,15 +45,34 @@ * register it in a memory region at HCA virtual address 0. 
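The mlx5 helper introduced just below (mlx5_dma_zalloc_coherent_node) gets node-local coherent memory out of an allocator that only honors the device's own NUMA node: it temporarily repoints the device node, allocates, and restores it, serialized by a mutex. A userspace model of the same save/override/restore pattern, with toy types in place of the mlx5 ones:

    #include <pthread.h>
    #include <stddef.h>

    struct toy_dev {
        int numa_node;            /* what dev_to_node() would report */
        pthread_mutex_t alloc_mutex;
    };

    /* The underlying allocator consults only dev->numa_node, so point
     * it at the target node for one allocation and put it back; the
     * mutex keeps a concurrent caller from allocating on the wrong
     * node in the window where the field is overridden.
     */
    static void *alloc_on_node(struct toy_dev *dev, size_t size, int node,
                               void *(*dma_alloc)(struct toy_dev *, size_t))
    {
        void *buf;
        int saved;

        pthread_mutex_lock(&dev->alloc_mutex);
        saved = dev->numa_node;
        dev->numa_node = node;
        buf = dma_alloc(dev, size);
        dev->numa_node = saved;
        pthread_mutex_unlock(&dev->alloc_mutex);
        return buf;
    }

This is what lets the completion-queue and doorbell pages below land on the NUMA node of the interrupt vector that will touch them, without a node-aware DMA API.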
*/ -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) +static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, + size_t size, dma_addr_t *dma_handle, + int node) +{ + struct mlx5_priv *priv = &dev->priv; + int original_node; + void *cpu_handle; + + mutex_lock(&priv->alloc_mutex); + original_node = dev_to_node(&dev->pdev->dev); + set_dev_node(&dev->pdev->dev, node); + cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, + dma_handle, GFP_KERNEL); + set_dev_node(&dev->pdev->dev, original_node); + mutex_unlock(&priv->alloc_mutex); + return cpu_handle; +} + +int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, + struct mlx5_buf *buf, int node) { dma_addr_t t; buf->size = size; buf->npages = 1; buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); + buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size, + &t, node); if (!buf->direct.buf) return -ENOMEM; @@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) return 0; } + +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) +{ + return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node); +} EXPORT_SYMBOL_GPL(mlx5_buf_alloc); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) @@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) } EXPORT_SYMBOL_GPL(mlx5_buf_free); -static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) +static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, + int node) { struct mlx5_db_pgdir *pgdir; @@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) return NULL; bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); - pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, - &pgdir->db_dma, GFP_KERNEL); + + pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, + &pgdir->db_dma, node); if (!pgdir->db_page) { kfree(pgdir); return NULL; @@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, return 0; } -int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node) { struct mlx5_db_pgdir *pgdir; int ret = 0; @@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) if (!mlx5_alloc_db_from_pgdir(pgdir, db)) goto out; - pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); + pgdir = mlx5_alloc_db_pgdir(dev, node); if (!pgdir) { ret = -ENOMEM; goto out; @@ -145,6 +171,12 @@ out: return ret; } +EXPORT_SYMBOL_GPL(mlx5_db_alloc_node); + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + return mlx5_db_alloc_node(dev, db, dev->priv.numa_node); +} EXPORT_SYMBOL_GPL(mlx5_db_alloc); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 3d23bd657e3c..45f6dc75c0df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -60,6 +60,7 @@ #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ +#define MLX5E_SQ_BF_BUDGET 16 static const char vport_strings[][ETH_GSTRING_LEN] = { /* vport statistics */ @@ -195,6 +196,8 @@ struct mlx5e_params { u16 rx_hash_log_tbl_sz; bool lro_en; u32 lro_wqe_sz; + u8 rss_hfunc; + u16 tx_max_inline; }; enum { @@ -266,7 
+269,9 @@ struct mlx5e_sq { /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; u32 dma_fifo_pc; - u32 bf_offset; + u16 bf_offset; + u16 prev_cc; + u8 bf_budget; struct mlx5e_sq_stats stats; struct mlx5e_cq cq; @@ -279,9 +284,10 @@ struct mlx5e_sq { struct mlx5_wq_cyc wq; u32 dma_fifo_mask; void __iomem *uar_map; + void __iomem *uar_bf_map; struct netdev_queue *txq; u32 sqn; - u32 bf_buf_size; + u16 bf_buf_size; u16 max_inline; u16 edge; struct device *pdev; @@ -324,14 +330,18 @@ struct mlx5e_channel { }; enum mlx5e_traffic_types { - MLX5E_TT_IPV4_TCP = 0, - MLX5E_TT_IPV6_TCP = 1, - MLX5E_TT_IPV4_UDP = 2, - MLX5E_TT_IPV6_UDP = 3, - MLX5E_TT_IPV4 = 4, - MLX5E_TT_IPV6 = 5, - MLX5E_TT_ANY = 6, - MLX5E_NUM_TT = 7, + MLX5E_TT_IPV4_TCP, + MLX5E_TT_IPV6_TCP, + MLX5E_TT_IPV4_UDP, + MLX5E_TT_IPV6_UDP, + MLX5E_TT_IPV4_IPSEC_AH, + MLX5E_TT_IPV6_IPSEC_AH, + MLX5E_TT_IPV4_IPSEC_ESP, + MLX5E_TT_IPV6_IPSEC_ESP, + MLX5E_TT_IPV4, + MLX5E_TT_IPV6, + MLX5E_TT_ANY, + MLX5E_NUM_TT, }; enum { @@ -379,7 +389,6 @@ struct mlx5e_flow_table { struct mlx5e_priv { /* priv data path fields - start */ - int num_tc; int default_vlan_prio; struct mlx5e_sq **txq_to_sq_map; /* priv data path fields - end */ @@ -487,12 +496,12 @@ void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); -int mlx5e_update_priv_params(struct mlx5e_priv *priv, - struct mlx5e_params *new_params); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, - struct mlx5e_tx_wqe *wqe) + struct mlx5e_tx_wqe *wqe, int bf_sz) { + u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; + /* ensure wqe is visible to device before updating doorbell record */ dma_wmb(); @@ -503,9 +512,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); - mlx5_write64((__be32 *)&wqe->ctrl, - sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset, - NULL); + if (bf_sz) { + __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz); + + /* flush the write-combining mapped buffer */ + wmb(); + + } else { + mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); + } sq->bf_offset ^= sq->bf_buf_size; } @@ -519,3 +534,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) } extern const struct ethtool_ops mlx5e_ethtool_ops; +u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 388938482ff9..b95aa3384c36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -173,7 +173,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return NUM_VPORT_COUNTERS + priv->params.num_channels * NUM_RQ_STATS + - priv->params.num_channels * priv->num_tc * + priv->params.num_channels * priv->params.num_tc * NUM_SQ_STATS; /* fallthrough */ default: @@ -207,7 +207,7 @@ static void mlx5e_get_strings(struct net_device *dev, "rx%d_%s", i, rq_stats_strings[j]); for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -242,7 +242,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, ((u64 *)&priv->channel[i]->rq.stats)[j]; for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) for (j = 0; j < NUM_SQ_STATS; j++) 
data[idx++] = !test_bit(MLX5E_STATE_OPENED, &priv->state) ? 0 : @@ -264,7 +264,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_params new_params; + bool was_opened; u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; @@ -316,11 +316,18 @@ static int mlx5e_set_ringparam(struct net_device *dev, return 0; mutex_lock(&priv->state_lock); - new_params = priv->params; - new_params.log_rq_size = log_rq_size; - new_params.log_sq_size = log_sq_size; - new_params.min_rx_wqes = min_rx_wqes; - err = mlx5e_update_priv_params(priv, &new_params); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(dev); + + priv->params.log_rq_size = log_rq_size; + priv->params.log_sq_size = log_sq_size; + priv->params.min_rx_wqes = min_rx_wqes; + + if (was_opened) + err = mlx5e_open_locked(dev); + mutex_unlock(&priv->state_lock); return err; @@ -342,7 +349,7 @@ static int mlx5e_set_channels(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); int ncv = priv->mdev->priv.eq_table.num_comp_vectors; unsigned int count = ch->combined_count; - struct mlx5e_params new_params; + bool was_opened; int err = 0; if (!count) { @@ -365,9 +372,16 @@ static int mlx5e_set_channels(struct net_device *dev, return 0; mutex_lock(&priv->state_lock); - new_params = priv->params; - new_params.num_channels = count; - err = mlx5e_update_priv_params(priv, &new_params); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(dev); + + priv->params.num_channels = count; + + if (was_opened) + err = mlx5e_open_locked(dev); + mutex_unlock(&priv->state_lock); return err; @@ -662,6 +676,101 @@ out: return err; } +static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (hfunc) + *hfunc = priv->params.rss_hfunc; + + return 0; +} + +static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + if (hfunc == ETH_RSS_HASH_NO_CHANGE) + return 0; + + if ((hfunc != ETH_RSS_HASH_XOR) && + (hfunc != ETH_RSS_HASH_TOP)) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + priv->params.rss_hfunc = hfunc; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + mlx5e_close_locked(dev); + err = mlx5e_open_locked(dev); + } + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + void *data) +{ + const struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + *(u32 *)data = priv->params.tx_max_inline; + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +static int mlx5e_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + bool was_opened; + u32 val; + int err = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + val = *(u32 *)data; + if (val > mlx5e_get_max_inline_cap(mdev)) { + err = -EINVAL; + break; + } + + mutex_lock(&priv->state_lock); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(dev); + + priv->params.tx_max_inline = val; + + if (was_opened) + err = mlx5e_open_locked(dev); + + 
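/*
 * Editor's sketch, not part of this patch: with mlx5e_update_priv_params()
 * removed (see en_main.c below), every ethtool reconfiguration path above
 * repeats the same idiom under priv->state_lock: close the device if it was
 * opened, mutate priv->params in place, then reopen. A hypothetical helper
 * capturing that idiom:
 */
static int example_reconfigure(struct net_device *dev,
			       void (*change)(struct mlx5e_priv *priv))
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool was_opened;
	int err = 0;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(dev);

	change(priv);	/* e.g. update priv->params.num_channels */

	if (was_opened)
		err = mlx5e_open_locked(dev);

	mutex_unlock(&priv->state_lock);
	return err;
}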
mutex_unlock(&priv->state_lock); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -676,4 +785,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_coalesce = mlx5e_set_coalesce, .get_settings = mlx5e_get_settings, .set_settings = mlx5e_set_settings, + .get_rxfh = mlx5e_get_rxfh, + .set_rxfh = mlx5e_set_rxfh, + .get_tunable = mlx5e_get_tunable, + .set_tunable = mlx5e_set_tunable, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index 120db80c47aa..70ec31b9e1e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, { void *ft = priv->ft.main; - if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) + mlx5_del_flow_table_entry(ft, + ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]); + + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); - if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) + if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); - if (ai->tt_vec & (1 << MLX5E_TT_ANY)) + if (ai->tt_vec & BIT(MLX5E_TT_ANY)) mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); } @@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) switch (eth_addr_type) { case MLX5E_UC: ret = - (1 << MLX5E_TT_IPV4_TCP) | - (1 << MLX5E_TT_IPV6_TCP) | - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV4) | - (1 << MLX5E_TT_IPV6) | - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | 0; break; case MLX5E_MC_IPV4: ret = - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV4) | 0; break; case MLX5E_MC_IPV6: ret = - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV6) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV6) | 0; break; case MLX5E_MC_OTHER: ret = - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_ANY) | 0; break; } @@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) case MLX5E_ALLMULTI: 
ret = - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV4) | - (1 << MLX5E_TT_IPV6) | - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | 0; break; default: /* MLX5E_PROMISC */ ret = - (1 << MLX5E_TT_IPV4_TCP) | - (1 << MLX5E_TT_IPV6_TCP) | - (1 << MLX5E_TT_IPV4_UDP) | - (1 << MLX5E_TT_IPV6_UDP) | - (1 << MLX5E_TT_IPV4) | - (1 << MLX5E_TT_IPV6) | - (1 << MLX5E_TT_ANY) | + BIT(MLX5E_TT_IPV4_TCP) | + BIT(MLX5E_TT_IPV6_TCP) | + BIT(MLX5E_TT_IPV4_UDP) | + BIT(MLX5E_TT_IPV6_UDP) | + BIT(MLX5E_TT_IPV4_IPSEC_AH) | + BIT(MLX5E_TT_IPV6_IPSEC_AH) | + BIT(MLX5E_TT_IPV4_IPSEC_ESP) | + BIT(MLX5E_TT_IPV6_IPSEC_ESP) | + BIT(MLX5E_TT_IPV4) | + BIT(MLX5E_TT_IPV6) | + BIT(MLX5E_TT_ANY) | 0; break; } @@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, u8 *match_criteria_dmac; void *ft = priv->ft.main; u32 *tirn = priv->tirn; + u32 *ft_ix; u32 tt_vec; int err; @@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, tt_vec = mlx5e_get_tt_vec(ai, type); - if (tt_vec & (1 << MLX5E_TT_ANY)) { + ft_ix = &ai->ft_ix[MLX5E_TT_ANY]; + if (tt_vec & BIT(MLX5E_TT_ANY)) { MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_ANY]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_ANY]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_ANY); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_ANY); } match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); - if (tt_vec & (1 << MLX5E_TT_IPV4)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4]; + if (tt_vec & BIT(MLX5E_TT_IPV4)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IP); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV4]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV4]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV4); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4); } - if (tt_vec & (1 << MLX5E_TT_IPV6)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6]; + if (tt_vec & BIT(MLX5E_TT_IPV6)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IPV6); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV6]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV6]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV6); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6); } MLX5_SET_TO_ONES(fte_match_param, match_criteria, @@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, IPPROTO_UDP); - if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP]; + if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IP); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV4_UDP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV4_UDP]); - if (err) { - 
mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); } - if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP]; + if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IPV6); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV6_UDP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV6_UDP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); } MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, IPPROTO_TCP); - if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP]; + if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IP); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV4_TCP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV4_TCP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); } - if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) { + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP]; + if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, ETH_P_IPV6); MLX5_SET(dest_format_struct, dest, destination_id, tirn[MLX5E_TT_IPV6_TCP]); err = mlx5_add_flow_table_entry(ft, match_criteria_enable, match_criteria, flow_context, - &ai->ft_ix[MLX5E_TT_IPV6_TCP]); - if (err) { - mlx5e_del_eth_addr_from_flow_table(priv, ai); - return err; - } - ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP); + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_AH); + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]; + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_IPSEC_AH]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); + } + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]; + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_IPSEC_AH]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_ESP); + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]; + if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_IPSEC_ESP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + 
ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); + } + + ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]; + if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_IPSEC_ESP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + ft_ix); + if (err) + goto err_del_ai; + + ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); } return 0; + +err_del_ai: + mlx5e_del_eth_addr_from_flow_table(priv, ai); + + return err; } static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, @@ -725,7 +821,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) if (!g) return -ENOMEM; - g[0].log_sz = 2; + g[0].log_sz = 3; g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, outer_headers.ethertype); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 40206da1f9d7..bb815893d3a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -41,6 +41,7 @@ struct mlx5e_rq_param { struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; + u16 max_inline; }; struct mlx5e_cq_param { @@ -116,7 +117,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->rx_csum_none += rq_stats->csum_none; s->rx_wqe_err += rq_stats->wqe_err; - for (j = 0; j < priv->num_tc; j++) { + for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; s->tso_packets += sq_stats->tso_packets; @@ -272,6 +273,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, int err; int i; + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, &rq->wq_ctrl); if (err) @@ -342,11 +345,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) memcpy(rqc, param->rqc, sizeof(param->rqc)); - MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); + MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, flush_in_error_en, 1); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - - PAGE_SHIFT); + MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); mlx5_fill_page_array(&rq->wq_ctrl.buf, @@ -502,6 +505,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, if (err) return err; + param->wq.db_numa_node = cpu_to_node(c->cpu); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); if (err) @@ -509,7 +514,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; sq->uar_map = sq->uar.map; + sq->uar_bf_map = sq->uar.bf_map; sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + sq->max_inline = param->max_inline; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -518,11 +525,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, txq_ix = c->ix + tc * priv->params.num_channels; sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); - sq->pdev = c->pdev; - sq->mkey_be = c->mkey_be; - sq->channel = c; - sq->tc = tc; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->tc = tc; + sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + sq->bf_budget = MLX5E_SQ_BF_BUDGET; priv->txq_to_sq_map[txq_ix] = sq; return 0; @@ -569,7 +577,6 @@ static int 
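/*
 * Editor's note, not part of this patch: besides centralizing the unwind in
 * the err_del_ai label, the hunk above bumps g[0].log_sz from 2 to 3
 * because the flow group matching on {ethertype, ip_protocol} must now hold
 * TCP, UDP, AH and ESP entries for both IPv4 and IPv6 - eight rules, which
 * no longer fit in 2^2 = 4 group entries but do fit in 2^3 = 8.
 */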
mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) memcpy(sqc, param->sqc, sizeof(param->sqc)); - MLX5_SET(sqc, sqc, user_index, sq->tc); MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); @@ -579,7 +586,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, sq->uar.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - - PAGE_SHIFT); + MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); mlx5_fill_page_array(&sq->wq_ctrl.buf, @@ -702,7 +709,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, int err; u32 i; - param->wq.numa = cpu_to_node(c->cpu); + param->wq.buf_numa_node = cpu_to_node(c->cpu); + param->wq.db_numa_node = cpu_to_node(c->cpu); param->eq_ix = c->ix; err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, @@ -773,7 +781,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - - PAGE_SHIFT); + MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); err = mlx5_core_create_cq(mdev, mcq, in, inlen); @@ -929,7 +937,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mr.key); - c->num_tc = priv->num_tc; + c->num_tc = priv->params.num_tc; mlx5e_build_tc_to_txq_map(c, priv->params.num_channels); @@ -1000,7 +1008,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); MLX5_SET(wq, wq, pd, priv->pdn); - param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; } @@ -1014,7 +1022,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, pd, priv->pdn); - param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); + param->max_inline = priv->params.tx_max_inline; } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -1059,27 +1068,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, static int mlx5e_open_channels(struct mlx5e_priv *priv) { struct mlx5e_channel_param cparam; + int nch = priv->params.num_channels; int err = -ENOMEM; int i; int j; - priv->channel = kcalloc(priv->params.num_channels, - sizeof(struct mlx5e_channel *), GFP_KERNEL); + priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *), + GFP_KERNEL); - priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc, + priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc, sizeof(struct mlx5e_sq *), GFP_KERNEL); if (!priv->channel || !priv->txq_to_sq_map) goto err_free_txq_to_sq_map; mlx5e_build_channel_param(priv, &cparam); - for (i = 0; i < priv->params.num_channels; i++) { + for (i = 0; i < nch; i++) { err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); if (err) goto err_close_channels; } - for (j = 0; j < priv->params.num_channels; j++) { + for (j = 0; j < nch; j++) { err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); if (err) goto err_close_channels; @@ -1130,11 +1140,10 @@ static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) 
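/*
 * Editor's note, not part of this patch: the log_wq_pg_sz/log_page_size
 * fields above are expressed in device pages, which are fixed at 4KB
 * (MLX5_ADAPTER_PAGE_SHIFT == 12), not in host pages. On a host with 64KB
 * pages, a buffer with page_shift 16 must be reported as 16 - 12 = 4,
 * whereas the old "- PAGE_SHIFT" arithmetic would wrongly yield 0.
 */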
static int mlx5e_open_tises(struct mlx5e_priv *priv) { - int num_tc = priv->num_tc; int err; int tc; - for (tc = 0; tc < num_tc; tc++) { + for (tc = 0; tc < priv->params.num_tc; tc++) { err = mlx5e_open_tis(priv, tc); if (err) goto err_close_tises; @@ -1151,26 +1160,41 @@ err_close_tises: static void mlx5e_close_tises(struct mlx5e_priv *priv) { - int num_tc = priv->num_tc; int tc; - for (tc = 0; tc < num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) mlx5e_close_tis(priv, tc); } +static int mlx5e_rx_hash_fn(int hfunc) +{ + return (hfunc == ETH_RSS_HASH_TOP) ? + MLX5_RX_HASH_FN_TOEPLITZ : + MLX5_RX_HASH_FN_INVERTED_XOR8; +} + +static int mlx5e_bits_invert(unsigned long a, int size) +{ + int inv = 0; + int i; + + for (i = 0; i < size; i++) + inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; + + return inv; +} + static int mlx5e_open_rqt(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; - u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; void *rqtc; int inlen; int err; - int sz; + int log_tbl_sz = priv->params.rx_hash_log_tbl_sz; + int sz = 1 << log_tbl_sz; int i; - sz = 1 << priv->params.rx_hash_log_tbl_sz; - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); if (!in) @@ -1182,17 +1206,16 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv) MLX5_SET(rqtc, rqtc, rqt_max_size, sz); for (i = 0; i < sz; i++) { - int ix = i % priv->params.num_channels; + int ix = i; + if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, log_tbl_sz); + + ix = ix % priv->params.num_channels; MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); } - MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); - - memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); - if (!err) - priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn); kvfree(in); @@ -1201,16 +1224,7 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv) static void mlx5e_close_rqt(struct mlx5e_priv *priv) { - u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; - - memset(in, 0, sizeof(in)); - - MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); - MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); - - mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out, - sizeof(out)); + mlx5_core_destroy_rqt(priv->mdev, priv->rqtn); } static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) @@ -1221,13 +1235,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) #define ROUGH_MAX_L2_L3_HDR_SZ 256 -#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP) +#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP) + +#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_L4_SPORT |\ + MLX5_HASH_FIELD_SEL_L4_DPORT) -#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP |\ - MLX5_HASH_FIELD_SEL_L4_SPORT |\ - MLX5_HASH_FIELD_SEL_L4_DPORT) +#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_IPSEC_SPI) if (priv->params.lro_en) { MLX5_SET(tirc, tirc, lro_enable_mask, @@ -1254,12 +1272,16 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(tirc, tirc, indirect_table, priv->rqtn); MLX5_SET(tirc, tirc, rx_hash_fn, - MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); - 
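/*
 * Editor's note, not part of this patch: mlx5e_bits_invert() above mirrors
 * the low "size" bits of the index, so with the XOR8 hash function
 * consecutive hash values land far apart in the indirection table instead
 * of filling neighbouring entries. For a log_tbl_sz of 4:
 *
 *	mlx5e_bits_invert(1, 4) == 8	(0b0001 -> 0b1000)
 *	mlx5e_bits_invert(2, 4) == 4	(0b0010 -> 0b0100)
 *	mlx5e_bits_invert(3, 4) == 12	(0b0011 -> 0b1100)
 */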
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key), - MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key)); + mlx5e_rx_hash_fn(priv->params.rss_hfunc)); + if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { + void *rss_key = MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key); + size_t len = MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + netdev_rss_key_fill(rss_key, len); + } break; } @@ -1270,7 +1292,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); + MLX5_HASH_IP_L4PORTS); break; case MLX5E_TT_IPV6_TCP: @@ -1279,7 +1301,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); + MLX5_HASH_IP_L4PORTS); break; case MLX5E_TT_IPV4_UDP: @@ -1288,7 +1310,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_UDP); MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); + MLX5_HASH_IP_L4PORTS); break; case MLX5E_TT_IPV6_UDP: @@ -1297,7 +1319,35 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, MLX5_L4_PROT_TYPE_UDP); MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_ALL); + MLX5_HASH_IP_L4PORTS); + break; + + case MLX5E_TT_IPV4_IPSEC_AH: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV6_IPSEC_AH: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV4_IPSEC_ESP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); + break; + + case MLX5E_TT_IPV6_IPSEC_ESP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP_IPSEC_SPI); break; case MLX5E_TT_IPV4: @@ -1520,26 +1570,6 @@ static int mlx5e_close(struct net_device *netdev) return err; } -int mlx5e_update_priv_params(struct mlx5e_priv *priv, - struct mlx5e_params *new_params) -{ - int err = 0; - int was_opened; - - WARN_ON(!mutex_is_locked(&priv->state_lock)); - - was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) - mlx5e_close_locked(priv->netdev); - - priv->params = *new_params; - - if (was_opened) - err = mlx5e_open_locked(priv->netdev); - - return err; -} - static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -1589,20 +1619,22 @@ static int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); + int err = 0; netdev_features_t changes = features ^ netdev->features; - struct mlx5e_params new_params; - bool update_params = false; mutex_lock(&priv->state_lock); - new_params = priv->params; if (changes & NETIF_F_LRO) { - new_params.lro_en = !!(features & NETIF_F_LRO); - 
update_params = true; - } + bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + + if (was_opened) + mlx5e_close_locked(priv->netdev); - if (update_params) - mlx5e_update_priv_params(priv, &new_params); + priv->params.lro_en = !!(features & NETIF_F_LRO); + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); + } if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) @@ -1620,8 +1652,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool was_opened; int max_mtu; - int err; + int err = 0; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ -1633,8 +1666,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) } mutex_lock(&priv->state_lock); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(netdev); + netdev->mtu = new_mtu; - err = mlx5e_update_priv_params(priv, &priv->params); + + if (was_opened) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); return err; @@ -1673,6 +1714,15 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) return 0; } +u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) +{ + int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + + return bf_buf_size - + sizeof(struct mlx5e_tx_wqe) + + 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_comp_vectors) @@ -1691,6 +1741,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; priv->params.tx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.rx_hash_log_tbl_sz = @@ -1700,6 +1751,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; priv->params.num_tc = 1; priv->params.default_vlan_prio = 0; + priv->params.rss_hfunc = ETH_RSS_HASH_XOR; priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap); priv->params.lro_wqe_sz = @@ -1708,7 +1760,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->mdev = mdev; priv->netdev = netdev; priv->params.num_channels = num_comp_vectors; - priv->num_tc = priv->params.num_tc; priv->default_vlan_prio = priv->params.default_vlan_prio; spin_lock_init(&priv->async_events_spinlock); @@ -1733,9 +1784,8 @@ static void mlx5e_build_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (priv->num_tc > 1) { + if (priv->params.num_tc > 1) mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; - } netdev->netdev_ops = &mlx5e_netdev_ops; netdev->watchdog_timeo = 15 * HZ; @@ -1819,36 +1869,31 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); if (err) { - netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); goto err_free_netdev; } err = mlx5_core_alloc_pd(mdev, &priv->pdn); if (err) { - netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "alloc pd failed, %d\n", err); goto err_unmap_free_uar; } err = mlx5_alloc_transport_domain(mdev, &priv->tdn); if (err) { - netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n", - 
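/*
 * Editor's note, not part of this patch: mlx5e_get_max_inline_cap() above
 * bounds the ETHTOOL_TX_COPYBREAK tunable by what fits in one BlueFlame
 * buffer. With log_bf_reg_size == 9, for instance, the BF register is
 * 512 bytes and each of the two alternating buffers is 256 bytes, so at
 * most 256 - sizeof(struct mlx5e_tx_wqe) + 2 bytes of headers can be
 * inlined (the "+ 2" reclaims inline_hdr_start, which already lives inside
 * the WQE).
 */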
__func__, err); + mlx5_core_err(mdev, "alloc td failed, %d\n", err); goto err_dealloc_pd; } err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); if (err) { - netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "create mkey failed, %d\n", err); goto err_dealloc_transport_domain; } err = register_netdev(netdev); if (err) { - netdev_err(netdev, "%s: register_netdev failed, %d\n", - __func__, err); + mlx5_core_err(mdev, "register_netdev failed, %d\n", err); goto err_destroy_mkey; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 03f28f438e55..64380bc0cd6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) if (notify_hw) { cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe); + mlx5e_tx_notify_hw(sq, wqe, 0); } } @@ -110,9 +110,17 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, } static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, - struct sk_buff *skb) + struct sk_buff *skb, bool bf) { -#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */ + /* Some NIC TX decisions, e.g loopback, are based on the packet + * headers and occur before the data gather. + * Therefore these headers must be copied into the WQE + */ +#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/) + + if (bf && (skb_headlen(skb) <= sq->max_inline)) + return skb_headlen(skb); + return MLX5E_MIN_INLINE; } @@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) u8 opcode = MLX5_OPCODE_SEND; dma_addr_t dma_addr = 0; + bool bf = false; u16 headlen; u16 ds_cnt; u16 ihs; @@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) else sq->stats.csum_offload_none++; + if (sq->cc != sq->prev_cc) { + sq->prev_cc = sq->cc; + sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0; + } + if (skb_is_gso(skb)) { u32 payload_len; @@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->stats.tso_packets++; sq->stats.tso_bytes += payload_len; } else { - ihs = mlx5e_get_inline_hdr_size(sq, skb); + bf = sq->bf_budget && + !skb->xmit_more && + !skb_shinfo(skb)->nr_frags; + ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); } @@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) } if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { + int bf_sz = 0; + + if (bf && sq->uar_bf_map) + bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3; + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe); + mlx5e_tx_notify_hw(sq, wqe, bf_sz); } /* fill sq edge with nops to avoid wqe wrap around */ while ((sq->pc & wq->sz_m1) > sq->edge) mlx5e_send_nop(sq, false); + sq->bf_budget = bf ? 
sq->bf_budget - 1 : 0; + sq->stats.packets++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index afad529838de..603a8b0908ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -455,7 +455,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) struct mlx5_priv *priv = &mdev->priv; struct msix_entry *msix = priv->msix_arr; int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; - int numa_node = dev_to_node(&mdev->pdev->dev); + int numa_node = priv->numa_node; int err; if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { @@ -654,6 +654,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) } #endif +static int map_bf_area(struct mlx5_core_dev *dev) +{ + resource_size_t bf_start = pci_resource_start(dev->pdev, 0); + resource_size_t bf_len = pci_resource_len(dev->pdev, 0); + + dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); + + return dev->priv.bf_mapping ? 0 : -ENOMEM; +} + +static void unmap_bf_area(struct mlx5_core_dev *dev) +{ + if (dev->priv.bf_mapping) + io_mapping_free(dev->priv.bf_mapping); +} + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) { struct mlx5_priv *priv = &dev->priv; @@ -668,6 +684,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) INIT_LIST_HEAD(&priv->pgdir_list); spin_lock_init(&priv->mkey_lock); + mutex_init(&priv->alloc_mutex); + + priv->numa_node = dev_to_node(&dev->pdev->dev); + priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); if (!priv->dbg_root) return -ENOMEM; @@ -804,10 +824,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_stop_eqs; } + if (map_bf_area(dev)) + dev_err(&pdev->dev, "Failed to map blue flame area\n"); + err = mlx5_irq_set_affinity_hints(dev); if (err) { dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); - goto err_free_comp_eqs; + goto err_unmap_bf_area; } MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); @@ -819,7 +842,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) return 0; -err_free_comp_eqs: +err_unmap_bf_area: + unmap_bf_area(dev); + free_comp_eqs(dev); err_stop_eqs: @@ -877,6 +902,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_irq_clear_affinity_hints(dev); + unmap_bf_area(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index fc88ecaecb4b..566a70488db1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, int in_size, u32 *out, int out_size) { - mlx5_cmd_exec(dev, in, in_size, out, out_size); + int err; + + err = mlx5_cmd_exec(dev, in, in_size, out, out_size); + if (err) + return err; + return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 8d98b03026d5..c4f3f74908ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -358,3 +358,32 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, 
u16 lwm) return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } + +int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqtn) +{ + u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + int err; + + MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rqtn = MLX5_GET(create_rqt_out, out, rqtn); + + return err; +} + +void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); + MLX5_SET(destroy_rqt_in, in, rqtn, rqtn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h index f9ef244710d5..10bd75e7d9b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -61,4 +61,8 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqtn); +void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); + #endif /* __TRANSOBJ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 9ef85873ceea..eb05c845ece9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/io-mapping.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" @@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) goto err_free_uar; } + if (mdev->priv.bf_mapping) + uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping, + uar->index << PAGE_SHIFT); + return 0; err_free_uar: @@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) { + io_mapping_unmap(uar->bf_map); iounmap(uar->map); mlx5_cmd_free_uar(mdev, uar->index); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 8388411582cf..ce21ee5b2357 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; - err = mlx5_db_alloc(mdev, &wq_ctrl->db); + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); return err; } - err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf); + err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); goto err_db_free; @@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); wq->sz_m1 = (1 << wq->log_sz) - 1; - err = 
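/*
 * Editor's sketch, not part of this patch: with the RQT commands wrapped in
 * mlx5_core_create_rqt()/mlx5_core_destroy_rqt() above, a caller only fills
 * the variable-size create_rqt_in mailbox; the opcode and status check are
 * handled by the wrapper. Minimal usage for a single RQ, assuming the
 * rqt_context field name from the mlx5 interface definitions:
 */
static int example_create_rqt(struct mlx5_core_dev *mdev, u32 rqn, u32 *rqtn)
{
	int inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32);
	void *rqtc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, 1);
	MLX5_SET(rqtc, rqtc, rqt_max_size, 1);
	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);

	kvfree(in);
	return err;
}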
mlx5_db_alloc(mdev, &wq_ctrl->db); + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); return err; } - err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf); + err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); goto err_db_free; @@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; - err = mlx5_db_alloc(mdev, &wq_ctrl->db); + err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index e0ddd69fb429..6c2a8f95093c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -37,7 +37,8 @@ struct mlx5_wq_param { int linear; - int numa; + int buf_numa_node; + int db_numa_node; }; struct mlx5_wq_ctrl { diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig new file mode 100644 index 000000000000..8d1080da49b5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -0,0 +1,32 @@ +# +# Mellanox switch drivers configuration +# + +config MLXSW_CORE + tristate "Mellanox Technologies Switch ASICs support" + ---help--- + This driver supports Mellanox Technologies Switch ASICs family. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_core. + +config MLXSW_PCI + tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" + depends on PCI && MLXSW_CORE + default m + ---help--- + This is PCI bus implementation for Mellanox Technologies Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_pci. + +config MLXSW_SWITCHX2 + tristate "Mellanox Technologies SwitchX-2 support" + depends on MLXSW_CORE && NET_SWITCHDEV + default m + ---help--- + This driver supports Mellanox Technologies SwitchX-2 Ethernet + Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_switchx2. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile new file mode 100644 index 000000000000..0a05f65ee814 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o +mlxsw_core-objs := core.o +obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o +mlxsw_pci-objs := pci.o +obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o +mlxsw_switchx2-objs := switchx2.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h new file mode 100644 index 000000000000..770db17eb03f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -0,0 +1,1090 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/cmd.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_CMD_H +#define _MLXSW_CMD_H + +#include "item.h" + +#define MLXSW_CMD_MBOX_SIZE 4096 + +static inline char *mlxsw_cmd_mbox_alloc(void) +{ + return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL); +} + +static inline void mlxsw_cmd_mbox_free(char *mbox) +{ + kfree(mbox); +} + +static inline void mlxsw_cmd_mbox_zero(char *mbox) +{ + memset(mbox, 0, MLXSW_CMD_MBOX_SIZE); +} + +struct mlxsw_core; + +int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size); + +static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode, + u8 opcode_mod, u32 in_mod, char *in_mbox, + size_t in_mbox_size) +{ + return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, + in_mbox, in_mbox_size, NULL, 0); +} + +static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, + u8 opcode_mod, u32 in_mod, + bool out_mbox_direct, + char *out_mbox, size_t out_mbox_size) +{ + return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, + out_mbox_direct, NULL, 0, + out_mbox, out_mbox_size); +} + +static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode, + u8 opcode_mod, u32 in_mod) +{ + return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, + NULL, 0, NULL, 0); +} + +enum mlxsw_cmd_opcode { + MLXSW_CMD_OPCODE_QUERY_FW = 0x004, + MLXSW_CMD_OPCODE_QUERY_BOARDINFO = 0x006, + MLXSW_CMD_OPCODE_QUERY_AQ_CAP = 0x003, + MLXSW_CMD_OPCODE_MAP_FA = 0xFFF, + MLXSW_CMD_OPCODE_UNMAP_FA = 0xFFE, + MLXSW_CMD_OPCODE_CONFIG_PROFILE = 0x100, + MLXSW_CMD_OPCODE_ACCESS_REG = 0x040, + MLXSW_CMD_OPCODE_SW2HW_DQ = 0x201, + MLXSW_CMD_OPCODE_HW2SW_DQ = 0x202, + MLXSW_CMD_OPCODE_2ERR_DQ = 0x01E, + MLXSW_CMD_OPCODE_QUERY_DQ = 0x022, + MLXSW_CMD_OPCODE_SW2HW_CQ = 0x016, + MLXSW_CMD_OPCODE_HW2SW_CQ = 0x017, + MLXSW_CMD_OPCODE_QUERY_CQ = 0x018, + MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013, + MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014, + 
MLXSW_CMD_OPCODE_QUERY_EQ = 0x015, +}; + +static inline const char *mlxsw_cmd_opcode_str(u16 opcode) +{ + switch (opcode) { + case MLXSW_CMD_OPCODE_QUERY_FW: + return "QUERY_FW"; + case MLXSW_CMD_OPCODE_QUERY_BOARDINFO: + return "QUERY_BOARDINFO"; + case MLXSW_CMD_OPCODE_QUERY_AQ_CAP: + return "QUERY_AQ_CAP"; + case MLXSW_CMD_OPCODE_MAP_FA: + return "MAP_FA"; + case MLXSW_CMD_OPCODE_UNMAP_FA: + return "UNMAP_FA"; + case MLXSW_CMD_OPCODE_CONFIG_PROFILE: + return "CONFIG_PROFILE"; + case MLXSW_CMD_OPCODE_ACCESS_REG: + return "ACCESS_REG"; + case MLXSW_CMD_OPCODE_SW2HW_DQ: + return "SW2HW_DQ"; + case MLXSW_CMD_OPCODE_HW2SW_DQ: + return "HW2SW_DQ"; + case MLXSW_CMD_OPCODE_2ERR_DQ: + return "2ERR_DQ"; + case MLXSW_CMD_OPCODE_QUERY_DQ: + return "QUERY_DQ"; + case MLXSW_CMD_OPCODE_SW2HW_CQ: + return "SW2HW_CQ"; + case MLXSW_CMD_OPCODE_HW2SW_CQ: + return "HW2SW_CQ"; + case MLXSW_CMD_OPCODE_QUERY_CQ: + return "QUERY_CQ"; + case MLXSW_CMD_OPCODE_SW2HW_EQ: + return "SW2HW_EQ"; + case MLXSW_CMD_OPCODE_HW2SW_EQ: + return "HW2SW_EQ"; + case MLXSW_CMD_OPCODE_QUERY_EQ: + return "QUERY_EQ"; + default: + return "*UNKNOWN*"; + } +} + +enum mlxsw_cmd_status { + /* Command execution succeeded. */ + MLXSW_CMD_STATUS_OK = 0x00, + /* Internal error (e.g. bus error) occurred while processing command. */ + MLXSW_CMD_STATUS_INTERNAL_ERR = 0x01, + /* Operation/command not supported or opcode modifier not supported. */ + MLXSW_CMD_STATUS_BAD_OP = 0x02, + /* Parameter not supported, parameter out of range. */ + MLXSW_CMD_STATUS_BAD_PARAM = 0x03, + /* System was not enabled or bad system state. */ + MLXSW_CMD_STATUS_BAD_SYS_STATE = 0x04, + /* Attempt to access reserved or unallocated resource, or resource in + * inappropriate ownership. + */ + MLXSW_CMD_STATUS_BAD_RESOURCE = 0x05, + /* Requested resource is currently executing a command. */ + MLXSW_CMD_STATUS_RESOURCE_BUSY = 0x06, + /* Required capability exceeds device limits. */ + MLXSW_CMD_STATUS_EXCEED_LIM = 0x08, + /* Resource is not in the appropriate state or ownership. */ + MLXSW_CMD_STATUS_BAD_RES_STATE = 0x09, + /* Index out of range (might be beyond table size or attempt to + * access a reserved resource). + */ + MLXSW_CMD_STATUS_BAD_INDEX = 0x0A, + /* NVMEM checksum/CRC failed. */ + MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B, + /* Bad management packet (silently discarded). */ + MLXSW_CMD_STATUS_BAD_PKT = 0x30, +}; + +static inline const char *mlxsw_cmd_status_str(u8 status) +{ + switch (status) { + case MLXSW_CMD_STATUS_OK: + return "OK"; + case MLXSW_CMD_STATUS_INTERNAL_ERR: + return "INTERNAL_ERR"; + case MLXSW_CMD_STATUS_BAD_OP: + return "BAD_OP"; + case MLXSW_CMD_STATUS_BAD_PARAM: + return "BAD_PARAM"; + case MLXSW_CMD_STATUS_BAD_SYS_STATE: + return "BAD_SYS_STATE"; + case MLXSW_CMD_STATUS_BAD_RESOURCE: + return "BAD_RESOURCE"; + case MLXSW_CMD_STATUS_RESOURCE_BUSY: + return "RESOURCE_BUSY"; + case MLXSW_CMD_STATUS_EXCEED_LIM: + return "EXCEED_LIM"; + case MLXSW_CMD_STATUS_BAD_RES_STATE: + return "BAD_RES_STATE"; + case MLXSW_CMD_STATUS_BAD_INDEX: + return "BAD_INDEX"; + case MLXSW_CMD_STATUS_BAD_NVMEM: + return "BAD_NVMEM"; + case MLXSW_CMD_STATUS_BAD_PKT: + return "BAD_PKT"; + default: + return "*UNKNOWN*"; + } +} + +/* QUERY_FW - Query Firmware + * ------------------------- + * OpMod == 0, INMmod == 0 + * ----------------------- + * The QUERY_FW command retrieves information related to firmware, command + * interface version and the amount of resources that should be allocated to + * the firmware. 
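/*
 * Editor's sketch, not part of this patch: the _str() helpers above exist
 * for diagnostics in the command path. A bus implementation could report a
 * failed mailbox command like this:
 */
static void example_report_cmd_failure(u16 opcode, u8 status)
{
	pr_err("mlxsw: cmd %s (0x%03x) failed: %s (0x%02x)\n",
	       mlxsw_cmd_opcode_str(opcode), opcode,
	       mlxsw_cmd_status_str(status), status);
}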
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is done for the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Offset of the read-only buffer for internal error reports, relative
+ * to the error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
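/*
 * Editor's sketch, not part of this patch: each MLXSW_ITEM32() above
 * expands (via item.h) into typed accessors; assuming the generated names
 * follow the mlxsw_cmd_mbox_query_fw_<field>_get() convention, reading the
 * firmware revision out of a QUERY_FW mailbox looks like:
 */
static int example_print_fw_rev(struct mlxsw_core *mlxsw_core, char *out_mbox)
{
	int err;

	err = mlxsw_cmd_query_fw(mlxsw_core, out_mbox);
	if (err)
		return err;

	pr_info("mlxsw: fw rev %u.%u.%u\n",
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(out_mbox),
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(out_mbox),
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(out_mbox));
	return 0;
}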
+ * 00 - BAR 0-1 (64 bit BAR) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2); + +/* cmd_mbox_query_fw_doorbell_page_offset + * Offset of the doorbell page + */ +MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64); + +/* cmd_mbox_query_fw_doorbell_page_bar + * PCI base address register (BAR) of the doorbell page + * 00 - BAR 0-1 (64 bit BAR) + */ +MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2); + +/* QUERY_BOARDINFO - Query Board Information + * ----------------------------------------- + * OpMod == 0 (N/A), INMmod == 0 (N/A) + * ----------------------------------- + * The QUERY_BOARDINFO command retrieves adapter specific parameters. + */ + +static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core, + char *out_mbox) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO, + 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_boardinfo_intapin + * When PCIe interrupt messages are being used, this value is used for clearing + * an interrupt. When using MSI-X, this register is not used. + */ +MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8); + +/* cmd_mbox_boardinfo_vsd_vendor_id + * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor + * specifying/formatting the VSD. The vsd_vendor_id identifies the management + * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID + * format and encoding as long as they use their assigned vsd_vendor_id. + */ +MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16); + +/* cmd_mbox_boardinfo_vsd + * Vendor Specific Data. The VSD string that is burnt to the Flash + * with the firmware. + */ +#define MLXSW_CMD_BOARDINFO_VSD_LEN 208 +MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN); + +/* cmd_mbox_boardinfo_psid + * The PSID field is a 16-ascii (byte) character string which acts as + * the board ID. The PSID format is used in conjunction with + * Mellanox vsd_vendor_id (15B3h). + */ +#define MLXSW_CMD_BOARDINFO_PSID_LEN 16 +MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN); + +/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities + * ----------------------------------------------------- + * OpMod == 0 (N/A), INMmod == 0 (N/A) + * ----------------------------------- + * The QUERY_AQ_CAP command returns the device asynchronous queues + * capabilities supported. + */ + +static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core, + char *out_mbox) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP, + 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_query_aq_cap_log_max_sdq_sz + * Log (base 2) of max WQEs allowed on SDQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_sdqs + * Maximum number of SDQs. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8); + +/* cmd_mbox_query_aq_cap_log_max_rdq_sz + * Log (base 2) of max WQEs allowed on RDQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_rdqs + * Maximum number of RDQs. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8); + +/* cmd_mbox_query_aq_cap_log_max_cq_sz + * Log (base 2) of max CQEs allowed on CQ. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8); + +/* cmd_mbox_query_aq_cap_max_num_cqs + * Maximum number of CQs. 
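/*
 * Editor's sketch, not part of this patch: buffer items declared with
 * MLXSW_ITEM_BUF() above are read with generated _memcpy_from() helpers
 * (again assuming the item.h naming convention). Extracting the board PSID:
 */
static int example_read_psid(struct mlxsw_core *mlxsw_core, char *out_mbox,
			     char psid[MLXSW_CMD_BOARDINFO_PSID_LEN])
{
	int err;

	err = mlxsw_cmd_boardinfo(mlxsw_core, out_mbox);
	if (err)
		return err;

	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(out_mbox, psid);
	return 0;
}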
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+					 char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum S/G list elements in an SDQ. An SDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum S/G list elements in an RDQ. An RDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
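A short consumption sketch (illustration only, not part of this patch), again assuming the generated *_get() accessors and the mailbox alloc/free helpers:

	static int example_query_aq_cap(struct mlxsw_core *mlxsw_core)
	{
		char *out_mbox;
		u32 max_sdq_wqes;
		int err;

		out_mbox = mlxsw_cmd_mbox_alloc();
		if (!out_mbox)
			return -ENOMEM;

		err = mlxsw_cmd_query_aq_cap(mlxsw_core, out_mbox);
		if (err)
			goto out;

		/* The *_sz fields are log2 values; convert to absolute limits. */
		max_sdq_wqes = 1U << mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(out_mbox);
		pr_info("up to %u SDQs with up to %u WQEs each\n",
			mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(out_mbox),
			max_sdq_wqes);
	out:
		mlxsw_cmd_mbox_free(out_mbox);
		return err;
	}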
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+				   char *in_mbox, u32 vpm_entries_count)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+				 0, vpm_entries_count,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
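A sketch of mapping a single chunk (not part of this patch); it assumes the *_INDEXED macros generate setters of the form name_set(mbox, index, value):

	static int example_map_fa_one_chunk(struct mlxsw_core *mlxsw_core,
					    dma_addr_t daddr, u8 log2_pages)
	{
		char *in_mbox;
		int err;

		in_mbox = mlxsw_cmd_mbox_alloc();
		if (!in_mbox)
			return -ENOMEM;

		/* One VPM entry: a pinned, contiguous run of 2^log2_pages
		 * 4KB pages starting at daddr.
		 */
		mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, 0, daddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, 0, log2_pages);

		err = mlxsw_cmd_map_fa(mlxsw_core, in_mbox, 1);
		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}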
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all the
+ * firmware area. After this command is completed the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to execution of
+ * the MAP_FA command.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an
+ * indeterminate state; a software reset of the device is therefore
+ * required following an unsuccessful completion of the command.
+ * A software reset of the device is also required to change an
+ * existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+					       char *in_mbox)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+				 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+	     set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated with
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * with the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ *    the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+		     0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+		     0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+		     0x60, 0, 8, 0x08, 0x00, false);
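The set_*/value pairing above means every limit is written twice: once as a capability bit, once as the value itself. A minimal sketch (illustration only, not part of this patch, with made-up limits) assuming the generated *_set() accessors:

	static int example_config_profile(struct mlxsw_core *mlxsw_core)
	{
		char *in_mbox;
		int err;

		in_mbox = mlxsw_cmd_mbox_alloc();
		if (!in_mbox)
			return -ENOMEM;

		/* A value only takes effect when its set_* capability bit is 1. */
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(in_mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(in_mbox, 64);
		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(in_mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(in_mbox, 16);

		err = mlxsw_cmd_config_profile_set(mlxsw_core, in_mbox);
		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}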
+/* ACCESS_REG - Access EMAD Supported Register
+ * -------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, char *out_mbox)
+{
+	return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+			      0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+			      out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+				 opcode_mod, dq_number,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+	MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+	MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
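Bringing an SDQ up therefore means filling this mailbox and issuing SW2HW_DQ with the SDQ opcode modifier. A sketch (not part of this patch; tclass and ring size are made-up, and the indexed pa setter signature is assumed):

	static int example_sw2hw_sdq(struct mlxsw_core *mlxsw_core, u32 sdq_num,
				     u8 cq_num, dma_addr_t ring_daddr)
	{
		char *in_mbox;
		int err;

		in_mbox = mlxsw_cmd_mbox_alloc();
		if (!in_mbox)
			return -ENOMEM;

		mlxsw_cmd_mbox_sw2hw_dq_cq_set(in_mbox, cq_num);
		mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(in_mbox, 3); /* made-up tclass */
		mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(in_mbox, 0); /* one 4KB page */
		mlxsw_cmd_mbox_sw2hw_dq_pa_set(in_mbox, 0, ring_daddr);

		err = mlxsw_cmd_sw2hw_sdq(mlxsw_core, in_mbox, sdq_num);
		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}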
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded,
+ * and SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+				       u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from
+ * whatever state it is currently in. While the command is executed, some
+ * in-process descriptors may complete. Once the DQ transitions into the
+ * error state, if there are posted descriptors on the RDQ/SDQ, the hardware
+ * writes a completion with error (flushed) for all descriptors posted in
+ * the RDQ/SDQ. When the command is completed successfully, the DQ is
+ * already in the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from
+ * the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+				       char *out_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+				  opcode_mod, dq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
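One plausible teardown ordering that follows from the descriptions above (a sketch only, not part of this patch; the source does not mandate that 2ERR_DQ precede HW2SW_DQ):

	static void example_sdq_teardown(struct mlxsw_core *mlxsw_core, u32 sdq_num)
	{
		/* Flush first: once the DQ is in the error state, the hardware
		 * completes anything still posted with a flush error.
		 */
		if (mlxsw_cmd_2err_sdq(mlxsw_core, sdq_num))
			pr_warn("SDQ %u: 2ERR_DQ failed\n", sdq_num);

		/* Then return the queue to software ownership. */
		if (mlxsw_cmd_hw2sw_sdq(mlxsw_core, sdq_num))
			pr_warn("SDQ %u: HW2SW_DQ failed\n", sdq_num);
	}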
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+				 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: updates of the CQ consumer
+ * counter (poll for completion) or Request completion notification
+ * (Arm CQ) DoorBells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
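A CQ setup sketch mirroring the SDQ one above (illustration only, not part of this patch; ring size is made-up and the generated accessor names are assumed):

	static int example_sw2hw_cq(struct mlxsw_core *mlxsw_core, u32 cq_num,
				    u8 eq_num, dma_addr_t ring_daddr)
	{
		char *in_mbox;
		int err;

		in_mbox = mlxsw_cmd_mbox_alloc();
		if (!in_mbox)
			return -ENOMEM;

		mlxsw_cmd_mbox_sw2hw_cq_cv_set(in_mbox, 0);	/* CQE version 0 */
		mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(in_mbox, eq_num);
		mlxsw_cmd_mbox_sw2hw_cq_st_set(in_mbox, 0);	/* FIRED */
		mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(in_mbox, 4); /* 16 CQEs */
		mlxsw_cmd_mbox_sw2hw_cq_pa_set(in_mbox, 0, ring_daddr);

		err = mlxsw_cmd_sw2hw_cq(mlxsw_core, in_mbox, cq_num);
		mlxsw_cmd_mbox_free(in_mbox);
		return err;
	}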
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+				     u32 cq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+				   0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+				  0, cq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+				 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */ +MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true); + +/* HW2SW_EQ - Hardware to Software EQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == EQ number + * ------------------------------------- + */ + +static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core, + u32 eq_number) +{ + return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ, + 0, eq_number); +} + +/* QUERY_EQ - Query EQ + * ---------------------------------- + * OpMod == 0 (N/A), INMmod == EQ number + * ------------------------------------- + * + * Note: Output mailbox has the same format as SW2HW_EQ. + */ + +static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core, + char *out_mbox, u32 eq_number) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ, + 0, eq_number, false, + out_mbox, MLXSW_CMD_MBOX_SIZE); +} + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c new file mode 100644 index 000000000000..ad66ae44a0d7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -0,0 +1,1286 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
+	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
+	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+	struct u64_stats_sync syncp;
+	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+	u32 trap_rx_invalid;
+	u32 port_rx_invalid;
+};
+
+struct mlxsw_core {
+	struct mlxsw_driver *driver;
+	const struct mlxsw_bus *bus;
+	void *bus_priv;
+	const struct mlxsw_bus_info *bus_info;
+	struct list_head rx_listener_list;
+	struct list_head event_listener_list;
+	struct {
+		struct sk_buff *resp_skb;
+		u64 tid;
+		wait_queue_head_t wait;
+		bool trans_active;
+		struct mutex lock; /* One EMAD transaction at a time. */
+		bool use_emad;
+	} emad;
+	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+	struct dentry *dbg_dir;
+	struct {
+		struct debugfs_blob_wrapper vsd_blob;
+		struct debugfs_blob_wrapper psid_blob;
+	} dbg;
+	unsigned long driver_priv[0];
+	/* driver_priv has to be always the last item */
+};
+
+struct mlxsw_rx_listener_item {
+	struct list_head list;
+	struct mlxsw_rx_listener rxl;
+	void *priv;
+};
+
+struct mlxsw_event_listener_item {
+	struct list_head list;
+	struct mlxsw_event_listener el;
+	void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
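Packing a conforming EMAD Ethernet header with these items would look like the sketch below (illustration only, not part of this patch; the canonical MLXSW_EMAD_EH_* constants live in emad.h, and the literals here merely restate the "Must be set to" comments above):

	static void example_pack_emad_eth_hdr(char *eth_hdr)
	{
		static const char dmac[6] = { 0x01, 0x02, 0xc9, 0x00, 0x00, 0x01 };
		static const char smac[6] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };

		mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, dmac);
		mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, smac);
		mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, 0x8932);
		mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, 0x0);
		mlxsw_emad_eth_hdr_ver_set(eth_hdr, 0x0);
	}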
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */ +MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11); + +enum mlxsw_core_reg_access_type { + MLXSW_CORE_REG_ACCESS_TYPE_QUERY, + MLXSW_CORE_REG_ACCESS_TYPE_WRITE, +}; + +static inline const char * +mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type) +{ + switch (type) { + case MLXSW_CORE_REG_ACCESS_TYPE_QUERY: + return "query"; + case MLXSW_CORE_REG_ACCESS_TYPE_WRITE: + return "write"; + } + BUG(); +} + +static void mlxsw_emad_pack_end_tlv(char *end_tlv) +{ + mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END); + mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN); +} + +static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, + const struct mlxsw_reg_info *reg, + char *payload) +{ + mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG); + mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1); + memcpy(reg_tlv + sizeof(u32), payload, reg->len); +} + +static void mlxsw_emad_pack_op_tlv(char *op_tlv, + const struct mlxsw_reg_info *reg, + enum mlxsw_core_reg_access_type type, + struct mlxsw_core *mlxsw_core) +{ + mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); + mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); + mlxsw_emad_op_tlv_dr_set(op_tlv, 0); + mlxsw_emad_op_tlv_status_set(op_tlv, 0); + mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id); + mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST); + if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type) + mlxsw_emad_op_tlv_method_set(op_tlv, + MLXSW_EMAD_OP_TLV_METHOD_QUERY); + else + mlxsw_emad_op_tlv_method_set(op_tlv, + MLXSW_EMAD_OP_TLV_METHOD_WRITE); + mlxsw_emad_op_tlv_class_set(op_tlv, + MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); + mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid); +} + +static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) +{ + char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN); + + mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC); + mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC); + mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE); + mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO); + mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION); + + skb_reset_mac_header(skb); + + return 0; +} + +static void mlxsw_emad_construct(struct sk_buff *skb, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type, + struct mlxsw_core *mlxsw_core) +{ + char *buf; + + buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32)); + mlxsw_emad_pack_end_tlv(buf); + + buf = skb_push(skb, reg->len + sizeof(u32)); + mlxsw_emad_pack_reg_tlv(buf, reg, payload); + + buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); + mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core); + + mlxsw_emad_construct_eth_hdr(skb); +} + +static char *mlxsw_emad_op_tlv(const struct sk_buff *skb) +{ + return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN)); +} + +static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb) +{ + return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN + + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32))); +} + +static char *mlxsw_emad_reg_payload(const char *op_tlv) +{ + return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32))); +} + +static u64 mlxsw_emad_get_tid(const struct sk_buff *skb) +{ + char *op_tlv; + + op_tlv = mlxsw_emad_op_tlv(skb); + return mlxsw_emad_op_tlv_tid_get(op_tlv); +} + +static bool mlxsw_emad_is_resp(const struct sk_buff *skb) +{ + char *op_tlv; + + op_tlv = mlxsw_emad_op_tlv(skb); + return (MLXSW_EMAD_OP_TLV_RESPONSE == 
mlxsw_emad_op_tlv_r_get(op_tlv)); +} + +#define MLXSW_EMAD_TIMEOUT_MS 200 + +static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, + struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + int err; + int ret; + + err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); + if (err) { + dev_warn(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", + mlxsw_core->emad.tid); + dev_kfree_skb(skb); + return err; + } + + mlxsw_core->emad.trans_active = true; + ret = wait_event_timeout(mlxsw_core->emad.wait, + !(mlxsw_core->emad.trans_active), + msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); + if (!ret) { + dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", + mlxsw_core->emad.tid); + mlxsw_core->emad.trans_active = false; + return -EIO; + } + + return 0; +} + +static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, + char *op_tlv) +{ + enum mlxsw_emad_op_tlv_status status; + u64 tid; + + status = mlxsw_emad_op_tlv_status_get(op_tlv); + tid = mlxsw_emad_op_tlv_tid_get(op_tlv); + + switch (status) { + case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: + return 0; + case MLXSW_EMAD_OP_TLV_STATUS_BUSY: + case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: + dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n", + tid, status, mlxsw_emad_op_tlv_status_str(status)); + return -EAGAIN; + case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: + case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: + case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: + case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: + case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: + default: + dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n", + tid, status, mlxsw_emad_op_tlv_status_str(status)); + return -EIO; + } +} + +static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core, + struct sk_buff *skb) +{ + return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb)); +} + +static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, + struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + struct sk_buff *trans_skb; + int n_retry; + int err; + + n_retry = 0; +retry: + /* We copy the EMAD to a new skb, since we might need + * to retransmit it in case of failure. 
+ */ + trans_skb = skb_copy(skb, GFP_KERNEL); + if (!trans_skb) { + err = -ENOMEM; + goto out; + } + + err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info); + if (!err) { + struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb; + + err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb); + if (err) + dev_kfree_skb(resp_skb); + if (!err || err != -EAGAIN) + goto out; + } + if (n_retry++ < MLXSW_EMAD_MAX_RETRY) + goto retry; + +out: + dev_kfree_skb(skb); + mlxsw_core->emad.tid++; + return err; +} + +static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_core *mlxsw_core = priv; + + if (mlxsw_emad_is_resp(skb) && + mlxsw_core->emad.trans_active && + mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) { + mlxsw_core->emad.resp_skb = skb; + mlxsw_core->emad.trans_active = false; + wake_up(&mlxsw_core->emad.wait); + } else { + dev_kfree_skb(skb); + } +} + +static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = { + .func = mlxsw_emad_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ETHEMAD, +}; + +static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD); + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_TRAP_ID_ETHEMAD); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); +} + +static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) +{ + int err; + + /* Set the upper 32 bits of the transaction ID field to a random + * number. This allows us to discard EMADs addressed to other + * devices. 
+ */ + get_random_bytes(&mlxsw_core->emad.tid, 4); + mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32; + + init_waitqueue_head(&mlxsw_core->emad.wait); + mlxsw_core->emad.trans_active = false; + mutex_init(&mlxsw_core->emad.lock); + + err = mlxsw_core_rx_listener_register(mlxsw_core, + &mlxsw_emad_rx_listener, + mlxsw_core); + if (err) + return err; + + err = mlxsw_emad_traps_set(mlxsw_core); + if (err) + goto err_emad_trap_set; + + mlxsw_core->emad.use_emad = true; + + return 0; + +err_emad_trap_set: + mlxsw_core_rx_listener_unregister(mlxsw_core, + &mlxsw_emad_rx_listener, + mlxsw_core); + return err; +} + +static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_TRAP_ID_ETHEMAD); + mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_core, + &mlxsw_emad_rx_listener, + mlxsw_core); +} + +static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, + u16 reg_len) +{ + struct sk_buff *skb; + u16 emad_len; + + emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN + + (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) * + sizeof(u32) + mlxsw_core->driver->txhdr_len); + if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN) + return NULL; + + skb = netdev_alloc_skb(NULL, emad_len); + if (!skb) + return NULL; + memset(skb->data, 0, emad_len); + skb_reserve(skb, emad_len); + + return skb; +} + +/***************** + * Core functions + *****************/ + +static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_core *mlxsw_core = file->private; + struct mlxsw_core_pcpu_stats *p; + u64 rx_packets, rx_bytes; + u64 tmp_rx_packets, tmp_rx_bytes; + u32 rx_dropped, rx_invalid; + unsigned int start; + int i; + int j; + static const char hdr[] = + " NUM RX_PACKETS RX_BYTES RX_DROPPED\n"; + + seq_printf(file, hdr); + for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) { + rx_packets = 0; + rx_bytes = 0; + rx_dropped = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + do { + start = u64_stats_fetch_begin(&p->syncp); + tmp_rx_packets = p->trap_rx_packets[i]; + tmp_rx_bytes = p->trap_rx_bytes[i]; + } while (u64_stats_fetch_retry(&p->syncp, start)); + + rx_packets += tmp_rx_packets; + rx_bytes += tmp_rx_bytes; + rx_dropped += p->trap_rx_dropped[i]; + } + seq_printf(file, "trap %3d %12llu %12llu %10u\n", + i, rx_packets, rx_bytes, rx_dropped); + } + rx_invalid = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + rx_invalid += p->trap_rx_invalid; + } + seq_printf(file, "trap INV %10u\n", + rx_invalid); + + for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) { + rx_packets = 0; + rx_bytes = 0; + rx_dropped = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + do { + start = u64_stats_fetch_begin(&p->syncp); + tmp_rx_packets = p->port_rx_packets[i]; + tmp_rx_bytes = p->port_rx_bytes[i]; + } while (u64_stats_fetch_retry(&p->syncp, start)); + + rx_packets += tmp_rx_packets; + rx_bytes += tmp_rx_bytes; + rx_dropped += p->port_rx_dropped[i]; + } + seq_printf(file, "port %3d %12llu %12llu %10u\n", + i, rx_packets, rx_bytes, rx_dropped); + } + rx_invalid = 0; + for_each_possible_cpu(j) { + p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); + rx_invalid += p->port_rx_invalid; + } + seq_printf(file, "port INV %10u\n", + rx_invalid); + return 0; +} + +static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f) +{ + 
struct mlxsw_core *mlxsw_core = inode->i_private; + + return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core); +} + +static const struct file_operations mlxsw_core_rx_stats_dbg_ops = { + .owner = THIS_MODULE, + .open = mlxsw_core_rx_stats_dbg_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek +}; + +static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, + const char *buf, size_t size) +{ + __be32 *m = (__be32 *) buf; + int i; + int count = size / sizeof(__be32); + + for (i = count - 1; i >= 0; i--) + if (m[i]) + break; + i++; + count = i ? i : 1; + for (i = 0; i < count; i += 4) + dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", + i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), + be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); +} + +int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) +{ + spin_lock(&mlxsw_core_driver_list_lock); + list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list); + spin_unlock(&mlxsw_core_driver_list_lock); + return 0; +} +EXPORT_SYMBOL(mlxsw_core_driver_register); + +void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver) +{ + spin_lock(&mlxsw_core_driver_list_lock); + list_del(&mlxsw_driver->list); + spin_unlock(&mlxsw_core_driver_list_lock); +} +EXPORT_SYMBOL(mlxsw_core_driver_unregister); + +static struct mlxsw_driver *__driver_find(const char *kind) +{ + struct mlxsw_driver *mlxsw_driver; + + list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) { + if (strcmp(mlxsw_driver->kind, kind) == 0) + return mlxsw_driver; + } + return NULL; +} + +static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind) +{ + struct mlxsw_driver *mlxsw_driver; + + spin_lock(&mlxsw_core_driver_list_lock); + mlxsw_driver = __driver_find(kind); + if (!mlxsw_driver) { + spin_unlock(&mlxsw_core_driver_list_lock); + request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind); + spin_lock(&mlxsw_core_driver_list_lock); + mlxsw_driver = __driver_find(kind); + } + if (mlxsw_driver) { + if (!try_module_get(mlxsw_driver->owner)) + mlxsw_driver = NULL; + } + + spin_unlock(&mlxsw_core_driver_list_lock); + return mlxsw_driver; +} + +static void mlxsw_core_driver_put(const char *kind) +{ + struct mlxsw_driver *mlxsw_driver; + + spin_lock(&mlxsw_core_driver_list_lock); + mlxsw_driver = __driver_find(kind); + spin_unlock(&mlxsw_core_driver_list_lock); + if (!mlxsw_driver) + return; + module_put(mlxsw_driver->owner); +} + +static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core) +{ + const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; + + mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name, + mlxsw_core_dbg_root); + if (!mlxsw_core->dbg_dir) + return -ENOMEM; + debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir, + mlxsw_core, &mlxsw_core_rx_stats_dbg_ops); + mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd; + mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd); + debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir, + &mlxsw_core->dbg.vsd_blob); + mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid; + mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid); + debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir, + &mlxsw_core->dbg.psid_blob); + return 0; +} + +static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core) +{ + debugfs_remove_recursive(mlxsw_core->dbg_dir); +} + +int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, + const struct mlxsw_bus *mlxsw_bus, + void *bus_priv) +{ + const char 
*device_kind = mlxsw_bus_info->device_kind; + struct mlxsw_core *mlxsw_core; + struct mlxsw_driver *mlxsw_driver; + size_t alloc_size; + int err; + + mlxsw_driver = mlxsw_core_driver_get(device_kind); + if (!mlxsw_driver) + return -EINVAL; + alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; + mlxsw_core = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_core) { + err = -ENOMEM; + goto err_core_alloc; + } + + INIT_LIST_HEAD(&mlxsw_core->rx_listener_list); + INIT_LIST_HEAD(&mlxsw_core->event_listener_list); + mlxsw_core->driver = mlxsw_driver; + mlxsw_core->bus = mlxsw_bus; + mlxsw_core->bus_priv = bus_priv; + mlxsw_core->bus_info = mlxsw_bus_info; + + mlxsw_core->pcpu_stats = + netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats); + if (!mlxsw_core->pcpu_stats) { + err = -ENOMEM; + goto err_alloc_stats; + } + + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); + if (err) + goto err_bus_init; + + err = mlxsw_emad_init(mlxsw_core); + if (err) + goto err_emad_init; + + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, + mlxsw_bus_info); + if (err) + goto err_driver_init; + + err = mlxsw_core_debugfs_init(mlxsw_core); + if (err) + goto err_debugfs_init; + + return 0; + +err_debugfs_init: + mlxsw_core->driver->fini(mlxsw_core->driver_priv); +err_driver_init: + mlxsw_emad_fini(mlxsw_core); +err_emad_init: + mlxsw_bus->fini(bus_priv); +err_bus_init: + free_percpu(mlxsw_core->pcpu_stats); +err_alloc_stats: + kfree(mlxsw_core); +err_core_alloc: + mlxsw_core_driver_put(device_kind); + return err; +} +EXPORT_SYMBOL(mlxsw_core_bus_device_register); + +void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) +{ + const char *device_kind = mlxsw_core->bus_info->device_kind; + + mlxsw_core_debugfs_fini(mlxsw_core); + mlxsw_core->driver->fini(mlxsw_core->driver_priv); + mlxsw_emad_fini(mlxsw_core); + mlxsw_core->bus->fini(mlxsw_core->bus_priv); + free_percpu(mlxsw_core->pcpu_stats); + kfree(mlxsw_core); + mlxsw_core_driver_put(device_kind); +} +EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); + +static struct mlxsw_core *__mlxsw_core_get(void *driver_priv) +{ + return container_of(driver_priv, struct mlxsw_core, driver_priv); +} + +int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); + + return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, + tx_info); +} +EXPORT_SYMBOL(mlxsw_core_skb_transmit); + +static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, + const struct mlxsw_rx_listener *rxl_b) +{ + return (rxl_a->func == rxl_b->func && + rxl_a->local_port == rxl_b->local_port && + rxl_a->trap_id == rxl_b->trap_id); +} + +static struct mlxsw_rx_listener_item * +__find_rx_listener_item(struct mlxsw_core *mlxsw_core, + const struct mlxsw_rx_listener *rxl, + void *priv) +{ + struct mlxsw_rx_listener_item *rxl_item; + + list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) { + if (__is_rx_listener_equal(&rxl_item->rxl, rxl) && + rxl_item->priv == priv) + return rxl_item; + } + return NULL; +} + +int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_rx_listener *rxl, + void *priv) +{ + struct mlxsw_rx_listener_item *rxl_item; + + rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv); + if (rxl_item) + return -EEXIST; + rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL); + if (!rxl_item) + return -ENOMEM; + rxl_item->rxl = *rxl; + rxl_item->priv = priv; + + 
list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (!rxl_item)
+		return;
+	list_del_rcu(&rxl_item->list);
+	synchronize_rcu();
+	kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+					   void *priv)
+{
+	struct mlxsw_event_listener_item *event_listener_item = priv;
+	struct mlxsw_reg_info reg;
+	char *payload;
+	char *op_tlv = mlxsw_emad_op_tlv(skb);
+	char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+	payload = mlxsw_emad_reg_payload(op_tlv);
+	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+	dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+				      const struct mlxsw_event_listener *el_b)
+{
+	return (el_a->func == el_b->func &&
+		el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+			   const struct mlxsw_event_listener *el,
+			   void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+
+	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+		if (__is_event_listener_equal(&el_item->el, el) &&
+		    el_item->priv == priv)
+			return el_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv)
+{
+	int err;
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (el_item)
+		return -EEXIST;
+	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+	if (!el_item)
+		return -ENOMEM;
+	el_item->el = *el;
+	el_item->priv = priv;
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+	if (err)
+		goto err_rx_listener_register;
+
+	/* No reason to save item if we did not manage to register an RX
+	 * listener for it.
+	 */
+	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+	return 0;
+
+err_rx_listener_register:
+	kfree(el_item);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (!el_item)
+		return;
+	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+	list_del(&el_item->list);
+	kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
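From a driver's point of view, using this API is just a callback plus a trap ID. A sketch (not part of this patch; the callback is hypothetical and MLXSW_TRAP_ID_PUDE is assumed to be one of the event trap IDs in trap.h):

	static void example_event_func(const struct mlxsw_reg_info *reg,
				       char *payload, void *priv)
	{
		pr_info("event for register 0x%x, payload %u bytes\n",
			reg->id, reg->len);
	}

	static int example_listen_for_pude(struct mlxsw_core *mlxsw_core,
					   void *priv)
	{
		static const struct mlxsw_event_listener el = {
			.func = example_event_func,
			.trap_id = MLXSW_TRAP_ID_PUDE,
		};

		return mlxsw_core_event_listener_register(mlxsw_core, &el, priv);
	}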
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+				      const struct mlxsw_reg_info *reg,
+				      char *payload,
+				      enum mlxsw_core_reg_access_type type)
+{
+	int err;
+	char *op_tlv;
+	struct sk_buff *skb;
+	struct mlxsw_tx_info tx_info = {
+		.local_port = MLXSW_PORT_CPU_PORT,
+		.is_emad = true,
+	};
+
+	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+	if (!skb)
+		return -ENOMEM;
+
+	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+	mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+		mlxsw_core->emad.tid);
+	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+	if (!err) {
+		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+		       reg->len);
+
+		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+			mlxsw_core->emad.tid - 1);
+		mlxsw_core_buf_dump_dbg(mlxsw_core,
+					mlxsw_core->emad.resp_skb->data,
+					mlxsw_core->emad.resp_skb->len);
+
+		dev_kfree_skb(mlxsw_core->emad.resp_skb);
+	}
+
+	return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+				     const struct mlxsw_reg_info *reg,
+				     char *payload,
+				     enum mlxsw_core_reg_access_type type)
+{
+	int err, n_retry;
+	char *in_mbox, *out_mbox, *tmp;
+
+	in_mbox = mlxsw_cmd_mbox_alloc();
+	if (!in_mbox)
+		return -ENOMEM;
+
+	out_mbox = mlxsw_cmd_mbox_alloc();
+	if (!out_mbox) {
+		err = -ENOMEM;
+		goto free_in_mbox;
+	}
+
+	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+	n_retry = 0;
+retry:
+	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+	if (!err) {
+		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+			goto retry;
+	}
+
+	if (!err)
+		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+		       reg->len);
+
+	mlxsw_core->emad.tid++;
+	mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+	mlxsw_cmd_mbox_free(in_mbox);
+	return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type)
+{
+	u64 cur_tid;
+	int err;
+
+	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+			reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+		return -EINTR;
+	}
+
+	cur_tid = mlxsw_core->emad.tid;
+	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+		mlxsw_core_reg_access_type_str(type));
+
+	/* During
initialization EMAD interface is not available to us, + * so we default to command interface. We switch to EMAD interface + * after setting the appropriate traps. + */ + if (!mlxsw_core->emad.use_emad) + err = mlxsw_core_reg_access_cmd(mlxsw_core, reg, + payload, type); + else + err = mlxsw_core_reg_access_emad(mlxsw_core, reg, + payload, type); + + if (err) + dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n", + cur_tid, reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + + mutex_unlock(&mlxsw_core->emad.lock); + return err; +} + +int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload) +{ + return mlxsw_core_reg_access(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_QUERY); +} +EXPORT_SYMBOL(mlxsw_reg_query); + +int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload) +{ + return mlxsw_core_reg_access(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_WRITE); +} +EXPORT_SYMBOL(mlxsw_reg_write); + +void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, + struct mlxsw_rx_info *rx_info) +{ + struct mlxsw_rx_listener_item *rxl_item; + const struct mlxsw_rx_listener *rxl; + struct mlxsw_core_pcpu_stats *pcpu_stats; + u8 local_port = rx_info->sys_port; + bool found = false; + + dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n", + __func__, rx_info->sys_port, rx_info->trap_id); + + if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || + (local_port >= MLXSW_PORT_MAX_PORTS)) + goto drop; + + rcu_read_lock(); + list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) { + rxl = &rxl_item->rxl; + if ((rxl->local_port == MLXSW_PORT_DONT_CARE || + rxl->local_port == local_port) && + rxl->trap_id == rx_info->trap_id) { + found = true; + break; + } + } + rcu_read_unlock(); + if (!found) + goto drop; + + pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->port_rx_packets[local_port]++; + pcpu_stats->port_rx_bytes[local_port] += skb->len; + pcpu_stats->trap_rx_packets[rx_info->trap_id]++; + pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + rxl->func(skb, local_port, rxl_item->priv); + return; + +drop: + if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX) + this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid); + else + this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]); + if (local_port >= MLXSW_PORT_MAX_PORTS) + this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid); + else + this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]); + dev_kfree_skb(skb); +} +EXPORT_SYMBOL(mlxsw_core_skb_receive); + +int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size) +{ + u8 status; + int err; + + BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); + if (!mlxsw_core->bus->cmd_exec) + return -EOPNOTSUPP; + + dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", + opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod); + if (in_mbox) { + dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n"); + mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size); + } + + err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode, + opcode_mod, in_mod, out_mbox_direct, + in_mbox, 
in_mbox_size, + out_mbox, out_mbox_size, &status); + + if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { + dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", + opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, + in_mod, status, mlxsw_cmd_status_str(status)); + } else if (err == -ETIMEDOUT) { + dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n", + opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, + in_mod); + } + + if (!err && out_mbox) { + dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); + mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); + } + return err; +} +EXPORT_SYMBOL(mlxsw_cmd_exec); + +static int __init mlxsw_core_module_init(void) +{ + mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); + if (!mlxsw_core_dbg_root) + return -ENOMEM; + return 0; +} + +static void __exit mlxsw_core_module_exit(void) +{ + debugfs_remove_recursive(mlxsw_core_dbg_root); +} + +module_init(mlxsw_core_module_init); +module_exit(mlxsw_core_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox switch device core driver"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h new file mode 100644 index 000000000000..2280b319c362 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -0,0 +1,202 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_CORE_H +#define _MLXSW_CORE_H + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/gfp.h> +#include <linux/types.h> +#include <linux/skbuff.h> + +#include "trap.h" +#include "reg.h" + +#include "cmd.h" + +#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-" +#define MODULE_MLXSW_DRIVER_ALIAS(kind) \ + MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind) + +#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2" + +struct mlxsw_core; +struct mlxsw_driver; +struct mlxsw_bus; +struct mlxsw_bus_info; + +int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); +void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver); + +int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, + const struct mlxsw_bus *mlxsw_bus, + void *bus_priv); +void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core); + +struct mlxsw_tx_info { + u8 local_port; + bool is_emad; +}; + +int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info); + +struct mlxsw_rx_listener { + void (*func)(struct sk_buff *skb, u8 local_port, void *priv); + u8 local_port; + u16 trap_id; +}; + +struct mlxsw_event_listener { + void (*func)(const struct mlxsw_reg_info *reg, + char *payload, void *priv); + enum mlxsw_event_trap_id trap_id; +}; + +int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_rx_listener *rxl, + void *priv); +void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core, + const struct mlxsw_rx_listener *rxl, + void *priv); + +int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core, + const struct mlxsw_event_listener *el, + void *priv); +void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, + const struct mlxsw_event_listener *el, + void *priv); + +int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload); +int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload); + +struct mlxsw_rx_info { + u16 sys_port; + int trap_id; +}; + +void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, + struct mlxsw_rx_info *rx_info); + +#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 + +struct mlxsw_swid_config { + u8 used_type:1, + used_properties:1; + u8 type; + u8 properties; +}; + +struct mlxsw_config_profile { + u16 used_max_vepa_channels:1, + used_max_lag:1, + used_max_port_per_lag:1, + used_max_mid:1, + used_max_pgt:1, + used_max_system_port:1, + used_max_vlan_groups:1, + used_max_regions:1, + used_flood_tables:1, + used_flood_mode:1, + used_max_ib_mc:1, + used_max_pkey:1, + used_ar_sec:1, + used_adaptive_routing_group_cap:1; + u8 max_vepa_channels; + u16 max_lag; + u16 max_port_per_lag; + u16 max_mid; + u16 max_pgt; + u16 max_system_port; + u16 max_vlan_groups; + u16 max_regions; + u8 max_flood_tables; + u8 max_vid_flood_tables; + u8 flood_mode; + u16 max_ib_mc; + u16 max_pkey; + u8 ar_sec; + u16 adaptive_routing_group_cap; + u8 arn; + struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; +}; + +struct mlxsw_driver { + struct list_head list; + const char *kind; + struct module *owner; + size_t priv_size; + int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info); + void (*fini)(void *driver_priv); + void (*txhdr_construct)(struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info); + u8 txhdr_len; + const struct mlxsw_config_profile 
*profile; +}; + +struct mlxsw_bus { + const char *kind; + int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile); + void (*fini)(void *bus_priv); + int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info); + int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod, + u32 in_mod, bool out_mbox_direct, + char *in_mbox, size_t in_mbox_size, + char *out_mbox, size_t out_mbox_size, + u8 *p_status); +}; + +struct mlxsw_bus_info { + const char *device_kind; + const char *device_name; + struct device *dev; + struct { + u16 major; + u16 minor; + u16 subminor; + } fw_rev; + u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN]; + u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; +}; + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h new file mode 100644 index 000000000000..97b6bb5d9185 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h @@ -0,0 +1,127 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/emad.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_EMAD_H +#define _MLXSW_EMAD_H + +#define MLXSW_EMAD_MAX_FRAME_LEN 1518 /* Length in u8 */ +#define MLXSW_EMAD_MAX_RETRY 5 + +/* EMAD Ethernet header */ +#define MLXSW_EMAD_ETH_HDR_LEN 0x10 /* Length in u8 */ +#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01" +#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03" +#define MLXSW_EMAD_EH_ETHERTYPE 0x8932 +#define MLXSW_EMAD_EH_MLX_PROTO 0 +#define MLXSW_EMAD_EH_PROTO_VERSION 0 + +/* EMAD TLV Types */ +enum { + MLXSW_EMAD_TLV_TYPE_END, + MLXSW_EMAD_TLV_TYPE_OP, + MLXSW_EMAD_TLV_TYPE_DR, + MLXSW_EMAD_TLV_TYPE_REG, + MLXSW_EMAD_TLV_TYPE_USERDATA, + MLXSW_EMAD_TLV_TYPE_OOBETH, +}; + +/* OP TLV */ +#define MLXSW_EMAD_OP_TLV_LEN 4 /* Length in u32 */ + +enum { + MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1, + MLXSW_EMAD_OP_TLV_CLASS_IPC = 2, +}; + +enum mlxsw_emad_op_tlv_status { + MLXSW_EMAD_OP_TLV_STATUS_SUCCESS, + MLXSW_EMAD_OP_TLV_STATUS_BUSY, + MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV, + MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED, + MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER, + MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE, + MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK, + MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70, +}; + +static inline char *mlxsw_emad_op_tlv_status_str(u8 status) +{ + switch (status) { + case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: + return "operation performed"; + case MLXSW_EMAD_OP_TLV_STATUS_BUSY: + return "device is busy"; + case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: + return "version not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: + return "unknown TLV"; + case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED: + return "register not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED: + return "class not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED: + return "method not supported"; + case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER: + return "bad parameter"; + case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: + return "resource not available"; + case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: + return "acknowledged. retransmit"; + case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: + return "internal error"; + default: + return "*UNKNOWN*"; + } +} + +enum { + MLXSW_EMAD_OP_TLV_REQUEST, + MLXSW_EMAD_OP_TLV_RESPONSE +}; + +enum { + MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1, + MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2, + MLXSW_EMAD_OP_TLV_METHOD_SEND = 3, + MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5, +}; + +/* END TLV */ +#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */ + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h new file mode 100644 index 000000000000..4d0ac882bec3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -0,0 +1,405 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/item.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_ITEM_H +#define _MLXSW_ITEM_H + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/bitops.h> + +struct mlxsw_item { + unsigned short offset; /* bytes in container */ + unsigned short step; /* step in bytes for indexed items */ + unsigned short in_step_offset; /* offset within one step */ + unsigned char shift; /* shift in bits */ + unsigned char element_size; /* size of element in bit array */ + bool no_real_shift; + union { + unsigned char bits; + unsigned short bytes; + } size; + const char *name; +}; + +static inline unsigned int +__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index, + size_t typesize) +{ + BUG_ON(index && !item->step); + if (item->offset % typesize != 0 || + item->step % typesize != 0 || + item->in_step_offset % typesize != 0) { + pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%lx)\n", + item->name, item->offset, item->step, + item->in_step_offset, typesize); + BUG(); + } + + return ((item->offset + item->step * index + item->in_step_offset) / + typesize); +} + +static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16)); + __be16 *b = (__be16 *) buf; + u16 tmp; + + tmp = be16_to_cpu(b[offset]); + tmp >>= item->shift; + tmp &= GENMASK(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item, + unsigned short index, u16 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, + sizeof(u16)); + __be16 *b = (__be16 *) buf; + u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift; + u16 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = be16_to_cpu(b[offset]); + tmp &= ~mask; + tmp |= val; + b[offset] = cpu_to_be16(tmp); +} + +static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32)); + __be32 *b = (__be32 *) buf; + u32 tmp; + + tmp = 
be32_to_cpu(b[offset]); + tmp >>= item->shift; + tmp &= GENMASK(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item, + unsigned short index, u32 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, + sizeof(u32)); + __be32 *b = (__be32 *) buf; + u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift; + u32 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = be32_to_cpu(b[offset]); + tmp &= ~mask; + tmp |= val; + b[offset] = cpu_to_be32(tmp); +} + +static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item, + unsigned short index) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64)); + __be64 *b = (__be64 *) buf; + u64 tmp; + + tmp = be64_to_cpu(b[offset]); + tmp >>= item->shift; + tmp &= GENMASK_ULL(item->size.bits - 1, 0); + if (item->no_real_shift) + tmp <<= item->shift; + return tmp; +} + +static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item, + unsigned short index, u64 val) +{ + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64)); + __be64 *b = (__be64 *) buf; + u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift; + u64 tmp; + + if (!item->no_real_shift) + val <<= item->shift; + val &= mask; + tmp = be64_to_cpu(b[offset]); + tmp &= ~mask; + tmp |= val; + b[offset] = cpu_to_be64(tmp); +} + +static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, + struct mlxsw_item *item) +{ + memcpy(dst, &buf[item->offset], item->size.bytes); +} + +static inline void __mlxsw_item_memcpy_to(char *buf, char *src, + struct mlxsw_item *item) +{ + memcpy(&buf[item->offset], src, item->size.bytes); +} + +static inline u16 +__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) +{ + u16 max_index, be_index; + u16 offset; /* byte offset inside the array */ + + BUG_ON(index && !item->element_size); + if (item->offset % sizeof(u32) != 0 || + BITS_PER_BYTE % item->element_size != 0) { + pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n", + item->name, item->offset, item->element_size); + BUG(); + } + + max_index = (item->size.bytes << 3) / item->element_size - 1; + be_index = max_index - index; + offset = be_index * item->element_size >> 3; + /* bit offset of the element within its byte */ + *shift = index % (BITS_PER_BYTE / item->element_size) * item->element_size; + + return item->offset + offset; +} + +static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item, + u16 index) +{ + u8 shift, tmp; + u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift); + + tmp = buf[offset]; + tmp >>= shift; + tmp &= GENMASK(item->element_size - 1, 0); + return tmp; +} + +static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item, + u16 index, u8 val) +{ + u8 shift, tmp; + u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift); + u8 mask = GENMASK(item->element_size - 1, 0) << shift; + + val <<= shift; + val &= mask; + tmp = buf[offset]; + tmp &= ~mask; + tmp |= val; + buf[offset] = tmp; +} + +#define __ITEM_NAME(_type, _cname, _iname) \ + mlxsw_##_type##_##_cname##_##_iname##_item + +/* _type: cmd_mbox, reg, etc. + * _cname: container name (e.g.
command name, register name) + * _iname: item name within the container + */ + +#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +{ \ + return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\ +{ \ + __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u16 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u16 val) \ +{ \ + __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + +#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +{ \ + return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\ +{ \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \ + _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u32 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u32 val) \ +{ \ + __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + +#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .shift = _shift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf) \ +{ \ + return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\ +{ \ + __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \ +} + +#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift, 
\ + _sizebits, _step, _instepoffset, _norealshift) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .shift = _shift, \ + .no_real_shift = _norealshift, \ + .size = {.bits = _sizebits,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u64 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index) \ +{ \ + return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \ + u64 val) \ +{ \ + __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} + +#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .size = {.bytes = _sizebytes,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \ +{ \ + __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \ +{ \ + __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \ +} + +#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ + _element_size) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .element_size = _element_size, \ + .size = {.bytes = _sizebytes,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline u8 \ +mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index) \ +{ \ + return __mlxsw_item_bit_array_get(buf, \ + &__ITEM_NAME(_type, _cname, _iname), \ + index); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \ +{ \ + return __mlxsw_item_bit_array_set(buf, \ + &__ITEM_NAME(_type, _cname, _iname), \ + index, val); \ +} \ + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c new file mode 100644 index 000000000000..298ead5b42ca --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -0,0 +1,1794 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/pci.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/export.h> +#include <linux/err.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <linux/log2.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#include "pci.h" +#include "core.h" +#include "cmd.h" +#include "port.h" + +static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; + +static const struct pci_device_id mlxsw_pci_id_table[] = { + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, + {0, } +}; + +static struct dentry *mlxsw_pci_dbg_root; + +static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id) +{ + switch (id->device) { + case PCI_DEVICE_ID_MELLANOX_SWITCHX2: + return MLXSW_DEVICE_KIND_SWITCHX2; + default: + BUG(); + } +} + +#define mlxsw_pci_write32(mlxsw_pci, reg, val) \ + iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) +#define mlxsw_pci_read32(mlxsw_pci, reg) \ + ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) + +enum mlxsw_pci_queue_type { + MLXSW_PCI_QUEUE_TYPE_SDQ, + MLXSW_PCI_QUEUE_TYPE_RDQ, + MLXSW_PCI_QUEUE_TYPE_CQ, + MLXSW_PCI_QUEUE_TYPE_EQ, +}; + +static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type) +{ + switch (q_type) { + case MLXSW_PCI_QUEUE_TYPE_SDQ: + return "sdq"; + case MLXSW_PCI_QUEUE_TYPE_RDQ: + return "rdq"; + case MLXSW_PCI_QUEUE_TYPE_CQ: + return "cq"; + case MLXSW_PCI_QUEUE_TYPE_EQ: + return "eq"; + } + BUG(); +} + +#define MLXSW_PCI_QUEUE_TYPE_COUNT 4 + +static const u16 mlxsw_pci_doorbell_type_offset[] = { + MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */ + MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */ + MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */ + MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */ +}; + +static const u16 mlxsw_pci_doorbell_arm_type_offset[] = { + 0, /* unused */ + 0, /* unused */ + MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */ + MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */ +}; + +struct mlxsw_pci_mem_item { + char *buf; + dma_addr_t mapaddr; + size_t size; +}; + +struct mlxsw_pci_queue_elem_info { + char *elem; /* pointer to actual dma mapped element mem chunk */ + union { + struct { + struct sk_buff *skb; + } sdq; + struct { + struct sk_buff *skb; + } rdq; + } u; +}; + +struct mlxsw_pci_queue { + spinlock_t lock; /* for queue accesses */ + struct mlxsw_pci_mem_item mem_item; + struct mlxsw_pci_queue_elem_info *elem_info; + u16 producer_counter; + u16 consumer_counter; + u16 count; /* number of 
elements in queue */ + u8 num; /* queue number */ + u8 elem_size; /* size of one element */ + enum mlxsw_pci_queue_type type; + struct tasklet_struct tasklet; /* queue processing tasklet */ + struct mlxsw_pci *pci; + union { + struct { + u32 comp_sdq_count; + u32 comp_rdq_count; + } cq; + struct { + u32 ev_cmd_count; + u32 ev_comp_count; + u32 ev_other_count; + } eq; + } u; +}; + +struct mlxsw_pci_queue_type_group { + struct mlxsw_pci_queue *q; + u8 count; /* number of queues in group */ +}; + +struct mlxsw_pci { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; + u32 doorbell_offset; + struct msix_entry msix_entry; + struct mlxsw_core *core; + struct { + u16 num_pages; + struct mlxsw_pci_mem_item *items; + } fw_area; + struct { + struct mutex lock; /* Lock access to command registers */ + bool nopoll; + wait_queue_head_t wait; + bool wait_done; + struct { + u8 status; + u64 out_param; + } comp; + } cmd; + struct mlxsw_bus_info bus_info; + struct dentry *dbg_dir; +}; + +static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) +{ + tasklet_schedule(&q->tasklet); +} + +static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, + size_t elem_size, int elem_index) +{ + return q->mem_item.buf + (elem_size * elem_index); +} + +static struct mlxsw_pci_queue_elem_info * +mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index) +{ + return &q->elem_info[elem_index]; +} + +static struct mlxsw_pci_queue_elem_info * +mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q) +{ + int index = q->producer_counter & (q->count - 1); + + if ((q->producer_counter - q->consumer_counter) == q->count) + return NULL; + return mlxsw_pci_queue_elem_info_get(q, index); +} + +static struct mlxsw_pci_queue_elem_info * +mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q) +{ + int index = q->consumer_counter & (q->count - 1); + + return mlxsw_pci_queue_elem_info_get(q, index); +} + +static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index) +{ + return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem; +} + +static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit) +{ + return owner_bit != !!(q->consumer_counter & q->count); +} + +static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, + u32 (*get_elem_owner_func)(char *)) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + char *elem; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + elem = elem_info->elem; + owner_bit = get_elem_owner_func(elem); + if (mlxsw_pci_elem_hw_owned(q, owner_bit)) + return NULL; + q->consumer_counter++; + rmb(); /* make sure we read owned bit before the rest of elem */ + return elem; +} + +static struct mlxsw_pci_queue_type_group * +mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, + enum mlxsw_pci_queue_type q_type) +{ + return &mlxsw_pci->queues[q_type]; +} + +static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci, + enum mlxsw_pci_queue_type q_type) +{ + struct mlxsw_pci_queue_type_group *queue_group; + + queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type); + return queue_group->count; +} + +static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ); +} + +static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ); +} + +static u8 
mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ); +} + +static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci) +{ + return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ); +} + +static struct mlxsw_pci_queue * +__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci, + enum mlxsw_pci_queue_type q_type, u8 q_num) +{ + return &mlxsw_pci->queues[q_type].q[q_num]; +} + +static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, + MLXSW_PCI_QUEUE_TYPE_SDQ, q_num); +} + +static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, + MLXSW_PCI_QUEUE_TYPE_RDQ, q_num); +} + +static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num); +} + +static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci, + u8 q_num) +{ + return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num); +} + +static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 val) +{ + mlxsw_pci_write32(mlxsw_pci, + DOORBELL(mlxsw_pci->doorbell_offset, + mlxsw_pci_doorbell_type_offset[q->type], + q->num), val); +} + +static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 val) +{ + mlxsw_pci_write32(mlxsw_pci, + DOORBELL(mlxsw_pci->doorbell_offset, + mlxsw_pci_doorbell_arm_type_offset[q->type], + q->num), val); +} + +static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + wmb(); /* ensure all writes are done before we ring a bell */ + __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter); +} + +static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + wmb(); /* ensure all writes are done before we ring a bell */ + __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, + q->consumer_counter + q->count); +} + +static void +mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + wmb(); /* ensure all writes are done before we ring a bell */ + __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter); +} + +static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q, + int page_index) +{ + return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index; +} + +static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + q->producer_counter = 0; + q->consumer_counter = 0; + + /* Set CQ of same number of this SDQ. 
*/ + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7); + mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr); + } + + err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); +} + +static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM PROD_COUNT CONS_COUNT COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) { + q = mlxsw_pci_sdq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %5d\n", + i, q->producer_counter, q->consumer_counter, + q->count); + spin_unlock_bh(&q->lock); + } + return 0; +} + +static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, + int index, char *frag_data, size_t frag_len, + int direction) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + dma_addr_t mapaddr; + + mapaddr = pci_map_single(pdev, frag_data, frag_len, direction); + if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) { + if (net_ratelimit()) + dev_err(&pdev->dev, "failed to dma map tx frag\n"); + return -EIO; + } + mlxsw_pci_wqe_address_set(wqe, index, mapaddr); + mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len); + return 0; +} + +static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, + int index, int direction) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index); + dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index); + + if (!frag_len) + return; + pci_unmap_single(pdev, mapaddr, frag_len, direction); +} + +static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue_elem_info *elem_info) +{ + size_t buf_len = MLXSW_PORT_MAX_MTU; + char *wqe = elem_info->elem; + struct sk_buff *skb; + int err; + + elem_info->u.rdq.skb = NULL; + skb = netdev_alloc_skb_ip_align(NULL, buf_len); + if (!skb) + return -ENOMEM; + + /* Assume that wqe was previously zeroed. */ + + err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, + buf_len, DMA_FROM_DEVICE); + if (err) + goto err_frag_map; + + elem_info->u.rdq.skb = skb; + return 0; + +err_frag_map: + dev_kfree_skb_any(skb); + return err; +} + +static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue_elem_info *elem_info) +{ + struct sk_buff *skb; + char *wqe; + + skb = elem_info->u.rdq.skb; + wqe = elem_info->elem; + + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); +} + +static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + int i; + int err; + + q->producer_counter = 0; + q->consumer_counter = 0; + + /* Set CQ of same number of this RDQ with base + * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs. 
+ */ + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT); + mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr); + } + + err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + + for (i = 0; i < q->count; i++) { + elem_info = mlxsw_pci_queue_elem_info_producer_get(q); + BUG_ON(!elem_info); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + if (err) + goto rollback; + /* Everything is set up, ring doorbell to pass elem to HW */ + q->producer_counter++; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + } + + return 0; + +rollback: + for (i--; i >= 0; i--) { + elem_info = mlxsw_pci_queue_elem_info_get(q, i); + mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + } + mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); + + return err; +} + +static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + int i; + + mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); + for (i = 0; i < q->count; i++) { + elem_info = mlxsw_pci_queue_elem_info_get(q, i); + mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + } +} + +static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM PROD_COUNT CONS_COUNT COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) { + q = mlxsw_pci_rdq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %5d\n", + i, q->producer_counter, q->consumer_counter, + q->count); + spin_unlock_bh(&q->lock); + } + return 0; +} + +static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + q->consumer_counter = 0; + + for (i = 0; i < q->count; i++) { + char *elem = mlxsw_pci_queue_elem_get(q, i); + + mlxsw_pci_cqe_owner_set(elem, 1); + } + + mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ + mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); + mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); + } + err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); +} + +static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) { + q = mlxsw_pci_cq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %10d %5d\n", + i, q->consumer_counter, q->u.cq.comp_sdq_count, + q->u.cq.comp_rdq_count, q->count); + spin_unlock_bh(&q->lock); + } + return 
0; +} + +static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 consumer_counter_limit, + char *cqe) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + struct mlxsw_pci_queue_elem_info *elem_info; + char *wqe; + struct sk_buff *skb; + int i; + + spin_lock(&q->lock); + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + skb = elem_info->u.sdq.skb; + wqe = elem_info->elem; + for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + elem_info->u.sdq.skb = NULL; + + if (q->consumer_counter++ != consumer_counter_limit) + dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n"); + spin_unlock(&q->lock); +} + +static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q, + u16 consumer_counter_limit, + char *cqe) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + struct mlxsw_pci_queue_elem_info *elem_info; + char *wqe; + struct sk_buff *skb; + struct mlxsw_rx_info rx_info; + int err; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + skb = elem_info->u.rdq.skb; + if (!skb) + return; + wqe = elem_info->elem; + mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); + + if (q->consumer_counter++ != consumer_counter_limit) + dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); + + /* We do not support lag now */ + if (mlxsw_pci_cqe_lag_get(cqe)) + goto drop; + + rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); + rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); + + skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe)); + mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); + +put_new_skb: + memset(wqe, 0, q->elem_size); + err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); + if (err && net_ratelimit()) + dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n"); + /* Everything is set up, ring doorbell to pass elem to HW */ + q->producer_counter++; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); + return; + +drop: + dev_kfree_skb_any(skb); + goto put_new_skb; +} + +static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) +{ + return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get); +} + +static void mlxsw_pci_cq_tasklet(unsigned long data) +{ + struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; + struct mlxsw_pci *mlxsw_pci = q->pci; + char *cqe; + int items = 0; + int credits = q->count >> 1; + + while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(cqe); + + if (sendq) { + struct mlxsw_pci_queue *sdq; + + sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, + wqe_counter, cqe); + q->u.cq.comp_sdq_count++; + } else { + struct mlxsw_pci_queue *rdq; + + rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, + wqe_counter, cqe); + q->u.cq.comp_rdq_count++; + } + if (++items == credits) + break; + } + if (items) { + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + } +} + +static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + q->consumer_counter = 0; + + for (i = 0; i < q->count; i++) { + char *elem = mlxsw_pci_queue_elem_get(q, i); + + mlxsw_pci_eqe_owner_set(elem, 1); + } + + mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used
*/ + mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ + mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); + } + err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); +} + +static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data) +{ + struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); + struct mlxsw_pci_queue *q; + int i; + static const char hdr[] = + "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n"; + + seq_printf(file, hdr); + for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) { + q = mlxsw_pci_eq_get(mlxsw_pci, i); + spin_lock_bh(&q->lock); + seq_printf(file, "%3d %10d %10d %10d %10d %5d\n", + i, q->consumer_counter, q->u.eq.ev_cmd_count, + q->u.eq.ev_comp_count, q->u.eq.ev_other_count, + q->count); + spin_unlock_bh(&q->lock); + } + return 0; +} + +static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) +{ + mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); + mlxsw_pci->cmd.comp.out_param = + ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 | + mlxsw_pci_eqe_cmd_out_param_l_get(eqe); + mlxsw_pci->cmd.wait_done = true; + wake_up(&mlxsw_pci->cmd.wait); +} + +static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) +{ + return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get); +} + +static void mlxsw_pci_eq_tasklet(unsigned long data) +{ + struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; + struct mlxsw_pci *mlxsw_pci = q->pci; + unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)]; + char *eqe; + u8 cqn; + bool cq_handle = false; + int items = 0; + int credits = q->count >> 1; + + memset(&active_cqns, 0, sizeof(active_cqns)); + + while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { + u8 event_type = mlxsw_pci_eqe_event_type_get(eqe); + + switch (event_type) { + case MLXSW_PCI_EQE_EVENT_TYPE_CMD: + mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe); + q->u.eq.ev_cmd_count++; + break; + case MLXSW_PCI_EQE_EVENT_TYPE_COMP: + cqn = mlxsw_pci_eqe_cqn_get(eqe); + set_bit(cqn, active_cqns); + cq_handle = true; + q->u.eq.ev_comp_count++; + break; + default: + q->u.eq.ev_other_count++; + } + if (++items == credits) + break; + } + if (items) { + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + } + + if (!cq_handle) + return; + for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) { + q = mlxsw_pci_cq_get(mlxsw_pci, cqn); + mlxsw_pci_queue_tasklet_schedule(q); + } +} + +struct mlxsw_pci_queue_ops { + const char *name; + enum mlxsw_pci_queue_type type; + int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q); + void (*fini)(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q); + void (*tasklet)(unsigned long data); + int (*dbg_read)(struct seq_file *s, void *data); + u16 elem_count; + u8 elem_size; +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_SDQ, + .init = mlxsw_pci_sdq_init, + .fini = mlxsw_pci_sdq_fini, + .dbg_read = mlxsw_pci_sdq_dbg_read, + .elem_count = 
MLXSW_PCI_WQE_COUNT, + .elem_size = MLXSW_PCI_WQE_SIZE, +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_RDQ, + .init = mlxsw_pci_rdq_init, + .fini = mlxsw_pci_rdq_fini, + .dbg_read = mlxsw_pci_rdq_dbg_read, + .elem_count = MLXSW_PCI_WQE_COUNT, + .elem_size = MLXSW_PCI_WQE_SIZE +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_CQ, + .init = mlxsw_pci_cq_init, + .fini = mlxsw_pci_cq_fini, + .tasklet = mlxsw_pci_cq_tasklet, + .dbg_read = mlxsw_pci_cq_dbg_read, + .elem_count = MLXSW_PCI_CQE_COUNT, + .elem_size = MLXSW_PCI_CQE_SIZE +}; + +static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { + .type = MLXSW_PCI_QUEUE_TYPE_EQ, + .init = mlxsw_pci_eq_init, + .fini = mlxsw_pci_eq_fini, + .tasklet = mlxsw_pci_eq_tasklet, + .dbg_read = mlxsw_pci_eq_dbg_read, + .elem_count = MLXSW_PCI_EQE_COUNT, + .elem_size = MLXSW_PCI_EQE_SIZE +}; + +static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + const struct mlxsw_pci_queue_ops *q_ops, + struct mlxsw_pci_queue *q, u8 q_num) +{ + struct mlxsw_pci_mem_item *mem_item = &q->mem_item; + int i; + int err; + + spin_lock_init(&q->lock); + q->num = q_num; + q->count = q_ops->elem_count; + q->elem_size = q_ops->elem_size; + q->type = q_ops->type; + q->pci = mlxsw_pci; + + if (q_ops->tasklet) + tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q); + + mem_item->size = MLXSW_PCI_AQ_SIZE; + mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev, + mem_item->size, + &mem_item->mapaddr); + if (!mem_item->buf) + return -ENOMEM; + memset(mem_item->buf, 0, mem_item->size); + + q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL); + if (!q->elem_info) { + err = -ENOMEM; + goto err_elem_info_alloc; + } + + /* Initialize dma mapped elements info elem_info for + * future easy access. 
+ */ + for (i = 0; i < q->count; i++) { + struct mlxsw_pci_queue_elem_info *elem_info; + + elem_info = mlxsw_pci_queue_elem_info_get(q, i); + elem_info->elem = + __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i); + } + + mlxsw_cmd_mbox_zero(mbox); + err = q_ops->init(mlxsw_pci, mbox, q); + if (err) + goto err_q_ops_init; + return 0; + +err_q_ops_init: + kfree(q->elem_info); +err_elem_info_alloc: + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); + return err; +} + +static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_pci_queue_ops *q_ops, + struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_mem_item *mem_item = &q->mem_item; + + q_ops->fini(mlxsw_pci, q); + kfree(q->elem_info); + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); +} + +static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + const struct mlxsw_pci_queue_ops *q_ops, + u8 num_qs) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + struct mlxsw_pci_queue_type_group *queue_group; + char tmp[16]; + int i; + int err; + + queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type); + queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL); + if (!queue_group->q) + return -ENOMEM; + + for (i = 0; i < num_qs; i++) { + err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops, + &queue_group->q[i], i); + if (err) + goto err_queue_init; + } + queue_group->count = num_qs; + + sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type)); + debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir, + q_ops->dbg_read); + + return 0; + +err_queue_init: + for (i--; i >= 0; i--) + mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]); + kfree(queue_group->q); + return err; +} + +static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_pci_queue_ops *q_ops) +{ + struct mlxsw_pci_queue_type_group *queue_group; + int i; + + queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type); + for (i = 0; i < queue_group->count; i++) + mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]); + kfree(queue_group->q); +} + +static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) +{ + struct pci_dev *pdev = mlxsw_pci->pdev; + u8 num_sdqs; + u8 sdq_log2sz; + u8 num_rdqs; + u8 rdq_log2sz; + u8 num_cqs; + u8 cq_log2sz; + u8 num_eqs; + u8 eq_log2sz; + int err; + + mlxsw_cmd_mbox_zero(mbox); + err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox); + if (err) + return err; + + num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox); + sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox); + num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox); + rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox); + num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox); + cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox); + num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); + eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); + + if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) || + (num_rdqs != MLXSW_PCI_RDQS_COUNT) || + (num_cqs != MLXSW_PCI_CQS_COUNT) || + (num_eqs != MLXSW_PCI_EQS_COUNT)) { + dev_err(&pdev->dev, "Unsupported number of queues\n"); + return -EINVAL; + } + + if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) || + (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) || + (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) || + (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) { + dev_err(&pdev->dev, "Unsupported 
number of async queue descriptors\n"); + return -EINVAL; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, + num_eqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize event queues\n"); + return err; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops, + num_cqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize completion queues\n"); + goto err_cqs_init; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops, + num_sdqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n"); + goto err_sdqs_init; + } + + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops, + num_rdqs); + if (err) { + dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n"); + goto err_rdqs_init; + } + + /* We have to poll in command interface until queues are initialized */ + mlxsw_pci->cmd.nopoll = true; + return 0; + +err_rdqs_init: + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops); +err_sdqs_init: + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops); +err_cqs_init: + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops); + return err; +} + +static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci) +{ + mlxsw_pci->cmd.nopoll = false; + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops); + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops); + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops); + mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops); +} + +static void +mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, + char *mbox, int index, + const struct mlxsw_swid_config *swid) +{ + u8 mask = 0; + + if (swid->used_type) { + mlxsw_cmd_mbox_config_profile_swid_config_type_set( + mbox, index, swid->type); + mask |= 1; + } + if (swid->used_properties) { + mlxsw_cmd_mbox_config_profile_swid_config_properties_set( + mbox, index, swid->properties); + mask |= 2; + } + mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); +} + +static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, + const struct mlxsw_config_profile *profile) +{ + int i; + + mlxsw_cmd_mbox_zero(mbox); + + if (profile->used_max_vepa_channels) { + mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_vepa_channels_set( + mbox, profile->max_vepa_channels); + } + if (profile->used_max_lag) { + mlxsw_cmd_mbox_config_profile_set_max_lag_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_lag_set( + mbox, profile->max_lag); + } + if (profile->used_max_port_per_lag) { + mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_port_per_lag_set( + mbox, profile->max_port_per_lag); + } + if (profile->used_max_mid) { + mlxsw_cmd_mbox_config_profile_set_max_mid_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_mid_set( + mbox, profile->max_mid); + } + if (profile->used_max_pgt) { + mlxsw_cmd_mbox_config_profile_set_max_pgt_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_pgt_set( + mbox, profile->max_pgt); + } + if (profile->used_max_system_port) { + mlxsw_cmd_mbox_config_profile_set_max_system_port_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_system_port_set( + mbox, profile->max_system_port); + } + if (profile->used_max_vlan_groups) { + mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_vlan_groups_set( + mbox, profile->max_vlan_groups); + } + if 
(profile->used_max_regions) { + mlxsw_cmd_mbox_config_profile_set_max_regions_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_regions_set( + mbox, profile->max_regions); + } + if (profile->used_flood_tables) { + mlxsw_cmd_mbox_config_profile_set_flood_tables_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_flood_tables_set( + mbox, profile->max_flood_tables); + mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set( + mbox, profile->max_vid_flood_tables); + } + if (profile->used_flood_mode) { + mlxsw_cmd_mbox_config_profile_set_flood_mode_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_flood_mode_set( + mbox, profile->flood_mode); + } + if (profile->used_max_ib_mc) { + mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_ib_mc_set( + mbox, profile->max_ib_mc); + } + if (profile->used_max_pkey) { + mlxsw_cmd_mbox_config_profile_set_max_pkey_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_max_pkey_set( + mbox, profile->max_pkey); + } + if (profile->used_ar_sec) { + mlxsw_cmd_mbox_config_profile_set_ar_sec_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_ar_sec_set( + mbox, profile->ar_sec); + } + if (profile->used_adaptive_routing_group_cap) { + mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( + mbox, profile->adaptive_routing_group_cap); + } + + for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) + mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, + &profile->swid_config[i]); + + return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); +} + +static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox) +{ + struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info; + int err; + + mlxsw_cmd_mbox_zero(mbox); + err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox); + if (err) + return err; + mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd); + mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid); + return 0; +} + +static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + u16 num_pages) +{ + struct mlxsw_pci_mem_item *mem_item; + int i; + int err; + + mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item), + GFP_KERNEL); + if (!mlxsw_pci->fw_area.items) + return -ENOMEM; + mlxsw_pci->fw_area.num_pages = num_pages; + + mlxsw_cmd_mbox_zero(mbox); + for (i = 0; i < num_pages; i++) { + mem_item = &mlxsw_pci->fw_area.items[i]; + + mem_item->size = MLXSW_PCI_PAGE_SIZE; + mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev, + mem_item->size, + &mem_item->mapaddr); + if (!mem_item->buf) { + err = -ENOMEM; + goto err_alloc; + } + mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr); + mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */ + } + + err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages); + if (err) + goto err_cmd_map_fa; + + return 0; + +err_cmd_map_fa: +err_alloc: + for (i--; i >= 0; i--) { + mem_item = &mlxsw_pci->fw_area.items[i]; + + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); + } + kfree(mlxsw_pci->fw_area.items); + return err; +} + +static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci) +{ + struct mlxsw_pci_mem_item *mem_item; + int i; + + mlxsw_cmd_unmap_fa(mlxsw_pci->core); + + for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) { + mem_item = &mlxsw_pci->fw_area.items[i]; + + pci_free_consistent(mlxsw_pci->pdev, mem_item->size, + mem_item->buf, mem_item->mapaddr); + } + kfree(mlxsw_pci->fw_area.items); +} 
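[Editor's note] The firmware-area setup just above is a good illustration of the unwind idiom this driver uses throughout: allocate resources in a loop, and on any failure walk the loop index back down, releasing in reverse order before propagating the error. A minimal, self-contained sketch of the same pattern (hypothetical names, not part of the patch):

	#include <linux/pci.h>

	struct demo_page {
		void *buf;
		dma_addr_t mapaddr;
		size_t size;
	};

	/* Allocate 'num' DMA-coherent pages; on failure, free whatever was
	 * already allocated, in reverse order, and report -ENOMEM. */
	static int demo_pages_alloc(struct pci_dev *pdev,
				    struct demo_page *pages, int num)
	{
		int i;

		for (i = 0; i < num; i++) {
			pages[i].size = 4096;
			pages[i].buf = pci_alloc_consistent(pdev, pages[i].size,
							    &pages[i].mapaddr);
			if (!pages[i].buf)
				goto err_alloc;
		}
		return 0;

	err_alloc:
		for (i--; i >= 0; i--)
			pci_free_consistent(pdev, pages[i].size, pages[i].buf,
					    pages[i].mapaddr);
		return -ENOMEM;
	}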
+ +static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id) +{ + struct mlxsw_pci *mlxsw_pci = dev_id; + struct mlxsw_pci_queue *q; + int i; + + for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) { + q = mlxsw_pci_eq_get(mlxsw_pci, i); + mlxsw_pci_queue_tasklet_schedule(q); + } + return IRQ_HANDLED; +} + +static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_config_profile *profile) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + struct pci_dev *pdev = mlxsw_pci->pdev; + char *mbox; + u16 num_pages; + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + mlxsw_pci->core = mlxsw_core; + + mbox = mlxsw_cmd_mbox_alloc(); + if (!mbox) + return -ENOMEM; + err = mlxsw_cmd_query_fw(mlxsw_core, mbox); + if (err) + goto err_query_fw; + + mlxsw_pci->bus_info.fw_rev.major = + mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox); + mlxsw_pci->bus_info.fw_rev.minor = + mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox); + mlxsw_pci->bus_info.fw_rev.subminor = + mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox); + + if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) { + dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n"); + err = -EINVAL; + goto err_iface_rev; + } + if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) { + dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n"); + err = -EINVAL; + goto err_doorbell_page_bar; + } + + mlxsw_pci->doorbell_offset = + mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox); + + num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox); + err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); + if (err) + goto err_fw_area_init; + + err = mlxsw_pci_boardinfo(mlxsw_pci, mbox); + if (err) + goto err_boardinfo; + + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile); + if (err) + goto err_config_profile; + + err = mlxsw_pci_aqs_init(mlxsw_pci, mbox); + if (err) + goto err_aqs_init; + + err = request_irq(mlxsw_pci->msix_entry.vector, + mlxsw_pci_eq_irq_handler, 0, + mlxsw_pci_driver_name, mlxsw_pci); + if (err) { + dev_err(&pdev->dev, "IRQ request failed\n"); + goto err_request_eq_irq; + } + + goto mbox_put; + +err_request_eq_irq: + mlxsw_pci_aqs_fini(mlxsw_pci); +err_aqs_init: +err_config_profile: +err_boardinfo: + mlxsw_pci_fw_area_fini(mlxsw_pci); +err_fw_area_init: +err_doorbell_page_bar: +err_iface_rev: +err_query_fw: +mbox_put: + mlxsw_cmd_mbox_free(mbox); + return err; +} + +static void mlxsw_pci_fini(void *bus_priv) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + + free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci); + mlxsw_pci_aqs_fini(mlxsw_pci); + mlxsw_pci_fw_area_fini(mlxsw_pci); +} + +static struct mlxsw_pci_queue * +mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_tx_info *tx_info) +{ + u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci); + + return mlxsw_pci_sdq_get(mlxsw_pci, sdqn); +} + +static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + struct mlxsw_pci_queue *q; + struct mlxsw_pci_queue_elem_info *elem_info; + char *wqe; + int i; + int err; + + if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) { + err = skb_linearize(skb); + if (err) + return err; + } + + q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); + spin_lock_bh(&q->lock); + elem_info = mlxsw_pci_queue_elem_info_producer_get(q); + if (!elem_info) { + /* queue is full */ + err = -EAGAIN; + goto unlock; + } + 
elem_info->u.sdq.skb = skb;
+
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (err)
+		goto unlock;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+					     skb_frag_address(frag),
+					     skb_frag_size(frag),
+					     DMA_TO_DEVICE);
+		if (err)
+			goto unmap_frags;
+	}
+
+	/* Set unused sq entries byte count to zero. */
+	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+	/* Everything is set up, ring producer doorbell to get HW going */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	goto unlock;
+
+unmap_frags:
+	for (; i >= 0; i--)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+	spin_unlock_bh(&q->lock);
+	return err;
+}
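[Editor's note] Two small decisions in the transmit path above are worth modeling: a WQE carries at most MLXSW_PCI_WQE_SG_ENTRIES scatter/gather pointers, so packets with more page fragments are linearized first, and the SDQ is chosen by a simple modulo on the local port so frames from one port stay ordered on one queue. A compact sketch of both checks (hypothetical helper names, not part of the patch):

	/* Same local port -> same SDQ, preserving per-port ordering. */
	static u8 demo_sdq_pick(u8 local_port, u8 sdq_count)
	{
		return local_port % sdq_count;
	}

	/* Entry 0 holds the linear part; the rest hold page fragments. */
	static bool demo_skb_needs_linearize(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1;
	}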
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+			      u32 in_mod, bool out_mbox_direct,
+			      char *in_mbox, size_t in_mbox_size,
+			      char *out_mbox, size_t out_mbox_size,
+			      u8 *p_status)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	dma_addr_t in_mapaddr = 0;
+	dma_addr_t out_mapaddr = 0;
+	bool evreq = mlxsw_pci->cmd.nopoll;
+	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+	int err;
+
+	*p_status = MLXSW_CMD_STATUS_OK;
+
+	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+	if (err)
+		return err;
+
+	if (in_mbox) {
+		in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
+					    in_mbox_size, PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+						   in_mapaddr))) {
+			err = -EIO;
+			goto err_in_mbox_map;
+		}
+	}
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+	if (out_mbox) {
+		out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
+					     out_mbox_size, PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+						   out_mapaddr))) {
+			err = -EIO;
+			goto err_out_mbox_map;
+		}
+	}
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+	*p_wait_done = false;
+
+	wmb(); /* all needs to be written before we write control register */
+	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+			  MLXSW_PCI_CIR_CTRL_GO_BIT |
+			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+			  opcode);
+
+	if (!evreq) {
+		unsigned long end;
+
+		end = jiffies + timeout;
+		do {
+			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+				*p_wait_done = true;
+				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+				break;
+			}
+			cond_resched();
+		} while (time_before(jiffies, end));
+	} else {
+		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+		*p_status = mlxsw_pci->cmd.comp.status;
+	}
+
+	err = 0;
+	if (*p_wait_done) {
+		if (*p_status)
+			err = -EIO;
+	} else {
+		err = -ETIMEDOUT;
+	}
+
+	if (!err && out_mbox && out_mbox_direct) {
+		/* Some commands do not use the output param as an address to
+		 * a mailbox; they store their output directly in registers.
+		 * In that case, copy the registers into the mbox buffer.
+		 */
+		__be32 tmp;
+
+		if (!evreq) {
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_HI));
+			memcpy(out_mbox, &tmp, sizeof(tmp));
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_LO));
+			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+		}
+	}
+
+	if (out_mapaddr)
+		pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
+				 PCI_DMA_FROMDEVICE);
+
+	/* fall through */
+
+err_out_mbox_map:
+	if (in_mapaddr)
+		pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
+				 PCI_DMA_TODEVICE);
+err_in_mbox_map:
+	mutex_unlock(&mlxsw_pci->cmd.lock);
+
+	return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+	.kind		= "pci",
+	.init		= mlxsw_pci_init,
+	.fini		= mlxsw_pci_fini,
+	.skb_transmit	= mlxsw_pci_skb_transmit,
+	.cmd_exec	= mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+	/* Current firmware does not let us know when the reset is done.
+	 * So we just wait here for a constant time and hope for the best.
+	 */
+	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+	return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mlxsw_pci *mlxsw_pci;
+	int err;
+
+	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+	if (!mlxsw_pci)
+		return -ENOMEM;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed\n");
+		goto err_pci_enable_device;
+	}
+
+	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		goto err_pci_request_regions;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	}
+
+	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+		dev_err(&pdev->dev, "invalid PCI region size\n");
+		err = -EINVAL;
+		goto err_pci_resource_len_check;
+	}
+
+	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+				     pci_resource_len(pdev, 0));
+	if (!mlxsw_pci->hw_addr) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto err_ioremap;
+	}
+	pci_set_master(pdev);
+
+	mlxsw_pci->pdev = pdev;
+	pci_set_drvdata(pdev, mlxsw_pci);
+
+	err = mlxsw_pci_sw_reset(mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "Software reset failed\n");
+		goto err_sw_reset;
+	}
+
+	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+	if (err) {
+		dev_err(&pdev->dev, "MSI-X init failed\n");
+		goto err_msix_init;
+	}
+
+	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+	mlxsw_pci->bus_info.dev = &pdev->dev;
+
+	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+						mlxsw_pci_dbg_root);
+	if (!mlxsw_pci->dbg_dir) {
+		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+		err = -ENOMEM;
+		goto err_dbg_create_dir;
+	}
+
+	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+					     &mlxsw_pci_bus, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register bus device\n");
+		goto err_bus_device_register;
+	}
+
+	return 0;
+
+err_bus_device_register:
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+
pci_disable_msix(mlxsw_pci->pdev); +err_msix_init: +err_sw_reset: + iounmap(mlxsw_pci->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(mlxsw_pci); + return err; +} + +static void mlxsw_pci_remove(struct pci_dev *pdev) +{ + struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); + + mlxsw_core_bus_device_unregister(mlxsw_pci->core); + debugfs_remove_recursive(mlxsw_pci->dbg_dir); + pci_disable_msix(mlxsw_pci->pdev); + iounmap(mlxsw_pci->hw_addr); + pci_release_regions(mlxsw_pci->pdev); + pci_disable_device(mlxsw_pci->pdev); + kfree(mlxsw_pci); +} + +static struct pci_driver mlxsw_pci_driver = { + .name = mlxsw_pci_driver_name, + .id_table = mlxsw_pci_id_table, + .probe = mlxsw_pci_probe, + .remove = mlxsw_pci_remove, +}; + +static int __init mlxsw_pci_module_init(void) +{ + int err; + + mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL); + if (!mlxsw_pci_dbg_root) + return -ENOMEM; + err = pci_register_driver(&mlxsw_pci_driver); + if (err) + goto err_register_driver; + return 0; + +err_register_driver: + debugfs_remove_recursive(mlxsw_pci_dbg_root); + return err; +} + +static void __exit mlxsw_pci_module_exit(void) +{ + pci_unregister_driver(&mlxsw_pci_driver); + debugfs_remove_recursive(mlxsw_pci_dbg_root); +} + +module_init(mlxsw_pci_module_init); +module_exit(mlxsw_pci_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox switch PCI interface driver"); +MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h new file mode 100644 index 000000000000..887af8459149 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -0,0 +1,221 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/pci.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_PCI_H +#define _MLXSW_PCI_H + +#include <linux/bitops.h> + +#include "item.h" + +#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 +#define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ +#define MLXSW_PCI_PAGE_SIZE 4096 + +#define MLXSW_PCI_CIR_BASE 0x71000 +#define MLXSW_PCI_CIR_IN_PARAM_HI MLXSW_PCI_CIR_BASE +#define MLXSW_PCI_CIR_IN_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x04) +#define MLXSW_PCI_CIR_IN_MODIFIER (MLXSW_PCI_CIR_BASE + 0x08) +#define MLXSW_PCI_CIR_OUT_PARAM_HI (MLXSW_PCI_CIR_BASE + 0x0C) +#define MLXSW_PCI_CIR_OUT_PARAM_LO (MLXSW_PCI_CIR_BASE + 0x10) +#define MLXSW_PCI_CIR_TOKEN (MLXSW_PCI_CIR_BASE + 0x14) +#define MLXSW_PCI_CIR_CTRL (MLXSW_PCI_CIR_BASE + 0x18) +#define MLXSW_PCI_CIR_CTRL_GO_BIT BIT(23) +#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT BIT(22) +#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT 12 +#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT 24 +#define MLXSW_PCI_CIR_TIMEOUT_MSECS 1000 + +#define MLXSW_PCI_SW_RESET 0xF0010 +#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 + +#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000 +#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200 +#define MLXSW_PCI_DOORBELL_CQ_OFFSET 0x400 +#define MLXSW_PCI_DOORBELL_EQ_OFFSET 0x600 +#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET 0x800 +#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET 0xA00 + +#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ + ((offset) + (type_offset) + (num) * 4) + +#define MLXSW_PCI_RDQS_COUNT 24 +#define MLXSW_PCI_SDQS_COUNT 24 +#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT) +#define MLXSW_PCI_EQS_COUNT 2 +#define MLXSW_PCI_EQ_ASYNC_NUM 0 +#define MLXSW_PCI_EQ_COMP_NUM 1 + +#define MLXSW_PCI_AQ_PAGES 8 +#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) +#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ +#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) +#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) +#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) +#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80 + +#define MLXSW_PCI_WQE_SG_ENTRIES 3 +#define MLXSW_PCI_WQE_TYPE_ETHERNET 0xA + +/* pci_wqe_c + * If set it indicates that a completion should be reported upon + * execution of this descriptor. + */ +MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1); + +/* pci_wqe_lp + * Local Processing, set if packet should be processed by the local + * switch hardware: + * For Ethernet EMAD (Direct Route and non Direct Route) - + * must be set if packet destination is local device + * For InfiniBand CTL - must be set if packet destination is local device + * Otherwise it must be clear + * Local Process packets must not exceed the size of 2K (including payload + * and headers). + */ +MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1); + +/* pci_wqe_type + * Packet type. 
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather Entries must be 2Byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets including additional two
+ * Reserved Bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP	0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD	0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 000000000000..726f5435b32f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _MLXSW_PORT_H +#define _MLXSW_PORT_H + +#include <linux/types.h> + +#define MLXSW_PORT_MAX_MTU 10000 + +#define MLXSW_PORT_DEFAULT_VID 1 + +#define MLXSW_PORT_SWID_DISABLED_PORT 255 +#define MLXSW_PORT_SWID_ALL_SWIDS 254 +#define MLXSW_PORT_SWID_TYPE_ETH 2 + +#define MLXSW_PORT_MID 0xd000 + +#define MLXSW_PORT_MAX_PHY_PORTS 0x40 +#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS + +#define MLXSW_PORT_DEVID_BITS_OFFSET 10 +#define MLXSW_PORT_PHY_BITS_OFFSET 4 +#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) + +#define MLXSW_PORT_CPU_PORT 0x0 + +#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) + +enum mlxsw_port_admin_status { + MLXSW_PORT_ADMIN_STATUS_UP = 1, + MLXSW_PORT_ADMIN_STATUS_DOWN = 2, + MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3, + MLXSW_PORT_ADMIN_STATUS_DISABLED = 4, +}; + +enum mlxsw_reg_pude_oper_status { + MLXSW_PORT_OPER_STATUS_UP = 1, + MLXSW_PORT_OPER_STATUS_DOWN = 2, + MLXSW_PORT_OPER_STATUS_FAILURE = 4, /* Can be set to up again. */ +}; + +#endif /* _MLXSW_PORT_H */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h new file mode 100644 index 000000000000..b5a72f8e78b1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -0,0 +1,1289 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/reg.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+	u16 id;
+	u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+	.id = MLXSW_REG_SGCR_ID,
+	.len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+	MLXSW_REG_ZERO(sgcr, payload);
+	mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+	.id = MLXSW_REG_SPAD_ID,
+	.len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
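[Editor's note] Each register definition above pairs a small mlxsw_reg_info descriptor with MLXSW_ITEM32/MLXSW_ITEM_BUF invocations (from item.h, added earlier in this patch), which generate typed mlxsw_reg_<reg>_<field>_set()/_get() accessors over a raw payload buffer, plus a pack helper that zeroes the payload and fills the fields. Assuming the mlxsw_reg_write() EMAD helper from core.h in this series, driving SGCR would look roughly like this (a sketch, not part of the patch):

	/* Sketch: enable link-local broadcast flooding via SGCR. */
	static int demo_sgcr_enable_llb(struct mlxsw_core *mlxsw_core)
	{
		char sgcr_pl[MLXSW_REG_SGCR_LEN];

		/* Zero the payload, then set reg_sgcr_llb via the
		 * generated setter. */
		mlxsw_reg_sgcr_pack(sgcr_pl, true);
		return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
	}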
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In multi-chip configuration, each device should maintain mapping between
+ * Multicast ID (MID) into a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of
+ * the FDB. The MID record maps from a MID, which is a unique identifier of
+ * the multicast group within the stacking domain, into a list of local ports
+ * into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+	.id = MLXSW_REG_SMID_ID,
+	.len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast
+ * group across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+	MLXSW_REG_ZERO(smid, payload);
+	mlxsw_reg_smid_swid_set(payload, 0);
+	mlxsw_reg_smid_mid_set(payload, mid);
+	mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+	.id = MLXSW_REG_SPMS_ID,
+	.len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+	MLXSW_REG_SPMS_STATE_NO_CHANGE,
+	MLXSW_REG_SPMS_STATE_DISCARDING,
+	MLXSW_REG_SPMS_STATE_LEARNING,
+	MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+				       enum mlxsw_reg_spms_state state)
+{
+	MLXSW_REG_ZERO(spms, payload);
+	mlxsw_reg_spms_local_port_set(payload, local_port);
+	mlxsw_reg_spms_state_set(payload, vid, state);
+}
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID 0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+	.id = MLXSW_REG_SFGC_ID,
+	.len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+	MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+	MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+	MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+	MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+	MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+	MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */ +MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3); + +enum mlxsw_flood_table_type { + MLXSW_REG_SFGC_TABLE_TYPE_VID = 1, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2, + MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0, + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3, + MLXSW_REG_SFGC_TABLE_TYPE_FID = 4, +}; + +/* reg_sfgc_table_type + * See mlxsw_flood_table_type + * Access: RW + * + * Note: FID offset and FID types are not supported in SwitchX-2. + */ +MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3); + +/* reg_sfgc_flood_table + * Flooding table index to associate with the specific type on the specific + * switch partition. + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6); + +/* reg_sfgc_mid + * The multicast ID for the swid. Not supported for Spectrum + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16); + +/* reg_sfgc_counter_set_type + * Counter Set Type for flow counters. + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8); + +/* reg_sfgc_counter_index + * Counter Index for flow counters. + * Access: RW + */ +MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24); + +static inline void +mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type, + enum mlxsw_reg_sfgc_bridge_type bridge_type, + enum mlxsw_flood_table_type table_type, + unsigned int flood_table) +{ + MLXSW_REG_ZERO(sfgc, payload); + mlxsw_reg_sfgc_type_set(payload, type); + mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type); + mlxsw_reg_sfgc_table_type_set(payload, table_type); + mlxsw_reg_sfgc_flood_table_set(payload, flood_table); + mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID); +} + +/* SFTR - Switch Flooding Table Register + * ------------------------------------- + * The switch flooding table is used for flooding packet replication. The table + * defines a bit mask of ports for packet replication. + */ +#define MLXSW_REG_SFTR_ID 0x2012 +#define MLXSW_REG_SFTR_LEN 0x420 + +static const struct mlxsw_reg_info mlxsw_reg_sftr = { + .id = MLXSW_REG_SFTR_ID, + .len = MLXSW_REG_SFTR_LEN, +}; + +/* reg_sftr_swid + * Switch partition ID with which to associate the port. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8); + +/* reg_sftr_flood_table + * Flooding table index to associate with the specific type on the specific + * switch partition. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6); + +/* reg_sftr_index + * Index. Used as an index into the Flooding Table in case the table is + * configured to use VID / FID or FID Offset. + * Access: Index + */ +MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16); + +/* reg_sftr_table_type + * See mlxsw_flood_table_type + * Access: RW + */ +MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3); + +/* reg_sftr_range + * Range of entries to update + * Access: Index + */ +MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16); + +/* reg_sftr_port + * Local port membership (1 bit per port). + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1); + +/* reg_sftr_cpu_port_mask + * CPU port mask (1 bit per port). 
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+				       unsigned int flood_table,
+				       unsigned int index,
+				       enum mlxsw_flood_table_type table_type,
+				       unsigned int range)
+{
+	MLXSW_REG_ZERO(sftr, payload);
+	mlxsw_reg_sftr_swid_set(payload, 0);
+	mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+	mlxsw_reg_sftr_index_set(payload, index);
+	mlxsw_reg_sftr_table_type_set(payload, table_type);
+	mlxsw_reg_sftr_range_set(payload, range);
+	mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+	.id = MLXSW_REG_SPMLR_ID,
+	.len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+	MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+	MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+	MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+					enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	MLXSW_REG_ZERO(spmlr, payload);
+	mlxsw_reg_spmlr_local_port_set(payload, local_port);
+	mlxsw_reg_spmlr_sub_port_set(payload, 0);
+	mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+	.id = MLXSW_REG_PMLP_ID,
+	.len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane.
When rxtx field is cleared, this field is ignored and Rx lane is + * equal to Tx lane. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false); + +static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(pmlp, payload); + mlxsw_reg_pmlp_local_port_set(payload, local_port); +} + +/* PMTU - Port MTU Register + * ------------------------ + * Configures and reports the port MTU. + */ +#define MLXSW_REG_PMTU_ID 0x5003 +#define MLXSW_REG_PMTU_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_pmtu = { + .id = MLXSW_REG_PMTU_ID, + .len = MLXSW_REG_PMTU_LEN, +}; + +/* reg_pmtu_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8); + +/* reg_pmtu_max_mtu + * Maximum MTU. + * When port type (e.g. Ethernet) is configured, the relevant MTU is + * reported, otherwise the minimum between the max_mtu of the different + * types is reported. + * Access: RO + */ +MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16); + +/* reg_pmtu_admin_mtu + * MTU value to set port to. Must be smaller or equal to max_mtu. + * Note: If port type is Infiniband, then port must be disabled, when its + * MTU is set. + * Access: RW + */ +MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16); + +/* reg_pmtu_oper_mtu + * The actual MTU configured on the port. Packets exceeding this size + * will be dropped. + * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband + * oper_mtu might be smaller than admin_mtu. + * Access: RO + */ +MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16); + +static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port, + u16 new_mtu) +{ + MLXSW_REG_ZERO(pmtu, payload); + mlxsw_reg_pmtu_local_port_set(payload, local_port); + mlxsw_reg_pmtu_max_mtu_set(payload, 0); + mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu); + mlxsw_reg_pmtu_oper_mtu_set(payload, 0); +} + +/* PTYS - Port Type and Speed Register + * ----------------------------------- + * Configures and reports the port speed type. + * + * Note: When set while the link is up, the changes will not take effect + * until the port transitions from down to up state. + */ +#define MLXSW_REG_PTYS_ID 0x5004 +#define MLXSW_REG_PTYS_LEN 0x40 + +static const struct mlxsw_reg_info mlxsw_reg_ptys = { + .id = MLXSW_REG_PTYS_ID, + .len = MLXSW_REG_PTYS_LEN, +}; + +/* reg_ptys_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8); + +#define MLXSW_REG_PTYS_PROTO_MASK_ETH BIT(2) + +/* reg_ptys_proto_mask + * Protocol mask. Indicates which protocol is used. + * 0 - Infiniband. + * 1 - Fibre Channel. + * 2 - Ethernet. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3); + +#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0) +#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4) +#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5) +#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6) +#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7) +#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14) +#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15) +#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16) +#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19) +#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20) +#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21) +#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22) +#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23) +#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24) +#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25) +#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26) +#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27) +#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28) +#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29) +#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 BIT(30) +#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2 BIT(31) + +/* reg_ptys_eth_proto_cap + * Ethernet port supported speeds and protocols. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32); + +/* reg_ptys_eth_proto_admin + * Speed and protocol to set port to. + * Access: RW + */ +MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32); + +/* reg_ptys_eth_proto_oper + * The current speed and protocol configured for the port. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32); + +static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port, + u32 proto_admin) +{ + MLXSW_REG_ZERO(ptys, payload); + mlxsw_reg_ptys_local_port_set(payload, local_port); + mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH); + mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin); +} + +static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap, + u32 *p_eth_proto_adm, + u32 *p_eth_proto_oper) +{ + if (p_eth_proto_cap) + *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload); + if (p_eth_proto_adm) + *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload); + if (p_eth_proto_oper) + *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload); +} + +/* PPAD - Port Physical Address Register + * ------------------------------------- + * The PPAD register configures the per port physical MAC address. + */ +#define MLXSW_REG_PPAD_ID 0x5005 +#define MLXSW_REG_PPAD_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_ppad = { + .id = MLXSW_REG_PPAD_ID, + .len = MLXSW_REG_PPAD_LEN, +}; + +/* reg_ppad_single_base_mac + * 0: base_mac, local port should be 0 and mac[7:0] is + * reserved. HW will set incremental + * 1: single_mac - mac of the local_port + * Access: RW + */ +MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1); + +/* reg_ppad_local_port + * port number, if single_base_mac = 0 then local_port is reserved + * Access: RW + */ +MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8); + +/* reg_ppad_mac + * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved. 
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+				       u8 local_port)
+{
+	MLXSW_REG_ZERO(ppad, payload);
+	mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+	mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+	.id = MLXSW_REG_PAOS_ID,
+	.len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+				       enum mlxsw_port_admin_status status)
+{
+	MLXSW_REG_ZERO(paos, payload);
+	mlxsw_reg_paos_swid_set(payload, 0);
+	mlxsw_reg_paos_local_port_set(payload, local_port);
+	mlxsw_reg_paos_admin_status_set(payload, status);
+	mlxsw_reg_paos_oper_status_set(payload, 0);
+	mlxsw_reg_paos_ase_set(payload, 1);
+	mlxsw_reg_paos_ee_set(payload, 1);
+	mlxsw_reg_paos_e_set(payload, 1);
+}
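[Editor's note] PAOS is the register a port driver toggles when an interface is brought up or down; the pack helper above also arms the ase/ee bits so the admin change takes effect and PUDE events (defined at the end of this file) are generated on operational state changes. Assuming the mlxsw_reg_write() EMAD helper from core.h in this series, bringing a port administratively up would look roughly like this (a sketch, not part of the patch):

	/* Sketch: request admin-up; a PUDE event will later report the
	 * resulting operational state. */
	static int demo_port_admin_up(struct mlxsw_core *mlxsw_core,
				      u8 local_port)
	{
		char paos_pl[MLXSW_REG_PAOS_LEN];

		mlxsw_reg_paos_pack(paos_pl, local_port,
				    MLXSW_PORT_ADMIN_STATUS_UP);
		return mlxsw_reg_write(mlxsw_core, MLXSW_REG(paos), paos_pl);
	}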
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+	.id = MLXSW_REG_PPCNT_ID,
+	.len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counters;
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counters;
+ * valid values: 0 to cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+	     0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+	     0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+	     0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+	     0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+	     0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+	     0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+	     0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+	     0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+	     0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+	     0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+	     0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+	     0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+	     0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+	     0x08 + 0x68, 0, 64);
+
+/*
reg_ppcnt_a_mac_control_frames_transmitted + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted, + 0x08 + 0x70, 0, 64); + +/* reg_ppcnt_a_mac_control_frames_received + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received, + 0x08 + 0x78, 0, 64); + +/* reg_ppcnt_a_unsupported_opcodes_received + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received, + 0x08 + 0x80, 0, 64); + +/* reg_ppcnt_a_pause_mac_ctrl_frames_received + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, + 0x08 + 0x88, 0, 64); + +/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, + 0x08 + 0x90, 0, 64); + +static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(ppcnt, payload); + mlxsw_reg_ppcnt_swid_set(payload, 0); + mlxsw_reg_ppcnt_local_port_set(payload, local_port); + mlxsw_reg_ppcnt_pnat_set(payload, 0); + mlxsw_reg_ppcnt_grp_set(payload, 0); + mlxsw_reg_ppcnt_clr_set(payload, 0); + mlxsw_reg_ppcnt_prio_tc_set(payload, 0); +} + +/* PSPA - Port Switch Partition Allocation + * --------------------------------------- + * Controls the association of a port with a switch partition and enables + * configuring ports as stacking ports. + */ +#define MLXSW_REG_PSPA_ID 0x500d +#define MLXSW_REG_PSPA_LEN 0x8 + +static const struct mlxsw_reg_info mlxsw_reg_pspa = { + .id = MLXSW_REG_PSPA_ID, + .len = MLXSW_REG_PSPA_LEN, +}; + +/* reg_pspa_swid + * Switch partition ID. + * Access: RW + */ +MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8); + +/* reg_pspa_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8); + +/* reg_pspa_sub_port + * Virtual port within the local port. Set to 0 when virtual ports are + * disabled on the local port. + * Access: Index + */ +MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8); + +static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) +{ + MLXSW_REG_ZERO(pspa, payload); + mlxsw_reg_pspa_swid_set(payload, swid); + mlxsw_reg_pspa_local_port_set(payload, local_port); + mlxsw_reg_pspa_sub_port_set(payload, 0); +} + +/* HTGT - Host Trap Group Table + * ---------------------------- + * Configures the properties for forwarding to CPU. + */ +#define MLXSW_REG_HTGT_ID 0x7002 +#define MLXSW_REG_HTGT_LEN 0x100 + +static const struct mlxsw_reg_info mlxsw_reg_htgt = { + .id = MLXSW_REG_HTGT_ID, + .len = MLXSW_REG_HTGT_LEN, +}; + +/* reg_htgt_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8); + +#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0 /* For locally attached CPU */ + +/* reg_htgt_type + * CPU path type. + * Access: RW + */ +MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); + +#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0 +#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1 + +/* reg_htgt_trap_group + * Trap group number. User defined number specifying which trap groups + * should be forwarded to the CPU. The mapping between trap IDs and trap + * groups is configured using HPKT register. + * Access: Index + */ +MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8); + +enum { + MLXSW_REG_HTGT_POLICER_DISABLE, + MLXSW_REG_HTGT_POLICER_ENABLE, +}; + +/* reg_htgt_pide + * Enable policer ID specified using 'pid' field. + * Access: RW + */ +MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1); + +/* reg_htgt_pid + * Policer ID for the trap group. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8); + +#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0 + +/* reg_htgt_mirror_action + * Mirror action to use. + * 0 - Trap to CPU. + * 1 - Trap to CPU and mirror to a mirroring agent. + * 2 - Mirror to a mirroring agent and do not trap to CPU. + * Access: RW + * + * Note: Mirroring to a mirroring agent is only supported in Spectrum. + */ +MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2); + +/* reg_htgt_mirroring_agent + * Mirroring agent. + * Access: RW + */ +MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3); + +/* reg_htgt_priority + * Trap group priority. + * In case a packet matches multiple classification rules, the packet will + * only be trapped once, based on the trap ID associated with the group (via + * register HPKT) with the highest priority. + * Supported values are 0-7, with 7 representing the highest priority. + * Access: RW + * + * Note: In SwitchX-2 this field is ignored and the priority value is replaced + * by the 'trap_group' field. + */ +MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4); + +/* reg_htgt_local_path_cpu_tclass + * CPU ingress traffic class for the trap group. + * Access: RW + */ +MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); + +#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15 +#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14 + +/* reg_htgt_local_path_rdq + * Receive descriptor queue (RDQ) to use for the trap group. + * Access: RW + */ +MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6); + +static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group) +{ + u8 swid, rdq; + + MLXSW_REG_ZERO(htgt, payload); + if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) { + swid = MLXSW_PORT_SWID_ALL_SWIDS; + rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD; + } else { + swid = 0; + rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX; + } + mlxsw_reg_htgt_swid_set(payload, swid); + mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL); + mlxsw_reg_htgt_trap_group_set(payload, trap_group); + mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE); + mlxsw_reg_htgt_pid_set(payload, 0); + mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU); + mlxsw_reg_htgt_mirroring_agent_set(payload, 0); + mlxsw_reg_htgt_priority_set(payload, 0); + mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7); + mlxsw_reg_htgt_local_path_rdq_set(payload, rdq); +} + +/* HPKT - Host Packet Trap + * ----------------------- + * Configures trap IDs inside trap groups. + */ +#define MLXSW_REG_HPKT_ID 0x7003 +#define MLXSW_REG_HPKT_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_hpkt = { + .id = MLXSW_REG_HPKT_ID, + .len = MLXSW_REG_HPKT_LEN, +}; + +enum { + MLXSW_REG_HPKT_ACK_NOT_REQUIRED, + MLXSW_REG_HPKT_ACK_REQUIRED, +}; + +/* reg_hpkt_ack + * Require acknowledgements from the host for events. + * If set, then the device will wait for the event it sent to be acknowledged + * by the host. This option is only relevant for event trap IDs. + * Access: RW + * + * Note: Currently not supported by firmware. + */ +MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1); + +enum mlxsw_reg_hpkt_action { + MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU, + MLXSW_REG_HPKT_ACTION_DISCARD, + MLXSW_REG_HPKT_ACTION_SOFT_DISCARD, + MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD, +}; + +/* reg_hpkt_action + * Action to perform on packet when trapped. + * 0 - No action. Forward to CPU based on switching rules. + * 1 - Trap to CPU (CPU receives sole copy).
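A minimal sketch of driving the HTGT helper above: bind the RX trap group to the local CPU path, exactly as mlxsw_sx_traps_init() does later in this patch. The example_htgt_setup_rx() wrapper is illustrative only, not part of the patch.

/* Illustrative: route the RX trap group to the local CPU path/RDQ. */
static int example_htgt_setup_rx(struct mlxsw_core *core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	return mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
}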
+ * 2 - Mirror to CPU (CPU receives a replica of the packet). + * 3 - Discard. + * 4 - Soft discard (allow other traps to act on the packet). + * 5 - Trap and soft discard (allow other traps to overwrite this trap). + * Access: RW + * + * Note: Must be set to 0 (forward) for event trap IDs, as they are already + * addressed to the CPU. + */ +MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3); + +/* reg_hpkt_trap_group + * Trap group to associate the trap with. + * Access: RW + */ +MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6); + +/* reg_hpkt_trap_id + * Trap ID. + * Access: Index + * + * Note: A trap ID can only be associated with a single trap group. The device + * will associate the trap ID with the last trap group configured. + */ +MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9); + +enum { + MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT, + MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER, + MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER, +}; + +/* reg_hpkt_ctrl + * Configure dedicated buffer resources for control packets. + * 0 - Keep factory defaults. + * 1 - Do not use control buffer for this trap ID. + * 2 - Use control buffer for this trap ID. + * Access: RW + */ +MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2); + +static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, + u8 trap_group, u16 trap_id) +{ + MLXSW_REG_ZERO(hpkt, payload); + mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED); + mlxsw_reg_hpkt_action_set(payload, action); + mlxsw_reg_hpkt_trap_group_set(payload, trap_group); + mlxsw_reg_hpkt_trap_id_set(payload, trap_id); + mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); +} + +static inline const char *mlxsw_reg_id_str(u16 reg_id) +{ + switch (reg_id) { + case MLXSW_REG_SGCR_ID: + return "SGCR"; + case MLXSW_REG_SPAD_ID: + return "SPAD"; + case MLXSW_REG_SMID_ID: + return "SMID"; + case MLXSW_REG_SPMS_ID: + return "SPMS"; + case MLXSW_REG_SFGC_ID: + return "SFGC"; + case MLXSW_REG_SFTR_ID: + return "SFTR"; + case MLXSW_REG_SPMLR_ID: + return "SPMLR"; + case MLXSW_REG_PMLP_ID: + return "PMLP"; + case MLXSW_REG_PMTU_ID: + return "PMTU"; + case MLXSW_REG_PTYS_ID: + return "PTYS"; + case MLXSW_REG_PPAD_ID: + return "PPAD"; + case MLXSW_REG_PAOS_ID: + return "PAOS"; + case MLXSW_REG_PPCNT_ID: + return "PPCNT"; + case MLXSW_REG_PSPA_ID: + return "PSPA"; + case MLXSW_REG_HTGT_ID: + return "HTGT"; + case MLXSW_REG_HPKT_ID: + return "HPKT"; + default: + return "*UNKNOWN*"; + } +} + +/* PUDE - Port Up / Down Event + * --------------------------- + * Reports the operational state change of a port. + */ +#define MLXSW_REG_PUDE_LEN 0x10 + +/* reg_pude_swid + * Switch partition ID with which to associate the port. + * Access: Index + */ +MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8); + +/* reg_pude_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8); + +/* reg_pude_admin_status + * Port administrative state (the desired state). + * 1 - Up. + * 2 - Down. + * 3 - Up once. This means that in case of link failure, the port won't go + * into polling mode, but will wait to be re-enabled by software. + * 4 - Disabled by system. Can only be set by hardware. + * Access: RO + */ +MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4); + +/* reg_pude_oper_status + * Port operational state. + * 1 - Up. + * 2 - Down. + * 3 - Down by port failure. This means that the device will not let the + * port up again until explicitly specified by software.
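To make HPKT's action/trap-group/trap-ID triple concrete, here is the pack-and-write sequence for trapping STP BPDUs to the CPU via the RX group, mirroring mlxsw_sx_traps_init() below. The example_hpkt_trap_stp() wrapper is illustrative only; MLXSW_TRAP_ID_STP is defined in trap.h later in this patch.

/* Illustrative: steer STP packets (trap ID 0x10) to the CPU. */
static int example_hpkt_trap_stp(struct mlxsw_core *core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_REG_HTGT_TRAP_GROUP_RX, MLXSW_TRAP_ID_STP);
	return mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
}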
+ * Access: RO + */ +MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c new file mode 100644 index 000000000000..29b46eef9769 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -0,0 +1,1552 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/switchx2.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/skbuff.h> +#include <linux/if_vlan.h> +#include <net/switchdev.h> +#include <generated/utsrelease.h> + +#include "core.h" +#include "reg.h" +#include "port.h" +#include "trap.h" +#include "txheader.h" + +static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2"; +static const char mlxsw_sx_driver_version[] = "1.0"; + +struct mlxsw_sx_port; + +#define MLXSW_SW_HW_ID_LEN 6 + +struct mlxsw_sx { + struct mlxsw_sx_port **ports; + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + u8 hw_id[MLXSW_SW_HW_ID_LEN]; +}; + +struct mlxsw_sx_port_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 tx_dropped; +}; + +struct mlxsw_sx_port { + struct net_device *dev; + struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; + struct mlxsw_sx *mlxsw_sx; + u8 local_port; +}; + +/* tx_hdr_version + * Tx header version. + * Must be set to 0. + */ +MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4); + +/* tx_hdr_ctl + * Packet control type. 
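The MLXSW_ITEM32(tx, hdr, ...) entries above and below follow the same byte-offset/LSB/width convention as the register items earlier in this patch, and judging from the callers they generate mlxsw_tx_hdr_<field>_set() style accessors. The real macro lives in item.h, which is outside this excerpt, so the expansion below is a hypothetical simplification shown only to make the bit layout concrete.

/* Hypothetical sketch of MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3):
 * write a 3-bit field at bit 12 of the big-endian 32-bit word at
 * byte offset 0x00.
 */
static inline void example_tx_hdr_swid_set(char *buf, u32 val)
{
	__be32 *p = (__be32 *)(buf + 0x00);
	u32 word = be32_to_cpu(*p);

	word &= ~GENMASK(14, 12);		/* clear width=3 at lsb=12 */
	word |= (val & GENMASK(2, 0)) << 12;	/* insert the new value */
	*p = cpu_to_be32(word);
}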
+ * 0 - Ethernet control (e.g. EMADs, LACP) + * 1 - Ethernet data + */ +MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2); + +/* tx_hdr_proto + * Packet protocol type. Must be set to 1 (Ethernet). + */ +MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3); + +/* tx_hdr_etclass + * Egress TClass to be used on the egress device on the egress port. + * The MSB is specified in the 'ctclass3' field. + * Range is 0-15, where 15 is the highest priority. + */ +MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3); + +/* tx_hdr_swid + * Switch partition ID. + */ +MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3); + +/* tx_hdr_port_mid + * Destination local port for unicast packets. + * Destination multicast ID for multicast packets. + * + * Control packets are directed to a specific egress port, while data + * packets are transmitted through the CPU port (0) into the switch partition, + * where forwarding rules are applied. + */ +MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16); + +/* tx_hdr_ctclass3 + * See field 'etclass'. + */ +MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1); + +/* tx_hdr_rdq + * RDQ for control packets sent to remote CPU. + * Must be set to 0x1F for EMADs, otherwise 0. + */ +MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5); + +/* tx_hdr_cpu_sig + * Signature control for packets going to CPU. Must be set to 0. + */ +MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9); + +/* tx_hdr_sig + * Stacking protocol signature. Must be set to 0xE0E0. + */ +MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16); + +/* tx_hdr_stclass + * Stacking TClass. + */ +MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3); + +/* tx_hdr_emad + * EMAD bit. Must be set for EMADs. + */ +MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1); + +/* tx_hdr_type + * 0 - Data packets + * 6 - Control packets + */ +MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); + +static void mlxsw_sx_txhdr_construct(struct sk_buff *skb, + const struct mlxsw_tx_info *tx_info) +{ + char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN); + bool is_emad = tx_info->is_emad; + + memset(txhdr, 0, MLXSW_TXHDR_LEN); + + /* We currently set default values for the egress tclass (QoS). */ + mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0); + mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL); + mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH); + mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 : + MLXSW_TXHDR_ETCLASS_5); + mlxsw_tx_hdr_swid_set(txhdr, 0); + mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port); + mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3); + mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD : + MLXSW_TXHDR_RDQ_OTHER); + mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG); + mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG); + mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE); + mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD : + MLXSW_TXHDR_NOT_EMAD); + mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); +} + +static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port, + bool is_up) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char paos_pl[MLXSW_REG_PAOS_LEN]; + + mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, + is_up ?
MLXSW_PORT_ADMIN_STATUS_UP : + MLXSW_PORT_ADMIN_STATUS_DOWN); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl); +} + +static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port, + bool *p_is_up) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char paos_pl[MLXSW_REG_PAOS_LEN]; + u8 oper_status; + int err; + + mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl); + if (err) + return err; + oper_status = mlxsw_reg_paos_oper_status_get(paos_pl); + *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false; + return 0; +} + +static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char pmtu_pl[MLXSW_REG_PMTU_LEN]; + int max_mtu; + int err; + + mtu += MLXSW_TXHDR_LEN + ETH_HLEN; + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl); + if (err) + return err; + max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); + + if (mtu > max_mtu) + return -EINVAL; + + mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl); +} + +static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char pspa_pl[MLXSW_REG_PSPA_LEN]; + + mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl); +} + +static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port, + bool *p_usable) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? 
true : false; + return 0; +} + +static int mlxsw_sx_port_open(struct net_device *dev) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + int err; + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true); + if (err) + return err; + netif_start_queue(dev); + return 0; +} + +static int mlxsw_sx_port_stop(struct net_device *dev) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + + netif_stop_queue(dev); + return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false); +} + +static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + struct mlxsw_sx_port_pcpu_stats *pcpu_stats; + const struct mlxsw_tx_info tx_info = { + .local_port = mlxsw_sx_port->local_port, + .is_emad = false, + }; + struct sk_buff *skb_old = NULL; + int err; + + if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { + struct sk_buff *skb_new; + + skb_old = skb; + skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); + if (!skb_new) { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb_old); + return NETDEV_TX_OK; + } + skb = skb_new; + } + mlxsw_sx_txhdr_construct(skb, &tx_info); + err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info); + if (err == -EAGAIN) { + if (skb_old) + dev_kfree_skb_any(skb); + return NETDEV_TX_BUSY; + } + + if (skb_old) + dev_kfree_skb_any(skb_old); + + if (!err) { + pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; +} + +static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + int err; + + err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu); + if (err) + return err; + dev->mtu = mtu; + return 0; +} + +static struct rtnl_link_stats64 * +mlxsw_sx_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx_port_pcpu_stats *p; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + u32 tx_dropped = 0; + unsigned int start; + int i; + + for_each_possible_cpu(i) { + p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i); + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + rx_packets = p->rx_packets; + rx_bytes = p->rx_bytes; + tx_packets = p->tx_packets; + tx_bytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + /* tx_dropped is u32, updated without syncp protection. 
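The snapshot loop above is the standard u64_stats_sync read pattern: sample the sequence counter, copy the statistics, and retry if a writer raced with the read. Condensed to a single counter with the same kernel API; the example_read_rx_packets() helper is illustrative only.

/* Illustrative: consistent 64-bit read of one per-CPU counter. */
static u64 example_read_rx_packets(struct mlxsw_sx_port_pcpu_stats *p)
{
	unsigned int start;
	u64 pkts;

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp);
		pkts = p->rx_packets;
	} while (u64_stats_fetch_retry_irq(&p->syncp, start));
	return pkts;
}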
*/ + tx_dropped += p->tx_dropped; + } + stats->tx_dropped = tx_dropped; + return stats; +} + +static const struct net_device_ops mlxsw_sx_port_netdev_ops = { + .ndo_open = mlxsw_sx_port_open, + .ndo_stop = mlxsw_sx_port_stop, + .ndo_start_xmit = mlxsw_sx_port_xmit, + .ndo_change_mtu = mlxsw_sx_port_change_mtu, + .ndo_get_stats64 = mlxsw_sx_port_get_stats64, +}; + +static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + + strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mlxsw_sx_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + mlxsw_sx->bus_info->fw_rev.major, + mlxsw_sx->bus_info->fw_rev.minor, + mlxsw_sx->bus_info->fw_rev.subminor); + strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name, + sizeof(drvinfo->bus_info)); +} + +struct mlxsw_sx_port_hw_stats { + char str[ETH_GSTRING_LEN]; + u64 (*getter)(char *payload); +}; + +static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = { + { + .str = "a_frames_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, + }, + { + .str = "a_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, + }, + { + .str = "a_frame_check_sequence_errors", + .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, + }, + { + .str = "a_alignment_errors", + .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, + }, + { + .str = "a_octets_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, + }, + { + .str = "a_octets_received_ok", + .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, + }, + { + .str = "a_multicast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, + }, + { + .str = "a_broadcast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, + }, + { + .str = "a_multicast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, + }, + { + .str = "a_broadcast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, + }, + { + .str = "a_in_range_length_errors", + .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, + }, + { + .str = "a_out_of_range_length_field", + .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, + }, + { + .str = "a_frame_too_long_errors", + .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, + }, + { + .str = "a_symbol_error_during_carrier", + .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, + }, + { + .str = "a_mac_control_frames_transmitted", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, + }, + { + .str = "a_mac_control_frames_received", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, + }, + { + .str = "a_unsupported_opcodes_received", + .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_received", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_xmitted", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, + }, +}; + +#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats) + +static void mlxsw_sx_port_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < 
MLXSW_SX_PORT_HW_STATS_LEN; i++) { + memcpy(p, mlxsw_sx_port_hw_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void mlxsw_sx_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int i; + int err; + + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl); + for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) + data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0; +} + +static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return MLXSW_SX_PORT_HW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +struct mlxsw_sx_port_link_mode { + u32 mask; + u32 supported; + u32 advertised; + u32 speed; +}; + +static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, + .speed = 25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, + .speed = 50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + 
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, + .speed = 100000, + }, +}; + +#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode) + +static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return SUPPORTED_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) + return SUPPORTED_Backplane; + return 0; +} + +static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto) +{ + u32 modes = 0; + int i; + + for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) + modes |= mlxsw_sx_port_link_mode[i].supported; + } + return modes; +} + +static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto) +{ + u32 modes = 0; + int i; + + for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) + modes |= mlxsw_sx_port_link_mode[i].advertised; + } + return modes; +} + +static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, + struct ethtool_cmd *cmd) +{ + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + int i; + + if (!carrier_ok) + goto out; + + for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) { + speed = mlxsw_sx_port_link_mode[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + ethtool_cmd_speed_set(cmd, speed); + cmd->duplex = duplex; +} + +static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return PORT_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) + return PORT_DA; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) + return PORT_NONE; + + return PORT_OTHER; +} + +static int mlxsw_sx_port_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_cap; + u32 eth_proto_admin; + u32 eth_proto_oper; + int err; + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); + if (err) { + netdev_err(dev, "Failed to get proto"); + return err; + } + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, + &eth_proto_admin, &eth_proto_oper); + + cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) | + mlxsw_sx_from_ptys_supported_link(eth_proto_cap) | + SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin); + mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev), + eth_proto_oper, cmd); + + eth_proto_oper = eth_proto_oper ?
eth_proto_oper : eth_proto_cap; + cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper); + cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper); + + cmd->transceiver = XCVR_INTERNAL; + return 0; +} + +static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) { + if (advertising & mlxsw_sx_port_link_mode[i].advertised) + ptys_proto |= mlxsw_sx_port_link_mode[i].mask; + } + return ptys_proto; +} + +static u32 mlxsw_sx_to_ptys_speed(u32 speed) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) { + if (speed == mlxsw_sx_port_link_mode[i].speed) + ptys_proto |= mlxsw_sx_port_link_mode[i].mask; + } + return ptys_proto; +} + +static int mlxsw_sx_port_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 speed; + u32 eth_proto_new; + u32 eth_proto_cap; + u32 eth_proto_admin; + bool is_up; + int err; + + speed = ethtool_cmd_speed(cmd); + + eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ? + mlxsw_sx_to_ptys_advert_link(cmd->advertising) : + mlxsw_sx_to_ptys_speed(speed); + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); + if (err) { + netdev_err(dev, "Failed to get proto"); + return err; + } + mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL); + + eth_proto_new = eth_proto_new & eth_proto_cap; + if (!eth_proto_new) { + netdev_err(dev, "Not supported proto admin requested"); + return -EINVAL; + } + if (eth_proto_new == eth_proto_admin) + return 0; + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); + if (err) { + netdev_err(dev, "Failed to set proto admin"); + return err; + } + + err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up); + if (err) { + netdev_err(dev, "Failed to get oper status"); + return err; + } + if (!is_up) + return 0; + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false); + if (err) { + netdev_err(dev, "Failed to set admin status"); + return err; + } + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true); + if (err) { + netdev_err(dev, "Failed to set admin status"); + return err; + } + + return 0; +} + +static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = { + .get_drvinfo = mlxsw_sx_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = mlxsw_sx_port_get_strings, + .get_ethtool_stats = mlxsw_sx_port_get_stats, + .get_sset_count = mlxsw_sx_port_get_sset_count, + .get_settings = mlxsw_sx_port_get_settings, + .set_settings = mlxsw_sx_port_set_settings, +}; + +static int mlxsw_sx_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + + switch (attr->id) { + case SWITCHDEV_ATTR_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id); + memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = { + .switchdev_port_attr_get = mlxsw_sx_port_attr_get, +}; + +static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx) +{ + char spad_pl[MLXSW_REG_SPAD_LEN]; + int err; + + err =
mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl); + if (err) + return err; + mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id); + return 0; +} + +static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + struct net_device *dev = mlxsw_sx_port->dev; + char ppad_pl[MLXSW_REG_PPAD_LEN]; + int err; + + mlxsw_reg_ppad_pack(ppad_pl, false, 0); + err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl); + if (err) + return err; + mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr); + /* The last byte value in the base mac address is guaranteed + * to be such that it does not overflow when adding the + * local_port value. + */ + dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port; + return 0; +} + +static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port, + u16 vid, enum mlxsw_reg_spms_state state) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char *spms_pl; + int err; + + spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); + if (!spms_pl) + return -ENOMEM; + mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl); + kfree(spms_pl); + return err; +} + +static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port, + u32 speed) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); +} + +static int +mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port, + enum mlxsw_reg_spmlr_learn_mode mode) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char spmlr_pl[MLXSW_REG_SPMLR_LEN]; + + mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl); +} + +static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + struct mlxsw_sx_port *mlxsw_sx_port; + struct net_device *dev; + bool usable; + int err; + + dev = alloc_etherdev(sizeof(struct mlxsw_sx_port)); + if (!dev) + return -ENOMEM; + mlxsw_sx_port = netdev_priv(dev); + mlxsw_sx_port->dev = dev; + mlxsw_sx_port->mlxsw_sx = mlxsw_sx; + mlxsw_sx_port->local_port = local_port; + + mlxsw_sx_port->pcpu_stats = + netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats); + if (!mlxsw_sx_port->pcpu_stats) { + err = -ENOMEM; + goto err_alloc_stats; + } + + dev->netdev_ops = &mlxsw_sx_port_netdev_ops; + dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops; + dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops; + + err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n", + mlxsw_sx_port->local_port); + goto err_dev_addr_get; + } + + netif_carrier_off(dev); + + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | + NETIF_F_VLAN_CHALLENGED; + + /* Each packet needs to have a Tx header (metadata) on top of all other + * headers.
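The base-MAC derivation in mlxsw_sx_port_dev_addr_get() above leans on the firmware guarantee quoted in its comment. Spelled out as a standalone sketch (example_port_mac() is a hypothetical helper; e.g. base 7c:fe:90:00:00:00 plus local_port 5 would yield 7c:fe:90:00:00:05):

/* Illustrative: derive a per-port MAC from the switch base MAC.
 * Relies on the guarantee that the last base byte cannot overflow.
 */
static void example_port_mac(u8 *addr, const u8 *base, u8 local_port)
{
	ether_addr_copy(addr, base);
	addr[ETH_ALEN - 1] += local_port;
}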
+ */ + dev->hard_header_len += MLXSW_TXHDR_LEN; + + err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n", + mlxsw_sx_port->local_port); + goto err_port_module_check; + } + + if (!usable) { + dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n", + mlxsw_sx_port->local_port); + goto port_not_usable; + } + + err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sx_port->local_port); + goto err_port_swid_set; + } + + err = mlxsw_sx_port_speed_set(mlxsw_sx_port, + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n", + mlxsw_sx_port->local_port); + goto err_port_speed_set; + } + + err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n", + mlxsw_sx_port->local_port); + goto err_port_mtu_set; + } + + err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false); + if (err) + goto err_port_admin_status_set; + + err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port, + MLXSW_PORT_DEFAULT_VID, + MLXSW_REG_SPMS_STATE_FORWARDING); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n", + mlxsw_sx_port->local_port); + goto err_port_stp_state_set; + } + + err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port, + MLXSW_REG_SPMLR_LEARN_MODE_DISABLE); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n", + mlxsw_sx_port->local_port); + goto err_port_mac_learning_mode_set; + } + + err = register_netdev(dev); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n", + mlxsw_sx_port->local_port); + goto err_register_netdev; + } + + mlxsw_sx->ports[local_port] = mlxsw_sx_port; + return 0; + +err_register_netdev: +err_port_admin_status_set: +err_port_mac_learning_mode_set: +err_port_stp_state_set: +err_port_mtu_set: +err_port_speed_set: +err_port_swid_set: +port_not_usable: +err_port_module_check: +err_dev_addr_get: + free_percpu(mlxsw_sx_port->pcpu_stats); +err_alloc_stats: + free_netdev(dev); + return err; +} + +static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) +{ + struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + + if (!mlxsw_sx_port) + return; + unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ + mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); + free_percpu(mlxsw_sx_port->pcpu_stats); +} + +static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) +{ + int i; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + mlxsw_sx_port_remove(mlxsw_sx, i); + kfree(mlxsw_sx->ports); +} + +static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) +{ + size_t alloc_size; + int i; + int err; + + alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS; + mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_sx->ports) + return -ENOMEM; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + err = mlxsw_sx_port_create(mlxsw_sx, i); + if (err) + goto err_port_create; + } + return 0; + +err_port_create: + for (i--; i >= 1; i--) + mlxsw_sx_port_remove(mlxsw_sx, i); + kfree(mlxsw_sx->ports); + return err; +} + +static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg, + char *pude_pl, void *priv) +{ + struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx_port *mlxsw_sx_port; + enum 
mlxsw_reg_pude_oper_status status; + u8 local_port; + + local_port = mlxsw_reg_pude_local_port_get(pude_pl); + mlxsw_sx_port = mlxsw_sx->ports[local_port]; + if (!mlxsw_sx_port) { + dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n", + local_port); + return; + } + + status = mlxsw_reg_pude_oper_status_get(pude_pl); + if (MLXSW_PORT_OPER_STATUS_UP == status) { + netdev_info(mlxsw_sx_port->dev, "link up\n"); + netif_carrier_on(mlxsw_sx_port->dev); + } else { + netdev_info(mlxsw_sx_port->dev, "link down\n"); + netif_carrier_off(mlxsw_sx_port->dev); + } +} + +static struct mlxsw_event_listener mlxsw_sx_pude_event = { + .func = mlxsw_sx_pude_event_func, + .trap_id = MLXSW_TRAP_ID_PUDE, +}; + +static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sx_pude_event; + break; + } + err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_event_trap_set; + + return 0; + +err_event_trap_set: + mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx); + return err; +} + +static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sx_pude_event; + break; + } + mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx); +} + +static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + struct mlxsw_sx_port_pcpu_stats *pcpu_stats; + + if (unlikely(!mlxsw_sx_port)) { + if (net_ratelimit()) + dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); + return; + } + + skb->dev = mlxsw_sx_port->dev; + + pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + skb->protocol = eth_type_trans(skb, skb->dev); + netif_receive_skb(skb); +} + +static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = { + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_FDB_MC, + }, + /* Traps for specific L2 packet types, not trapped as FDB MC */ + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_STP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LACP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_EAPOL, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LLDP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MMRP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MVRP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = 
MLXSW_TRAP_ID_RPVST, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_DHCP, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, + }, + { + .func = mlxsw_sx_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, + }, +}; + +static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + int err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { + err = mlxsw_core_rx_listener_register(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); + if (err) + goto err_rx_listener_register; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + mlxsw_sx_rx_listener[i].trap_id); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_rx_trap_set; + } + return 0; + +err_rx_trap_set: + mlxsw_core_rx_listener_unregister(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); +err_rx_listener_register: + for (i--; i >= 0; i--) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + mlxsw_sx_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); + } + return err; +} + +static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + mlxsw_sx_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_sx->core, + &mlxsw_sx_rx_listener[i], + mlxsw_sx); + } +} + +static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) +{ + char sfgc_pl[MLXSW_REG_SFGC_LEN]; + char sgcr_pl[MLXSW_REG_SGCR_LEN]; + char *smid_pl; + char *sftr_pl; + int err; + + /* Due to FW bug, we must configure SMID. */ + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + if (err) + return err; + + /* Configure a flooding table, which includes only CPU port. */ + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) + return -ENOMEM; + mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl); + kfree(sftr_pl); + if (err) + return err; + + /* Flood different packet types using the flooding table. 
*/ + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_BROADCAST, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sfgc_pack(sfgc_pl, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID, + MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, + 0); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl); + if (err) + return err; + + mlxsw_reg_sgcr_pack(sgcr_pl, true); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); +} + +static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sx *mlxsw_sx = priv; + int err; + + mlxsw_sx->core = mlxsw_core; + mlxsw_sx->bus_info = mlxsw_bus_info; + + err = mlxsw_sx_hw_id_get(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n"); + return err; + } + + err = mlxsw_sx_ports_create(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n"); + return err; + } + + err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n"); + goto err_event_register; + } + + err = mlxsw_sx_traps_init(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n"); + goto err_rx_listener_register; + } + + err = mlxsw_sx_flood_init(mlxsw_sx); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n"); + goto err_flood_init; + } + + return 0; + +err_flood_init: + mlxsw_sx_traps_fini(mlxsw_sx); +err_rx_listener_register: + mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); +err_event_register: + mlxsw_sx_ports_remove(mlxsw_sx); + return err; +} + +static void mlxsw_sx_fini(void *priv) +{ + struct mlxsw_sx *mlxsw_sx = priv; + + mlxsw_sx_traps_fini(mlxsw_sx); + mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); + mlxsw_sx_ports_remove(mlxsw_sx); +} + +static struct mlxsw_config_profile mlxsw_sx_config_profile = { + .used_max_vepa_channels = 1, + .max_vepa_channels = 0, + .used_max_lag = 1, + .max_lag = 64, + .used_max_port_per_lag = 1, + .max_port_per_lag = 16, + .used_max_mid = 1, + .max_mid = 7000, + .used_max_pgt = 1, + .max_pgt = 0, + .used_max_system_port = 1, + .max_system_port = 48000, + .used_max_vlan_groups = 1, + .max_vlan_groups = 127, + .used_max_regions = 1, + .max_regions = 400, + .used_flood_tables = 1, + .max_flood_tables = 2, + .max_vid_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, 
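Both mlxsw_sx_traps_init() and mlxsw_sx_init() above use the kernel's usual partial-initialisation unwind: on failure at step i, undo steps i-1..0 in reverse order via a goto ladder. The generic shape, with hypothetical do_step()/undo_step() helpers standing in for the register/unregister pairs:

/* Generic form of the error unwind used above (hypothetical helpers). */
static int example_init_all(int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = do_step(i);		/* e.g. listener register */
		if (err)
			goto err_undo;
	}
	return 0;

err_undo:
	for (i--; i >= 0; i--)
		undo_step(i);			/* undo in reverse order */
	return err;
}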
+}; + +static struct mlxsw_driver mlxsw_sx_driver = { + .kind = MLXSW_DEVICE_KIND_SWITCHX2, + .owner = THIS_MODULE, + .priv_size = sizeof(struct mlxsw_sx), + .init = mlxsw_sx_init, + .fini = mlxsw_sx_fini, + .txhdr_construct = mlxsw_sx_txhdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sx_config_profile, +}; + +static int __init mlxsw_sx_module_init(void) +{ + return mlxsw_core_driver_register(&mlxsw_sx_driver); +} + +static void __exit mlxsw_sx_module_exit(void) +{ + mlxsw_core_driver_unregister(&mlxsw_sx_driver); +} + +module_init(mlxsw_sx_module_init); +module_exit(mlxsw_sx_module_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox SwitchX-2 driver"); +MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h new file mode 100644 index 000000000000..53a9550be75e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -0,0 +1,66 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/trap.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef _MLXSW_TRAP_H +#define _MLXSW_TRAP_H + +enum { + /* Ethernet EMAD and FDB miss */ + MLXSW_TRAP_ID_FDB_MC = 0x01, + MLXSW_TRAP_ID_ETHEMAD = 0x05, + /* L2 traps for specific packet types */ + MLXSW_TRAP_ID_STP = 0x10, + MLXSW_TRAP_ID_LACP = 0x11, + MLXSW_TRAP_ID_EAPOL = 0x12, + MLXSW_TRAP_ID_LLDP = 0x13, + MLXSW_TRAP_ID_MMRP = 0x14, + MLXSW_TRAP_ID_MVRP = 0x15, + MLXSW_TRAP_ID_RPVST = 0x16, + MLXSW_TRAP_ID_DHCP = 0x19, + MLXSW_TRAP_ID_IGMP_QUERY = 0x30, + MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31, + MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, + MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, + MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + + MLXSW_TRAP_ID_MAX = 0x1FF +}; + +enum mlxsw_event_trap_id { + /* Port Up/Down event generated by hardware */ + MLXSW_TRAP_ID_PUDE = 0x8, +}; + +#endif /* _MLXSW_TRAP_H */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h new file mode 100644 index 000000000000..06fc46c78a0b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h @@ -0,0 +1,80 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/txheader.h + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_TXHEADER_H +#define _MLXSW_TXHEADER_H + +#define MLXSW_TXHDR_LEN 0x10 +#define MLXSW_TXHDR_VERSION_0 0 + +enum { + MLXSW_TXHDR_ETH_CTL, + MLXSW_TXHDR_ETH_DATA, +}; + +#define MLXSW_TXHDR_PROTO_ETH 1 + +enum { + MLXSW_TXHDR_ETCLASS_0, + MLXSW_TXHDR_ETCLASS_1, + MLXSW_TXHDR_ETCLASS_2, + MLXSW_TXHDR_ETCLASS_3, + MLXSW_TXHDR_ETCLASS_4, + MLXSW_TXHDR_ETCLASS_5, + MLXSW_TXHDR_ETCLASS_6, + MLXSW_TXHDR_ETCLASS_7, +}; + +enum { + MLXSW_TXHDR_RDQ_OTHER, + MLXSW_TXHDR_RDQ_EMAD = 0x1f, +}; + +#define MLXSW_TXHDR_CTCLASS3 0 +#define MLXSW_TXHDR_CPU_SIG 0 +#define MLXSW_TXHDR_SIG 0xE0E0 +#define MLXSW_TXHDR_STCLASS_NONE 0 + +enum { + MLXSW_TXHDR_NOT_EMAD, + MLXSW_TXHDR_EMAD, +}; + +enum { + MLXSW_TXHDR_TYPE_DATA, + MLXSW_TXHDR_TYPE_CONTROL = 6, +}; + +#endif diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index c28111749e1f..2d1b94274079 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev) pci_disable_device(pdev); } -/** - * s2io_starter - Entry point for the driver - * Description: This function is the entry point for the driver. It verifies - * the module loadable parameters and initializes PCI configuration space. - */ - -static int __init s2io_starter(void) -{ - return pci_register_driver(&s2io_driver); -} - -/** - * s2io_closer - Cleanup routine for the driver - * Description: This function is the cleanup routine for the driver. It - * unregisters the driver. - */ - -static __exit void s2io_closer(void) -{ - pci_unregister_driver(&s2io_driver); - DBG_PRINT(INIT_DBG, "cleanup done\n"); -} - -module_init(s2io_starter); -module_exit(s2io_closer); +module_pci_driver(s2io_driver); static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, struct tcphdr **tcp, struct RxD_t *rxdp, diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index d89b6ed82c51..6c5997dc8afc 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp); static void tx_intr_handler(struct fifo_info *fifo_data); static void s2io_handle_errors(void * dev_id); -static int s2io_starter(void); -static void s2io_closer(void); static void s2io_tx_watchdog(struct net_device *dev); static void s2io_set_multicast(struct net_device *dev); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 2f6cc423ab1d..7dbab3c20db5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2403,7 +2403,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, qlcnic_free_tx_rings(adapter); return -ENOMEM; } - memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; spin_lock_init(&tx_ring->tx_clean_lock); } diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 8aa50ac4e2d6..a157aaaaff6a 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -658,6 +658,8 @@ struct ravb_desc { __le32 dptr; /* Descriptor pointer */ }; +#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */ + enum DIE_DT { /* Frame data */ DT_FMID = 0x40, @@ -739,6 +741,7 @@ enum RAVB_QUEUE { #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define 
NUM_TX_QUEUE 2 +#define NUM_TX_DESC 2 /* TX descriptors per packet */ struct ravb_tstamp_skb { struct list_head list; @@ -777,9 +780,9 @@ struct ravb_private { dma_addr_t tx_desc_dma[NUM_TX_QUEUE]; struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE]; struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; + void *tx_align[NUM_TX_QUEUE]; struct sk_buff **rx_skb[NUM_RX_QUEUE]; struct sk_buff **tx_skb[NUM_TX_QUEUE]; - void **tx_buffers[NUM_TX_QUEUE]; u32 rx_over_errors; u32 rx_fifo_errors; struct net_device_stats stats[NUM_RX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 78849dd4ef8e..3d972d819420 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; /* Free aligned TX buffers */ - if (priv->tx_buffers[q]) { - for (i = 0; i < priv->num_tx_ring[q]; i++) - kfree(priv->tx_buffers[q][i]); - } - kfree(priv->tx_buffers[q]); - priv->tx_buffers[q] = NULL; + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; if (priv->rx_ring[q]) { ring_size = sizeof(struct ravb_ex_rx_desc) * @@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q) if (priv->tx_ring[q]) { ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] + 1); + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); dma_free_coherent(NULL, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@ -223,11 +219,12 @@ static void ravb_ring_free(struct net_device *ndev, int q) static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_ex_rx_desc *rx_desc = NULL; - struct ravb_tx_desc *tx_desc = NULL; - struct ravb_desc *desc = NULL; + struct ravb_ex_rx_desc *rx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; - int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; + int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * + NUM_TX_DESC; dma_addr_t dma_addr; int i; @@ -260,11 +257,12 @@ static void ravb_ring_format(struct net_device *ndev, int q) memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ - for (i = 0; i < priv->num_tx_ring[q]; i++) { - tx_desc = &priv->tx_ring[q][i]; + for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; + i++, tx_desc++) { + tx_desc->die_dt = DT_EEMPTY; + tx_desc++; tx_desc->die_dt = DT_EEMPTY; } - tx_desc = &priv->tx_ring[q][i]; tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@ -285,7 +283,6 @@ static int ravb_ring_init(struct net_device *ndev, int q) struct ravb_private *priv = netdev_priv(ndev); struct sk_buff *skb; int ring_size; - void *buffer; int i; /* Allocate RX and TX skb rings */ @@ -305,19 +302,11 @@ static int ravb_ring_init(struct net_device *ndev, int q) } /* Allocate rings for the aligned buffers */ - priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], - sizeof(*priv->tx_buffers[q]), GFP_KERNEL); - if (!priv->tx_buffers[q]) + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) goto error; - for (i = 0; i < priv->num_tx_ring[q]; i++) { - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); - if (!buffer) - goto error; - /* Aligned TX buffer */ - priv->tx_buffers[q][i] = buffer; - } - /* Allocate all RX descriptors. 
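
With NUM_TX_DESC descriptors per packet, the descriptor index and the per-packet bookkeeping index diverge; the rest of the patch divides by NUM_TX_DESC wherever it needs the packet slot. A worked example with illustrative values:

/* num_tx_ring[q] == 4, NUM_TX_DESC == 2, cur_tx[q] == 10:
 *
 *   entry    = 10 % (4 * 2) = 2        -- first descriptor of the pair
 *   entry+1  = 3                       -- second descriptor
 *   skb slot = entry / NUM_TX_DESC = 1 -- tx_skb[q][1], tx_align slot 1
 *
 * So tx_skb[] and the aligned-header area stay sized num_tx_ring[q],
 * while the descriptor ring itself holds num_tx_ring[q] * NUM_TX_DESC
 * entries (plus the DT_LINKFIX terminator). */
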
*/ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, @@ -329,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q) priv->dirty_rx[q] = 0; /* Allocate all TX descriptors. */ - ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1); + ring_size = sizeof(struct ravb_tx_desc) * + (priv->num_tx_ring[q] * NUM_TX_DESC + 1); priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@ -439,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q) struct net_device_stats *stats = &priv->stats[q]; struct ravb_tx_desc *desc; int free_num = 0; - int entry = 0; + int entry; u32 size; for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { - entry = priv->dirty_tx[q] % priv->num_tx_ring[q]; + entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * + NUM_TX_DESC); desc = &priv->tx_ring[q][entry]; if (desc->die_dt != DT_FEMPTY) break; @@ -451,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q) dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry]) { + if (priv->tx_skb[q][entry / NUM_TX_DESC]) { dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); - dev_kfree_skb_any(priv->tx_skb[q][entry]); - priv->tx_skb[q][entry] = NULL; + /* Last packet descriptor? */ + if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { + entry /= NUM_TX_DESC; + dev_kfree_skb_any(priv->tx_skb[q][entry]); + priv->tx_skb[q][entry] = NULL; + stats->tx_packets++; + } free_num++; } - stats->tx_packets++; stats->tx_bytes += size; desc->die_dt = DT_EEMPTY; } @@ -512,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) struct sk_buff *skb; dma_addr_t dma_addr; struct timespec64 ts; - u16 pkt_len = 0; u8 desc_status; + u16 pkt_len; int limit; boguscnt = min(boguscnt, *quota); @@ -1277,44 +1272,60 @@ static void ravb_tx_timeout_work(struct work_struct *work) static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_tstamp_skb *ts_skb = NULL; u16 q = skb_get_queue_mapping(skb); + struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; unsigned long flags; u32 dma_addr; void *buffer; u32 entry; + u32 len; spin_lock_irqsave(&priv->lock, flags); - if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { + if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * + NUM_TX_DESC) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_BUSY; } - entry = priv->cur_tx[q] % priv->num_tx_ring[q]; - priv->tx_skb[q][entry] = skb; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); + priv->tx_skb[q][entry / NUM_TX_DESC] = skb; if (skb_put_padto(skb, ETH_ZLEN)) goto drop; - buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN); - memcpy(buffer, skb->data, skb->len); - desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(skb->len); - dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE); + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / NUM_TX_DESC * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) goto drop; + + desc = 
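
The point of the two-descriptor scheme shows up in the xmit path: only the bytes in front of the first 4-byte boundary are bounced, while the bulk of the frame is mapped in place. Condensed from the code above:

/* Per-packet slot in the bounce area is DPTR_ALIGN bytes wide, hence
 * the kmalloc of DPTR_ALIGN * num_tx_ring[q] + DPTR_ALIGN - 1 above. */
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
	 entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;	/* 0..3 bytes */
memcpy(buffer, skb->data, len);		/* descriptor 0: bounced head */
/* descriptor 1 covers skb->data + len for skb->len - len bytes and is
 * dma_map_single()'d directly: the old full-packet memcpy is gone. */
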
&priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr); + + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) + goto unmap; + + desc++; + desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr); /* TX timestamp required */ if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - dma_unmap_single(&ndev->dev, dma_addr, skb->len, + desc--; + dma_unmap_single(&ndev->dev, dma_addr, len, DMA_TO_DEVICE); - goto drop; + goto unmap; } ts_skb->skb = skb; ts_skb->tag = priv->ts_skb_tag++; @@ -1330,13 +1341,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FSINGLE; + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); - priv->cur_tx[q]++; - if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && - !ravb_tx_free(ndev, q)) + priv->cur_tx[q] += NUM_TX_DESC; + if (priv->cur_tx[q] - priv->dirty_tx[q] > + (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) netif_stop_subqueue(ndev, q); exit: @@ -1344,9 +1357,12 @@ exit: spin_unlock_irqrestore(&priv->lock, flags); return NETDEV_TX_OK; +unmap: + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), + le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry] = NULL; + priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; goto exit; } diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 2d8578cade03..7b4c3474acfe 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -202,6 +202,7 @@ enum { ROCKER_CTRL_IPV4_MCAST, ROCKER_CTRL_IPV6_MCAST, ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_DFLT_OVS, ROCKER_CTRL_MAX, }; @@ -321,9 +322,21 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, return ntohs(vlan_id); } +static bool rocker_port_is_slave(const struct rocker_port *rocker_port, + const char *kind) +{ + return rocker_port->bridge_dev && + !strcmp(rocker_port->bridge_dev->rtnl_link_ops->kind, kind); +} + static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) { - return !!rocker_port->bridge_dev; + return rocker_port_is_slave(rocker_port, "bridge"); +} + +static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) +{ + return rocker_port_is_slave(rocker_port, "openvswitch"); } #define ROCKER_OP_FLAG_REMOVE BIT(0) @@ -1818,6 +1831,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, } static int +rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + int mtu = *(int *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, + mtu)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_set_port_learning_prep(const struct rocker_port 
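
Descriptor publication order matters now that one frame spans two descriptors; the xmit path is careful to make the frame visible atomically:

/* Publication order in ravb_start_xmit() (summary):
 *
 *   1. fill dptr/ds_tagl of both descriptors
 *   2. dma_wmb()                   -- payload fields visible first
 *   3. desc[1].die_dt = DT_FEND    -- terminate the frame ...
 *   4. desc[0].die_dt = DT_FSTART  -- ... then arm it
 *
 * The DMA engine keys off the first descriptor's type, so writing
 * DT_FSTART last means it can never observe a started frame whose
 * end marker is not yet visible. */
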
*rocker_port, struct rocker_desc_info *desc_info, void *priv) @@ -1874,6 +1911,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, macaddr, NULL, NULL); } +static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, + int mtu) +{ + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0, + rocker_cmd_set_port_settings_mtu_prep, + &mtu, NULL, NULL); +} + static int rocker_port_set_learning(struct rocker_port *rocker_port, enum switchdev_trans trans) { @@ -3243,6 +3288,12 @@ static struct rocker_ctrl { .bridge = true, .copy_to_cpu = true, }, + [ROCKER_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, }; static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, @@ -3755,11 +3806,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, break; case BR_STATE_LEARNING: case BR_STATE_FORWARDING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + if (!rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; want[ROCKER_CTRL_IPV4_MCAST] = true; want[ROCKER_CTRL_IPV6_MCAST] = true; if (rocker_port_is_bridged(rocker_port)) want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else if (rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_DFLT_OVS] = true; else want[ROCKER_CTRL_LOCAL_ARP] = true; break; @@ -3983,7 +4037,8 @@ static int rocker_port_open(struct net_device *dev) napi_enable(&rocker_port->napi_tx); napi_enable(&rocker_port->napi_rx); - rocker_port_set_enable(rocker_port, true); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); netif_start_queue(dev); return 0; @@ -4152,6 +4207,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) return 0; } +static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int running = netif_running(dev); + int err; + +#define ROCKER_PORT_MIN_MTU 68 +#define ROCKER_PORT_MAX_MTU 9000 + + if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) + return -EINVAL; + + if (running) + rocker_port_stop(dev); + + netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); + dev->mtu = new_mtu; + + err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); + if (err) + return err; + + if (running) + err = rocker_port_open(dev); + + return err; +} + static int rocker_port_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { @@ -4167,11 +4250,23 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, return err ? 
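
rocker_cmd_set_port_settings_mtu_prep() follows the shape shared by all rocker command writers: a CMD_TYPE attribute, a nested CMD_INFO block, one TLV per setting. As a generic skeleton (a sketch assembled from the helpers used above):

static int example_cmd_prep(const struct rocker_port *rocker_port,
			    struct rocker_desc_info *desc_info, void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	/* ...one rocker_tlv_put_*() call per attribute, always starting
	 * with the PPORT that addresses the port... */
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
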
-EOPNOTSUPP : 0; } +static int rocker_port_change_proto_down(struct net_device *dev, + bool proto_down) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_port->dev->flags & IFF_UP) + rocker_port_set_enable(rocker_port, !proto_down); + rocker_port->dev->proto_down = proto_down; + return 0; +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, .ndo_start_xmit = rocker_port_xmit, .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_change_mtu = rocker_port_change_mtu, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, @@ -4179,6 +4274,7 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_get_phys_port_name = rocker_port_get_phys_port_name, + .ndo_change_proto_down = rocker_port_change_proto_down, }; /******************** @@ -4726,6 +4822,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); size_t rx_len; + u16 rx_flags = 0; if (!skb) return -ENOENT; @@ -4733,6 +4830,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker, rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) return -EINVAL; + if (attrs[ROCKER_TLV_RX_FLAGS]) + rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); rocker_dma_rx_ring_skb_unmap(rocker, attrs); @@ -4740,6 +4839,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker, skb_put(skb, rx_len); skb->protocol = eth_type_trans(skb, rocker_port->dev); + if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) + skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + rocker_port->dev->stats.rx_packets++; rocker_port->dev->stats.rx_bytes += skb->len; @@ -4877,6 +4979,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) } rocker->ports[port_number] = rocker_port; + switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); + rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE); err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0); @@ -5156,6 +5260,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); rocker_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, untagged_vid, 0); @@ -5176,6 +5281,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); + switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, + false); rocker_port->bridge_dev = NULL; err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, @@ -5190,23 +5297,39 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) return err; } + +static int rocker_port_ovs_changed(struct rocker_port *rocker_port, + struct net_device *master) +{ + int err; + + rocker_port->bridge_dev = master; + + err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0); + if (err) + return err; + err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0); + + return err; +} + static int rocker_port_master_changed(struct net_device *dev) { struct rocker_port *rocker_port = netdev_priv(dev); 
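
The ROCKER_RX_FLAGS_FWD_OFFLOAD / offload_fwd_mark pairing lets the software bridge recognize frames the ASIC has already forwarded and avoid duplicating them. Conceptually (a sketch, not the exact upstream helper):

static bool example_sw_forward_needed(const struct sk_buff *skb,
				      const struct net_device *out)
{
	/* An ingress mark matching the egress port's mark means both
	 * ports sit in the same offloaded forwarding domain, so the
	 * hardware has already delivered this frame there. */
	return !skb->offload_fwd_mark ||
	       skb->offload_fwd_mark != out->offload_fwd_mark;
}
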
struct net_device *master = netdev_master_upper_dev_get(dev); int err = 0; - /* There are currently three cases handled here: - * 1. Joining a bridge - * 2. Leaving a previously joined bridge - * 3. Other, e.g. being added to or removed from a bond or openvswitch, - * in which case nothing is done - */ - if (master && master->rtnl_link_ops && - !strcmp(master->rtnl_link_ops->kind, "bridge")) - err = rocker_port_bridge_join(rocker_port, master); - else if (rocker_port_is_bridged(rocker_port)) + /* N.B: Do nothing if the type of master is not supported */ + if (master && master->rtnl_link_ops) { + if (!strcmp(master->rtnl_link_ops->kind, "bridge")) + err = rocker_port_bridge_join(rocker_port, master); + else if (!strcmp(master->rtnl_link_ops->kind, "openvswitch")) + err = rocker_port_ovs_changed(rocker_port, master); + } else if (rocker_port_is_bridged(rocker_port)) { err = rocker_port_bridge_leave(rocker_port); + } else if (rocker_port_is_ovsed(rocker_port)) { + err = rocker_port_ovs_changed(rocker_port, NULL); + } return err; } diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index c61fbf968036..12490b2f6504 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -159,6 +159,7 @@ enum { ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, ROCKER_TLV_CMD_PORT_SETTINGS_MAX = @@ -245,6 +246,7 @@ enum { #define ROCKER_RX_FLAGS_TCP BIT(5) #define ROCKER_RX_FLAGS_UDP BIT(6) #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) enum { ROCKER_TLV_TX_UNSPEC, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 605cc8948594..06b8061f1b42 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -49,6 +49,12 @@ enum { */ #define HUNT_FILTER_TBL_ROWS 8192 +#define EFX_EF10_FILTER_ID_INVALID 0xffff +struct efx_ef10_dev_addr { + u8 addr[ETH_ALEN]; + u16 id; +}; + struct efx_ef10_filter_table { /* The RX match field masks supported by this fw & hw, in order of priority */ enum efx_filter_match_flags rx_match_flags[ @@ -69,13 +75,14 @@ struct efx_ef10_filter_table { /* Shadow of net_device address lists, guarded by mac_lock */ #define EFX_EF10_FILTER_DEV_UC_MAX 32 #define EFX_EF10_FILTER_DEV_MC_MAX 256 - struct { - u8 addr[ETH_ALEN]; - u16 id; - } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX], - dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; - int dev_uc_count; /* negative for PROMISC */ - int dev_mc_count; /* negative for PROMISC/ALLMULTI */ + struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; + struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; + int dev_uc_count; + int dev_mc_count; +/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ + u16 ucdef_id; + u16 bcast_id; + u16 mcdef_id; }; /* An arbitrary search limit for the software hash table */ @@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx) * First try to enable it, then if we get EPERM, just * ask if it's already enabled */ - rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); + rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); if (rc == 0) { nic_data->workaround_35388 = true; } else if (rc == -EPERM) { @@ -984,12 +991,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx) static void 
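
rocker_port_master_changed() is driven from rocker's netdevice notifier whenever an upper device is linked or unlinked. A minimal sketch of that dispatch (the real handler additionally ignores netdevs that are not rocker ports):

static int example_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEUPPER)
		return notifier_from_errno(rocker_port_master_changed(dev));
	return NOTIFY_DONE;
}
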
efx_ef10_reset_mc_allocations(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; +#ifdef CONFIG_SFC_SRIOV + unsigned int i; +#endif /* All our allocations have been reset */ nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + + /* Driver-created vswitches and vports must be re-created */ + nic_data->must_probe_vswitching = true; + nic_data->vport_id = EVB_PORT_ID_ASSIGNED; +#ifdef CONFIG_SFC_SRIOV + if (nic_data->vf) + for (i = 0; i < efx->vf_count; i++) + nic_data->vf[i].vport_id = 0; +#endif } static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) @@ -1034,6 +1053,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) { int rc = efx_mcdi_reset(efx, reset_type); + /* Unprivileged functions return -EPERM, but need to return success + * here so that the datapath is brought back up. + */ + if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) + rc = 0; + /* If it was a port reset, trigger reallocation of MC resources. * Note that on an MC reset nothing needs to be done now because we'll * detect the MC reset later and handle it then. @@ -1558,10 +1583,6 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) /* All our allocations have been reset */ efx_ef10_reset_mc_allocations(efx); - /* Driver-created vswitches and vports must be re-created */ - nic_data->must_probe_vswitching = true; - nic_data->vport_id = EVB_PORT_ID_ASSIGNED; - /* The datapath firmware might have been changed */ nic_data->must_check_datapath_caps = true; @@ -2197,6 +2218,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel) GFP_KERNEL); } +static void efx_ef10_ev_fini(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + struct efx_nic *efx = channel->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, + outbuf, outlen, rc); +} + static int efx_ef10_ev_init(struct efx_channel *channel) { MCDI_DECLARE_BUF(inbuf, @@ -2208,6 +2252,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel) struct efx_ef10_nic_data *nic_data; bool supports_rx_merge; size_t inlen, outlen; + unsigned int enabled, implemented; dma_addr_t dma_addr; int rc; int i; @@ -2248,30 +2293,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); /* IRQ return is ignored */ - return rc; -} - -static void efx_ef10_ev_fini(struct efx_channel *channel) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); - MCDI_DECLARE_BUF_ERR(outbuf); - struct efx_nic *efx = channel->efx; - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); - - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + if (channel->channel || rc) + return rc; - if (rc && rc != -EALREADY) + /* Successfully created event queue on channel 0 */ + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + if (rc == -ENOSYS) { + /* GET_WORKAROUNDS was implemented before the bug26807 + * workaround, thus the latter must be unavailable in this fw + */ + 
nic_data->workaround_26807 = false; + rc = 0; + } else if (rc) { goto fail; + } else { + nic_data->workaround_26807 = + !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); + + if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && + !nic_data->workaround_26807) { + unsigned int flags; + + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG26807, + true, &flags); + + if (!rc) { + if (flags & + 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { + netif_info(efx, drv, efx->net_dev, + "other functions on NIC have been reset\n"); + /* MC's boot count has incremented */ + ++nic_data->warm_boot_count; + } + nic_data->workaround_26807 = true; + } else if (rc == -EPERM) { + rc = 0; + } + } + } - return; + if (!rc) + return 0; fail: - efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, - outbuf, outlen, rc); + efx_ef10_ev_fini(channel); + return rc; } static void efx_ef10_ev_remove(struct efx_channel *channel) @@ -3225,6 +3292,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } +static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) +{ + return filter_id % HUNT_FILTER_TBL_ROWS; +} + +static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx_ef10_filter_remove_internal(efx, 1U << priority, + filter_id, true); +} + static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) @@ -3598,6 +3678,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) goto fail; } + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + efx->filter_state = table; init_waitqueue_head(&table->waitq); return 0; @@ -3700,145 +3784,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) kfree(table); } -/* Caller must hold efx->filter_sem for read if race against - * efx_ef10_filter_table_remove() is possible - */ -static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ + if (id != EFX_EF10_FILTER_ID_INVALID) { \ + filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ + WARN_ON(!table->entry[filter_idx].spec); \ + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \ + } +static void efx_ef10_filter_mark_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - struct net_device *net_dev = efx->net_dev; - struct efx_filter_spec spec; - bool remove_failed = false; - struct netdev_hw_addr *uc; - struct netdev_hw_addr *mc; - unsigned int filter_idx; - int i, n, rc; - - if (!efx_dev_registered(efx)) - return; + unsigned int filter_idx, i; if (!table) return; /* Mark old filters that may need to be removed */ spin_lock_bh(&efx->filter_lock); - n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } - n = table->dev_mc_count < 0 ? 
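
The channel-0 branch above encodes a small negotiation; as an outline:

/* Workaround 26807 negotiation on the first event queue:
 *
 *   GET_WORKAROUNDS
 *     -ENOSYS              -> firmware predates the call, so the
 *                             workaround cannot exist; carry on
 *     implemented, not enabled
 *       -> SET_WORKAROUND(BUG26807, true)
 *            FLR_DONE flag -> other functions on the NIC were reset
 *                             and the MC boot count advanced, so bump
 *                             warm_boot_count to stay in step
 *            -EPERM        -> unprivileged function; tolerated
 *
 * Any hard failure tears the queue down again via efx_ef10_ev_fini(). */
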
1 : table->dev_mc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } + for (i = 0; i < table->dev_uc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); + for (i = 0; i < table->dev_mc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); + EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); spin_unlock_bh(&efx->filter_lock); +} +#undef EFX_EF10_FILTER_DO_MARK_OLD - /* Copy/convert the address lists; add the primary station - * address and broadcast address - */ - netif_addr_lock_bh(net_dev); - if (net_dev->flags & IFF_PROMISC || - netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { - table->dev_uc_count = -1; - } else { - table->dev_uc_count = 1 + netdev_uc_count(net_dev); - ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); - i = 1; - netdev_for_each_uc_addr(uc, net_dev) { - ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); - i++; +static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *uc; + int addr_count; + unsigned int i; + + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + addr_count = netdev_uc_count(net_dev); + if (net_dev->flags & IFF_PROMISC) + *promisc = true; + table->dev_uc_count = 1 + addr_count; + ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); + i = 1; + netdev_for_each_uc_addr(uc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); + table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || - netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { - table->dev_mc_count = -1; - } else { - table->dev_mc_count = 1 + netdev_mc_count(net_dev); - eth_broadcast_addr(table->dev_mc_list[0].addr); - i = 1; - netdev_for_each_mc_addr(mc, net_dev) { - ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); - i++; +} + +static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *mc; + unsigned int i, addr_count; + + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) + *promisc = true; + + addr_count = netdev_mc_count(net_dev); + i = 0; + netdev_for_each_mc_addr(mc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); + table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - netif_addr_unlock_bh(net_dev); - /* Insert/renew unicast filters */ - if (table->dev_uc_count >= 0) { - for (i = 0; i < table->dev_uc_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_uc_list[i].addr); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - /* Fall back to unicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( + table->dev_mc_count = i; +} + +static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, + bool multicast, bool rollback) +{ + struct 
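
EFX_EF10_FILTER_DO_MARK_OLD is half of a mark-and-sweep resync; the shape of the whole cycle:

/* Filter resync as mark-and-sweep:
 *
 *   1. mark_old():   flag every auto filter currently installed
 *   2. re-insert:    inserting an identical spec again clears the
 *                    AUTO_OLD flag (a renewal); genuinely new
 *                    addresses allocate fresh entries
 *   3. remove_old(): anything still flagged belonged to an address
 *                    that disappeared from the net_device lists
 *
 * The device is never left with no filters installed mid-update. */
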
efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_dev_addr *addr_list; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + unsigned int i, j; + int addr_count; + int rc; + + if (multicast) { + addr_list = table->dev_mc_list; + addr_count = table->dev_mc_count; + } else { + addr_list = table->dev_uc_list; + addr_count = table->dev_uc_count; + } + + /* Insert/renew filters */ + for (i = 0; i < addr_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + addr_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + if (rollback) { + netif_info(efx, drv, efx->net_dev, + "efx_ef10_filter_insert failed rc=%d\n", + rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - table->dev_uc_list[i].id); - table->dev_uc_count = -1; - break; + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; + } else { + /* mark as not inserted, and carry on */ + rc = EFX_EF10_FILTER_ID_INVALID; } - table->dev_uc_list[i].id = rc; } + addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); } - if (table->dev_uc_count < 0) { + + if (multicast && rollback) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, EFX_FILTER_FLAG_RX_RSS, 0); - efx_filter_set_uc_def(&spec); + eth_broadcast_addr(baddr); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - WARN_ON(1); - table->dev_uc_count = 0; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; } else { - table->dev_uc_list[0].id = rc; + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } } - /* Insert/renew multicast filters */ - if (table->dev_mc_count >= 0) { - for (i = 0; i < table->dev_mc_count; i++) { + return 0; +} + +static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, + bool rollback) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + int rc; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + + if (multicast) + efx_filter_set_mc_def(&spec); + else + efx_filter_set_uc_def(&spec); + + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + netif_warn(efx, drv, efx->net_dev, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? 
"Multi" : "Uni", rc); + } else if (multicast) { + table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, EFX_FILTER_FLAG_RX_RSS, 0); + eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_mc_list[i].addr); + baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - /* Fall back to multicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( - efx, EFX_FILTER_PRI_AUTO, - table->dev_mc_list[i].id); - table->dev_mc_count = -1; - break; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", + rc); + if (rollback) { + /* Roll back the mc_def filter */ + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + table->mcdef_id); + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + return rc; + } + } else { + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } - table->dev_mc_list[i].id = rc; - } - } - if (table->dev_mc_count < 0) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_mc_def(&spec); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - WARN_ON(1); - table->dev_mc_count = 0; - } else { - table->dev_mc_list[0].id = rc; } + rc = 0; + } else { + table->ucdef_id = rc; + rc = 0; } + return rc; +} + +/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD + * flag or removes these filters, we don't need to hold the filter_lock while + * scanning for these filters. + */ +static void efx_ef10_filter_remove_old(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + bool remove_failed = false; + int i; - /* Remove filters that weren't renewed. Since nothing else - * changes the AUTO_OLD flag or removes these filters, we - * don't need to hold the filter_lock while scanning for - * these filters. - */ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { @@ -3917,6 +4089,87 @@ reset_nic: return rc ? rc : rc2; } +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + bool uc_promisc = false, mc_promisc = false; + + if (!efx_dev_registered(efx)) + return; + + if (!table) + return; + + efx_ef10_filter_mark_old(efx); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + efx_ef10_filter_uc_addr_list(efx, &uc_promisc); + efx_ef10_filter_mc_addr_list(efx, &mc_promisc); + netif_addr_unlock_bh(net_dev); + + /* Insert/renew unicast filters */ + if (uc_promisc) { + efx_ef10_filter_insert_def(efx, false, false); + efx_ef10_filter_insert_addr_list(efx, false, false); + } else { + /* If any of the filters failed to insert, fall back to + * promiscuous mode - add in the uc_def filter. But keep + * our individual unicast filters. 
+ */ + if (efx_ef10_filter_insert_addr_list(efx, false, false)) + efx_ef10_filter_insert_def(efx, false, false); + } + + /* Insert/renew multicast filters */ + /* If changing promiscuous state with cascaded multicast filters, remove + * old filters first, so that packets are dropped rather than duplicated + */ + if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) + efx_ef10_filter_remove_old(efx); + if (mc_promisc) { + if (nic_data->workaround_26807) { + /* If we failed to insert promiscuous filters, rollback + * and fall back to individual multicast filters + */ + if (efx_ef10_filter_insert_def(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + efx_ef10_filter_remove_old(efx); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If we failed to insert promiscuous filters, don't + * rollback. Regardless, also insert the mc_list + */ + efx_ef10_filter_insert_def(efx, true, false); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If any filters failed to insert, rollback and fall back to + * promiscuous mode - mc_def filter and maybe broadcast. If + * that fails, roll back again and insert as many of our + * individual multicast filters as we can. + */ + if (efx_ef10_filter_insert_addr_list(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + if (nic_data->workaround_26807) + efx_ef10_filter_remove_old(efx); + if (efx_ef10_filter_insert_def(efx, true, true)) + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } + + efx_ef10_filter_remove_old(efx); + efx->mc_promisc = mc_promisc; +} + static int efx_ef10_set_mac_address(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); @@ -4085,6 +4338,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); out: + if (rc == -EPERM) + rc = 0; rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); return rc ? rc : rc2; } diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 81640f8bb811..98d172b04f71 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) return rc; } -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); + size_t outlen; + int rc; BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); - return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (!flags) + return 0; + + if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); + else + *flags = 0; + + return 0; } int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, @@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, return 0; fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + /* Older firmware lacks GET_WORKAROUNDS and this isn't especially + * terrifying. The call site will have to deal with it though. + */ + netif_printk(efx, hw, rc == -ENOSYS ? 
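
Condensing the branches of efx_ef10_filter_sync_rx_mode() above:

/* Unicast:  promisc -> ucdef plus the individual list;
 *           else individual list, falling back to ucdef on failure
 *           (successful list entries are kept either way).
 * Multicast, workaround 26807 on (cascaded filters):
 *           on a promisc flip, remove_old() runs *first* so frames
 *           are briefly dropped rather than delivered twice;
 *           if the promisc insert fails, roll back to the list.
 * Multicast, workaround off:
 *           mcdef and the individual list coexist; no rollback. */
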
KERN_DEBUG : KERN_ERR, + efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 1838afe2da92..025d504c472b 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx); bool efx_mcdi_mac_check_fault(struct efx_nic *efx); enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags); int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, unsigned int *enabled_out); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 45fca9fc66b7..4cc772164a79 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -26,6 +26,10 @@ * Unlike a warm boot, assume DMEM has been reloaded, so that * the MC persistent data must be reinitialised. */ #define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. */ +#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32) /* BIST state has been initialized */ #define MC_FW_BIST_INIT_OK (128) @@ -169,6 +173,8 @@ #define MC_CMD_ERR_EINTR 4 /* I/O failure */ #define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 /* Try again */ #define MC_CMD_ERR_EAGAIN 11 /* Out of memory */ @@ -181,6 +187,10 @@ #define MC_CMD_ERR_ENODEV 19 /* Invalid argument to target */ #define MC_CMD_ERR_EINVAL 22 +/* Broken pipe */ +#define MC_CMD_ERR_EPIPE 32 +/* Read-only */ +#define MC_CMD_ERR_EROFS 30 /* Out of range */ #define MC_CMD_ERR_ERANGE 34 /* Non-recursive resource is already acquired */ @@ -226,6 +236,43 @@ #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a /* The datapath is disabled. */ #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* The requested operation might require the + command to be passed between MCs, and the + transport doesn't support that. Should + only ever be seen over the UART. */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* Notifies the driver that the request has been relayed + * to an admin function for authorization. The driver should + * wait for a PROXY_RESPONSE event and then resend its request. + * This error code is followed by a 32-bit handle that + * helps matching it with the respective PROXY_RESPONSE event. */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 +/* The request cannot be passed for authorization because + * another request from the same function is currently being + * authorized. The driver should try again later. */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that + * doesn't await an authorization. */ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(), + * but it may also be used to trigger some special mechanism + * for handling such case, e.g. to relay the failed request + * to a designated admin function for authorization. */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* Workaround 26807 could not be turned on/off because some functions + * have already installed filters. See the comment at + * MC_CMD_WORKAROUND_BUG26807. */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 #define MC_CMD_ERR_CODE_OFST 0 @@ -275,6 +322,11 @@ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + /* Version 2 adds an optional argument to error returns: the errno value * may be followed by the (0-based) number of the first argument that @@ -394,6 +446,8 @@ #define MCDI_EVENT_AOE_BYTEBLASTER 0x9 /* enum: DDR ECC status update */ #define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 @@ -408,6 +462,16 @@ #define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 #define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 #define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -416,6 +480,8 @@ #define MCDI_EVENT_EV_CODE_WIDTH 4 #define MCDI_EVENT_CODE_LBN 44 #define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 /* enum: Bad assert. */ #define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum: PM Notice. */ @@ -470,6 +536,14 @@ #define MCDI_EVENT_CODE_MC_BIST 0x19 /* enum: PTP tick event providing current NIC time */ #define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d /* enum: Artificial event generated by host and posted via MC for test * purposes. 
*/ @@ -537,6 +611,33 @@ /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. + */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -581,6 +682,10 @@ #define FCDI_EVENT_CODE_PTP_TICK 0x7 /* enum: ECC error counters */ #define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 +/* enum: Current status of PTP */ +#define FCDI_EVENT_CODE_PTP_STATUS 0x9 +/* enum: Port id config to map MC-FC port idx */ +#define FCDI_EVENT_CODE_PORT_CONFIG 0xa #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 @@ -594,11 +699,24 @@ #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ +#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ +#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ +#define FCDI_EVENT_PTP_STATE_LBN 0 +#define FCDI_EVENT_PTP_STATE_WIDTH 32 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 +/* Index of MC port being referred to */ +#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36 +#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 +/* FC Port index that matches the MC port index in SRC */ +#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events * to the MC. Note that this structure | is overlayed over a normal FCDI event @@ -631,6 +749,90 @@ #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 +/* MUM_EVENT structuredef */ +#define MUM_EVENT_LEN 8 +#define MUM_EVENT_CONT_LBN 32 +#define MUM_EVENT_CONT_WIDTH 1 +#define MUM_EVENT_LEVEL_LBN 33 +#define MUM_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MUM_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. 
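
Taken together, MC_CMD_ERR_PROXY_PENDING and the PROXY_RESPONSE event define a retry protocol. The driver-side handling they imply (an outline; the implementation is not part of this diff):

/* 1. Send the MCDI command; the MC answers PROXY_PENDING plus a
 *    32-bit handle at MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST.
 * 2. Wait for an MCDI_EVENT_CODE_PROXY_RESPONSE event whose HANDLE
 *    field matches.
 * 3. RC == 0: the request was authorized; resend the original
 *    command. RC != 0: authorization denied, typically EPERM. */
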
*/ +#define MUM_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MUM_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. */ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. */ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. */ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + /***********************************/ /* MC_CMD_READ32 @@ -687,24 +889,34 @@ /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 -/* Source address */ -#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 -/* 
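
All of these layouts are (LBN, WIDTH) bit ranges within the 64-bit event. A generic extractor for widths below 64 (sfc itself uses its EFX_QWORD_FIELD machinery; this is just an illustration):

#define EXAMPLE_EV_FIELD(ev64, lbn, width) \
	(((u64)(ev64) >> (lbn)) & ((1ULL << (width)) - 1))

/* e.g. EXAMPLE_EV_FIELD(ev, MUM_EVENT_CODE_LBN, MUM_EVENT_CODE_WIDTH)
 * yields MUM_EVENT_CODE_REBOOT, _ASSERT, _SENSOR or
 * _QSFP_LASI_INTERRUPT. */
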
enum: The main image should be entered via a copy of a single word from and - * to this address when none of the other magic behaviours are required. +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to start the datapath CPUs. - * This is useful for certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to parse any configuration from - * flash. (In addition, the datapath CPUs will not be started, as for - * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for - * certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) */ #define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 @@ -795,6 +1007,10 @@ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 @@ -802,7 +1018,8 @@ /***********************************/ /* MC_CMD_LOG_CTRL - * Configure the output stream for various events and messages. + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions */ #define MC_CMD_LOG_CTRL 0x7 @@ -816,6 +1033,7 @@ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ @@ -955,8 +1173,12 @@ * input on the same NIC. */ #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. 
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
 /* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1b
+#define MC_CMD_PTP_OP_MAX 0x1c

 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /* MC_CMD_PTP_IN_CMD_OFST 0 */
 /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1

 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8

+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
 /* MC_CMD_PTP_OUT msgresponse */
 #define MC_CMD_PTP_OUT_LEN 0
@@ -1375,7 +1618,7 @@
 #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2

 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
  * end and start times minus the time that the MC waited for host end.
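 * For example, if the host records start and end times T1 and T2 around the
 * exchange and the MC waited W ns for the host end time, the sync window is
 * (T2 - T1) - W.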
*/ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 @@ -1415,6 +1665,9 @@ /* Enum values, see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + /***********************************/ /* MC_CMD_CSR_READ32 @@ -1915,6 +2168,14 @@ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ #define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 /* enum: Only this option is allowed for non-admin functions */ #define MC_CMD_FW_DONT_CARE 0xffffffff @@ -2481,6 +2742,12 @@ #define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ #define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -2552,12 +2819,8 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 -/* enum: Flow control is off. */ -#define MC_CMD_FCNTL_OFF 0x0 -/* enum: Respond to flow control. */ -#define MC_CMD_FCNTL_RESPOND 0x1 -/* enum: Respond to and Issue flow control. */ -#define MC_CMD_FCNTL_BIDIR 0x2 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 @@ -2632,7 +2895,7 @@ #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK /* MC_CMD_SET_MAC_IN msgrequest */ -#define MC_CMD_SET_MAC_IN_LEN 24 +#define MC_CMD_SET_MAC_IN_LEN 28 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of * EtherII, VLAN, bug16011 padding). */ @@ -2649,13 +2912,20 @@ #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 /* enum: Flow control is off. */ -/* MC_CMD_FCNTL_OFF 0x0 */ +#define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ -/* MC_CMD_FCNTL_RESPOND 0x1 */ +#define MC_CMD_FCNTL_RESPOND 0x1 /* enum: Respond to and Issue flow control. */ -/* MC_CMD_FCNTL_BIDIR 0x2 */ +#define MC_CMD_FCNTL_BIDIR 0x2 /* enum: Auto neg flow control. */ #define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. 
 */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1

 /* MC_CMD_SET_MAC_OUT msgresponse */
 #define MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
  * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
  * performed, and the statistics may be read from the message response. If
  * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
  */
 #define MC_CMD_MAC_STATS 0x2e
@@ -2791,6 +3062,7 @@
 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
 #define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
 #define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
 #define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
 #define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
@@ -2890,8 +3162,8 @@
  * PM_AND_RXDP_COUNTERS capability only.
  */
 #define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an HLB descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
 #define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
 #define MC_CMD_NVRAM_TYPE_LICENSE 0x12
 /* enum: FC Log. */
 #define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14

 /***********************************/
@@ -3407,6 +3681,8 @@
  */
 #define MC_CMD_SCHEDINFO 0x3e

+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SCHEDINFO_IN msgrequest */
 #define MC_CMD_SCHEDINFO_IN_LEN 0
@@ -3593,6 +3869,68 @@
 #define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
 /* enum: Hotpoint temperature: degC */
 #define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller
internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d

 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define MC_CMD_SENSOR_ENTRY_OFST 4
 #define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -3701,6 +4039,8 @@
 #define MC_CMD_SENSOR_STATE_BROKEN 0x3
 /* enum: Sensor is working but does not currently have a reading. */
 #define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -3870,6 +4210,7 @@
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUNDS. */
 #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
 /* enum: Bug 17230 work around. */
 #define MC_CMD_WORKAROUND_BUG17230 0x1
@@ -3877,11 +4218,38 @@
 #define MC_CMD_WORKAROUND_BUG35388 0x2
 /* enum: Bug35017 workaround (A64 tables must be identity map) */
 #define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining).
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's
+ * privilege level. If the client has the admin privilege, then all functions
+ * that have filters installed will be FLRed and the FLR_DONE flag will be set.
Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. + */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_GET_PHY_MEDIA_INFO @@ -4093,7 +4461,7 @@ /***********************************/ /* MC_CMD_GET_MAC_ADDRESSES - * Returns the base MAC, count and stride for the requestiong function + * Returns the base MAC, count and stride for the requesting function */ #define MC_CMD_GET_MAC_ADDRESSES 0x55 @@ -4115,6 +4483,527 @@ /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + +/***********************************/ +/* MC_CMD_CLP + * Perform a CLP related operation + */ +#define MC_CMD_CLP 0x56 + +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_CLP_IN msgrequest */ +#define MC_CMD_CLP_IN_LEN 4 +/* Sub operation */ +#define MC_CMD_CLP_IN_OP_OFST 0 +/* enum: Return to factory default settings */ +#define MC_CMD_CLP_OP_DEFAULT 0x1 +/* enum: Set MAC address */ +#define MC_CMD_CLP_OP_SET_MAC 0x2 +/* enum: Get MAC address */ +#define MC_CMD_CLP_OP_GET_MAC 0x3 +/* enum: Set UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_SET_BOOT 0x4 +/* enum: Get UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_GET_BOOT 0x5 + +/* MC_CMD_CLP_OUT msgresponse */ +#define MC_CMD_CLP_OUT_LEN 0 + +/* MC_CMD_CLP_IN_DEFAULT msgrequest */ +#define MC_CMD_CLP_IN_DEFAULT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ +#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_LEN 12 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MAC address assigned to port */ +#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define 
MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. */ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +/* Number of words to read. 
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. 
*/ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* 
MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define 
MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 + +/* 
MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 + /* MC_CMD_RESOURCE_SPECIFIER enum */ /* enum: Any */ #define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff @@ -4203,6 +5092,30 @@ #define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ #define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -4218,66 +5131,69 @@ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 - -/***********************************/ -/* MC_CMD_GET_WORKAROUNDS - * Read the list of all implemented and all currently enabled workarounds. 
The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
- */
-#define MC_CMD_GET_WORKAROUNDS 0x59
-
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
- */
-#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a TX
+ * timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
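+ * For example, a selector of 0xf (all four bits below set) hashes on the full
+ * source/destination address and port 4-tuple, while a selector of 0x3 hashes
+ * on the source and destination addresses only.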
+ */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 /***********************************/ @@ -4413,7 +5329,9 @@ #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_INIT_RXQ_IN msgrequest */ +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ #define MC_CMD_INIT_RXQ_IN_LENMIN 36 #define MC_CMD_INIT_RXQ_IN_LENMAX 252 #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) @@ -4456,9 +5374,73 @@ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with 
the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 + /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ @@ -4467,7 +5449,9 @@ #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_INIT_TXQ_IN msgrequest */ +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ #define MC_CMD_INIT_TXQ_IN_LENMIN 36 #define MC_CMD_INIT_TXQ_IN_LENMAX 252 #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) @@ -4499,6 +5483,10 @@ #define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -4511,6 +5499,60 @@ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +/* Flags related to Qbb flow control mode. */ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + /* MC_CMD_INIT_TXQ_OUT msgresponse */ #define MC_CMD_INIT_TXQ_OUT_LEN 0 @@ -4617,6 +5659,132 @@ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 +/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to + * manage proxied requests + */ +#define MC_PROXY_STATUS_BUFFER_LEN 16 +/* Handle allocated by the firmware for this proxy transaction */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +/* enum: An invalid handle. */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 +/* The requesting physical function number */ +#define MC_PROXY_STATUS_BUFFER_PF_OFST 4 +#define MC_PROXY_STATUS_BUFFER_PF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_PF_LBN 32 +#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16 +/* The requesting virtual function number. Set to VF_NULL if the target is a + * PF. + */ +#define MC_PROXY_STATUS_BUFFER_VF_OFST 6 +#define MC_PROXY_STATUS_BUFFER_VF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_VF_LBN 48 +#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16 +/* The target function RID. */ +#define MC_PROXY_STATUS_BUFFER_RID_OFST 8 +#define MC_PROXY_STATUS_BUFFER_RID_LEN 2 +#define MC_PROXY_STATUS_BUFFER_RID_LBN 64 +#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16 +/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. 
 */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
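+ * For example, an admin function that has declined the request described in
+ * block N of the status buffer would send this command with BLOCK_INDEX=N,
+ * STATUS=DECLINED and HANDLE copied from that block's status entry.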
+ */ +#define MC_CMD_PROXY_COMPLETE 0x5f + +#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ +#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply + * is stored in the REPLY_BUFF. + */ +#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 +/* enum: The operation has been authorized. The originating function may now + * try again. + */ +#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 +/* enum: The operation has been declined. */ +#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 +/* enum: The authorization failed because the relevant application did not + * respond in time. + */ +#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 + +/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ +#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 + /***********************************/ /* MC_CMD_ALLOC_BUFTBL_CHUNK @@ -4688,6 +5856,44 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 +/* PORT_CONFIG_ENTRY structuredef */ +#define PORT_CONFIG_ENTRY_LEN 16 +/* External port number (label) */ +#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 +/* Port core location */ +#define PORT_CONFIG_ENTRY_CORE_OFST 1 +#define PORT_CONFIG_ENTRY_CORE_LEN 1 +#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ +#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ +#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ +#define PORT_CONFIG_ENTRY_CORE_LBN 8 +#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 +/* Internal number (HW resource) relative to the core */ +#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 +#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 +/* Reserved */ +#define PORT_CONFIG_ENTRY_RSVD_OFST 3 +#define PORT_CONFIG_ENTRY_RSVD_LEN 1 +#define PORT_CONFIG_ENTRY_RSVD_LBN 24 +#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 +/* Bitmask of KR lanes used by the port */ +#define PORT_CONFIG_ENTRY_LANES_OFST 4 +#define PORT_CONFIG_ENTRY_LANES_LBN 32 +#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 +/* Port capabilities (MC_CMD_PHY_CAP_*) */ +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 +/* Reserved (align to 16 bytes) */ +#define PORT_CONFIG_ENTRY_RSVD2_OFST 12 +#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 +#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 + /***********************************/ /* MC_CMD_FILTER_OP @@ -4759,9 +5965,9 @@ #define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ #define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 -/* enum: loop back to port 0 TX MAC */ +/* enum: loop back to TXDP 0 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 -/* enum: loop back to port 1 TX MAC */ +/* enum: loop back to TXDP 1 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 @@ -4778,9 +5984,7 @@ #define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or - * MC_CMD_DOT1P_MAPPING_ALLOC. 
Note that these handles should be considered - * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be - * a valid handle. + * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 /* transmit domain (reserved; set to 0) */ @@ -4835,6 +6039,235 @@ #define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 #define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). + */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define 
MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + /* MC_CMD_FILTER_OP_OUT msgresponse */ #define MC_CMD_FILTER_OP_OUT_LEN 12 /* identifies the type of operation requested */ @@ -4849,6 +6282,27 @@ #define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 #define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 #define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ /***********************************/ @@ -4865,6 +6319,10 @@ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 /* enum: read the list of supported RX filter matches */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -4884,6 +6342,17 @@ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + /***********************************/ /* MC_CMD_PARSER_DISP_RW @@ -4901,8 +6370,10 @@ #define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ #define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine */ +/* enum: Lookup engine (with original metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* enum: Lookup engine (with requested metadata format) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 /* enum: read a word of DICPU DMEM or a LUE entry */ @@ -4919,6 +6390,8 @@ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -5019,7 +6492,9 @@ /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 -/* MC_CMD_ALLOC_VIS_OUT msgresponse */ +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 @@ -5028,6 +6503,17 @@ */ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. 
+ */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 + /***********************************/ /* MC_CMD_FREE_VIS @@ -5114,13 +6600,15 @@ #define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 /***********************************/ @@ -5575,6 +7063,7 @@ #define MC_CMD_GET_CAPABILITIES 0xbe #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_CAPABILITIES_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_IN_LEN 0 @@ -5582,6 +7071,20 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 @@ -5600,8 +7103,14 @@ #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 @@ -5609,6 +7118,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ @@ -5632,6 +7145,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ @@ -5642,22 +7159,69 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 #define 
MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
@@ -5735,6 +7299,15 @@
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -5753,8 +7326,14 @@
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq joins when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
/* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq joins when it is inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -5826,13 +7434,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. 
*/ +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 @@ -6136,8 +7757,13 @@ /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 -/* The handle of the new RSS context */ +/* The handle of the new RSS context. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -6249,7 +7875,11 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 -/* Hash control flags */ +/* Hash control flags. The _EN bits are always supported. The _MODE bits only + * work when the firmware reports ADDITIONAL_RSS_MODES in + * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0. + * See the RSS_MODE structure for the meaning of the mode bits. + */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -6259,6 +7889,20 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 @@ -6279,7 +7923,12 @@ /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 -/* Hash control flags */ +/* Hash control flags. If any _MODE bits are non-zero (which will only be true + * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be + * disregarded (but are guaranteed to be consistent with the _MODE bits if + * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was + * allocated). 
+ */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -6289,6 +7938,20 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 /***********************************/ @@ -6311,8 +7974,13 @@ /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 -/* The handle of the new .1p mapping */ +/* The handle of the new .1p mapping. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +/* enum: guaranteed invalid .1p mapping handle value */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -6421,375 +8089,6 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_STATS - * Retrieve rmon rx class statistics - */ -#define MC_CMD_RMON_RX_CLASS_STATS 0xc3 - -/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_CLASS_STATS - * Retrieve rmon tx class statistics - */ -#define MC_CMD_RMON_TX_CLASS_STATS 0xc4 - -/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */ -#define 
MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS - * Retrieve rmon rx super_class statistics - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS - * Retrieve rmon tx super_class statistics - */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0 - - 
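Every bitfield in this header, including the RMON flag words being retired here, follows the same convention: _LBN gives the least-significant bit number of the field within its containing dword and _WIDTH gives the field's size in bits. A minimal sketch of packing the MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN flags word above under that convention; mcdi_insert_field() and rmon_rx_add_qid_flags() are hypothetical helpers written for illustration, not part of this header or of the sfc driver:

#include <stdint.h>

/* Hypothetical helper: place VALUE into bits LBN..LBN+WIDTH-1 of a dword. */
static uint32_t mcdi_insert_field(uint32_t dword, unsigned int lbn,
				  unsigned int width, uint32_t value)
{
	uint32_t mask = (width < 32 ? (1u << width) - 1 : ~0u) << lbn;

	return (dword & ~mask) | ((value << lbn) & mask);
}

/* Pack SUPER_CLASS (bits 0..3), PE_DELTA (4..7) and MTU (8..21) into the
 * flags dword, which would then be stored little-endian at
 * MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST.
 */
static uint32_t rmon_rx_add_qid_flags(uint32_t sclass, uint32_t pe_delta,
				      uint32_t mtu)
{
	uint32_t flags = 0;

	flags = mcdi_insert_field(flags,
			MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN,
			MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH,
			sclass);
	flags = mcdi_insert_field(flags,
			MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN,
			MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH,
			pe_delta);
	flags = mcdi_insert_field(flags,
			MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN,
			MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH, mtu);
	return flags;
}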
-/***********************************/ -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_CLASS - * Allocate an rmon class - */ -#define MC_CMD_RMON_ALLOC_CLASS 0xca - -/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_CLASS - * Deallocate an rmon class - */ -#define MC_CMD_RMON_DEALLOC_CLASS 0xcb - -/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4 -/* class */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_SUPER_CLASS - * Allocate an rmon super_class - */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS - * Deallocate an rmon tx super_class - */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4 -/* super_class */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */ -#define 
MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_RX_UP_CONV_STATS - * Retrieve up converter statistics - */ -#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce - -/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPI_STATS - * Retrieve rx ipi stats - */ -#define MC_CMD_RMON_RX_IPI_STATS 0xcf - -/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS - * Retrieve rx ipsec cntxt_ptr indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS - * Retrieve rx ipsec port indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define 
MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
 * Add a MAC address to a v-port
 */
@@ -6877,7 +8176,7 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 @@ -6921,354 +8220,6 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS - * Retrieve rx class drop stats - */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS - * Retrieve rx super class drop stats - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5 - -/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define 
MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPI_STATS - * Retrieve tx ipi stats - */ -#define MC_CMD_RMON_TX_IPI_STATS 0xd7 - -/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS - * Retrieve tx ipsec counters by cntxt_ptr - */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS - * Retrieve tx ipsec counters by port - */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 
1 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_STATS - * Retrieve tx nowhere stats - */ -#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb - -/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS - * Retrieve tx nowhere qbb stats - */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4 -#define 
MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd - -/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ /* MC_CMD_GET_CLOCK * Return the system and PDCPU clock frequencies. 
*/ @@ -7296,22 +8247,66 @@ #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_SET_CLOCK_IN msgrequest */ -#define MC_CMD_SET_CLOCK_IN_LEN 12 -/* Requested system frequency in MHz; 0 leaves unchanged. */ +#define MC_CMD_SET_CLOCK_IN_LEN 28 +/* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 -/* Requested inter-core frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the system clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 -/* Request DPCPU frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the inter-core clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +/* enum: Leave the DPCPU clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for PCS clock domain */ +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +/* enum: Leave the PCS clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for MC clock domain */ +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +/* enum: Leave the MC clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for rmon clock domain */ +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +/* enum: Leave the rmon clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for vswitch clock domain */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +/* enum: Leave the vswitch clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ -#define MC_CMD_SET_CLOCK_OUT_LEN 12 +#define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +/* enum: The system clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +/* enum: The inter-core clock domain doesn't exist / isn't used */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +/* enum: The dpcpu clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +/* Resulting PCS frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +/* enum: The PCS clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting MC frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +/* enum: The MC clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +/* Resulting rmon frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +/* enum: The rmon clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +/* Resulting vswitch frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +/* enum: The vswitch clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -7325,12 
+8320,22 @@ /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 -/* enum: RxDPCPU */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0 +/* enum: RxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 /* enum: TxDPCPU0 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* enum: RxDPCPU1 (Medford only) */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +/* enum: RxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_RX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +/* enum: TxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_TX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -7418,6 +8423,25 @@ /***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. + */ +#define MC_CMD_SHMBOOT_OP 0xe6 + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +/* enum: Copy slave_data section to the slave core. (Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + +/***********************************/ /* MC_CMD_CAP_BLK_READ * Read multiple 64bit words from capture block memory */ @@ -7730,6 +8754,8 @@ * more data is returned. */ #define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Read Figure Of Merit (eye quality, higher is better). */ +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -7762,20 +8788,32 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15) */ +/* enum: Attenuation (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15) */ +/* enum: CTLE Boost (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD + * for Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 -/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 -/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 -/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 -/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +/* enum: Edge DFE DLEV (TBD for Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 #define 
MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7865,6 +8903,8 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +/* enum: TX Termination Impedance control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7955,6 +8995,20 @@ #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 + +/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ +#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 + /***********************************/ /* MC_CMD_PCIE_TUNE @@ -8224,6 +9278,8 @@ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 /* enum: validate application */ #define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* enum: mask application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 /* arguments specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 @@ -8258,10 +9314,22 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 +/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +/* flag */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 + +/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 + /***********************************/ /* MC_CMD_SET_PORT_SNIFF_CONFIG - * Configure port sniffing for the physical port associated with the calling + * Configure RX port sniffing for the physical port associated with the calling * function. Only a privileged function may change the port sniffing * configuration. A copy of all traffic delivered to the host (non-promiscuous * mode) or all traffic arriving at the port (promiscuous mode) may be @@ -8299,7 +9367,7 @@ /***********************************/ /* MC_CMD_GET_PORT_SNIFF_CONFIG - * Obtain the current port sniffing configuration for the physical port + * Obtain the current RX port sniffing configuration for the physical port * associated with the calling function. Only a privileged function may read * the configuration. */ @@ -8330,4 +9398,673 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. 
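A sketch of driving the new READ_FOM sub-operation (hypothetical wrapper; because the OP selector is a single byte at offset 0 followed by three reserved padding bytes, writing it as a dword also zeroes the padding):

static int example_kr_tune_read_fom(struct efx_nic *efx, u32 lane, u32 *fom)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_READ_FOM_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_READ_FOM_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, KR_TUNE_READ_FOM_IN_KR_TUNE_OP,
		       MC_CMD_KR_TUNE_IN_READ_FOM);
	MCDI_SET_DWORD(inbuf, KR_TUNE_READ_FOM_IN_LANE, lane);

	rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_KR_TUNE_READ_FOM_OUT_LEN)
		return -EIO;

	/* Higher figure-of-merit means a better eye */
	*fom = MCDI_DWORD(outbuf, KR_TUNE_READ_FOM_OUT_FOM);
	return 0;
}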
+ */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG + * Configure TX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic transmitted through the port may be + * delivered to a specific queue, or a set of queues with RSS. Note that these + * packets are delivered with transmit timestamps in the packet prefix, not + * receive timestamps, so it is likely that the queue(s) will need to be + * dedicated as TX sniff receivers. 
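A sketch of one use of this command: enabling the per-TXQ multicast UDP destination lookup described above (hypothetical wrapper; for this TYPE the VALUE array is a single boolean word, so the LEN(1) request is the 12-byte LENMIN):

static int example_set_txq_mcast_udp_lookup(struct efx_nic *efx,
					    u32 txq_handle, bool enable)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
		       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
	/* ENTITY is a queue handle for this TYPE */
	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
	MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, enable ? 1 : 0);

	/* No response payload on success */
	return efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}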
+ */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb + +#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG + * Obtain the current TX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc + +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 + + +/***********************************/ +/* MC_CMD_RMON_STATS_RX_ERRORS + * Per queue rx error stats. + */ +#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe + +#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 +/* The rx queue to get stats for. 
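Enabling TX sniffing to a single queue might look like the following sketch (hypothetical wrapper; the ENABLE flag is a one-bit LBN/WIDTH field, and 0xffffffff is a safe RX_CONTEXT filler since it is documented never to be a valid RSS handle):

static int example_enable_tx_sniff(struct efx_nic *efx, u32 rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN);

	MCDI_POPULATE_DWORD_1(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS,
			      SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE, 1);
	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rx_queue);
	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE,
		       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
	/* Not using RSS, so pass the guaranteed-invalid handle */
	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
		       0xffffffff);

	return efx_mcdi_rpc(efx, MC_CMD_SET_TX_PORT_SNIFF_CONFIG,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}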
*/ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 + + +/***********************************/ +/* MC_CMD_GET_PCIE_RESOURCE_INFO + * Find out about available PCIE resources + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 +/* The maximum number of PFs the device can expose */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +/* The maximum number of VFs the device can expose in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +/* The maximum number of MSI-X vectors the device can provide in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +/* the number of MSI-X vectors the device will allocate by default to each PF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +/* the number of MSI-X vectors the device will allocate by default to each VF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +/* the maximum number of MSI-X vectors the device can allocate to any one PF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +/* the maximum number of MSI-X vectors the device can allocate to any one VF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 + + +/***********************************/ +/* MC_CMD_GET_PORT_MODES + * Find out about available port modes + */ +#define MC_CMD_GET_PORT_MODES 0xff + +#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_MODES_IN msgrequest */ +#define MC_CMD_GET_PORT_MODES_IN_LEN 0 + +/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */ +#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 + + +/***********************************/ +/* MC_CMD_READ_ATB + * Sample voltages on the ATB + */ +#define MC_CMD_READ_ATB 0x100 + +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_READ_ATB_IN msgrequest */ +#define MC_CMD_READ_ATB_IN_LEN 16 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 + +/* MC_CMD_READ_ATB_OUT msgresponse */ +#define MC_CMD_READ_ATB_OUT_LEN 4 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. 
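A sketch of querying the port-mode bitmask defined above (hypothetical wrapper; the TLV_PORT_MODE_* index values live with the firmware's TLV partition layout, so only the raw mask is returned here):

static int example_get_port_modes(struct efx_nic *efx, u32 *modes_mask)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_MODES_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_MODES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_PORT_MODES_OUT_LEN)
		return -EIO;

	/* DEFAULT_MODE and CURRENT_MODE follow at offsets 4 and 8 */
	*modes_mask = MCDI_DWORD(outbuf, GET_PORT_MODES_OUT_MODES);
	return 0;
}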
+ */ +#define MC_CMD_GET_WORKAROUNDS 0x59 + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +/* enum: Bug 17230 workaround. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 workaround (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug 35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MASK + * Read/set privileges of an arbitrary PCIe function + */ +#define MC_CMD_PRIVILEGE_MASK 0x5a + +#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8 +/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000, VF + * 1,3 = 0x00030001 + */ +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +/* New privilege mask to be set. The mask will only be changed if the MSB is + * set to 1. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +/* enum: Set this bit to indicate that a new privilege mask is to be set, + * otherwise the command will only read the existing mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 + +/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 +/* For an admin function, all the privileges are always reported.
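The EF10 changes later in this diff key off the BUG26807 bit (multicast filter chaining). A sketch of reading the parallel IMPLEMENTED/ENABLED bitmasks (hypothetical wrapper; the in-tree driver wraps the same call in a small mcdi helper):

static bool example_mcast_chaining_enabled(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc || outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
		return false;

	/* A bit can be implemented without being currently enabled */
	return !!(MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED) &
		  MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
}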
*/ +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 + + +/***********************************/ +/* MC_CMD_LINK_STATE_MODE + * Read/set link state mode of a VF + */ +#define MC_CMD_LINK_STATE_MODE 0x5c + +#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ +#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 +/* The target function to have its link state mode read or set, must be a VF + * e.g. VF 1,3 = 0x00030001 + */ +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 +/* New link state mode to be set */ +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +/* enum: Use this value to just read the existing setting without modifying it. + */ +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff + +/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ +#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 + + +/***********************************/ +/* MC_CMD_GET_SNAPSHOT_LENGTH + * Obtain the current range of allowable values for the SNAPSHOT_LENGTH + * parameter to MC_CMD_INIT_RXQ. + */ +#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 + +#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0 + +/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 +/* Minimum acceptable snapshot length. */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 +/* Maximum acceptable snapshot length.
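A sketch of forcing a VF's link state down (hypothetical wrapper; the FUNCTION dword packs PF in bits 15:0 and VF in bits 31:16 per the LBN/WIDTH pairs above, which MCDI_POPULATE_DWORD_2 encodes directly):

static int example_set_vf_link_down(struct efx_nic *efx,
				    unsigned int pf, unsigned int vf)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
	size_t outlen;

	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
			      LINK_STATE_MODE_IN_FUNCTION_PF, pf,
			      LINK_STATE_MODE_IN_FUNCTION_VF, vf);
	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
		       MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);

	/* OLD_MODE comes back in outbuf; ignored in this sketch */
	return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
			    outbuf, sizeof(outbuf), &outlen);
}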
*/ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 + + +/***********************************/ +/* MC_CMD_FUSE_DIAGS + * Additional fuse diagnostics + */ +#define MC_CMD_FUSE_DIAGS 0x102 + +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_FUSE_DIAGS_IN msgrequest */ +#define MC_CMD_FUSE_DIAGS_IN_LEN 0 + +/* MC_CMD_FUSE_DIAGS_OUT msgresponse */ +#define MC_CMD_FUSE_DIAGS_OUT_LEN 48 +/* Total number of mismatched bits between pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +/* Checksum of data after logical OR of pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +/* Total number of mismatched bits between pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +/* Checksum of data after logical OR of pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +/* Total number of mismatched bits between pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +/* Checksum of data after logical OR of pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MODIFY + * Modify the privileges of a set of PCIe functions. Note that this operation + * only affects non-admin functions unless the admin privilege itself is + * included in one of the masks provided. + */ +#define MC_CMD_PRIVILEGE_MODIFY 0x60 + +#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 +/* The groups of functions to have their privilege masks modified. */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +/* For VFS_OF_PF specify the PF, for ONE specify the target function */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 +/* Privileges to be added to the target functions. For privilege definitions + * refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +/* Privileges to be removed from the target functions.
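A sketch of stripping one privilege from every VF in one call (hypothetical wrapper; the mask bit values are those of MC_CMD_PRIVILEGE_MASK as the comment above notes, and FUNCTION is ignored for the VFS_ONLY group):

static int example_revoke_vf_mac_spoofing(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MODIFY_IN_LEN);

	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FN_GROUP,
		       MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FUNCTION, 0);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_ADD_MASK, 0);
	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_REMOVE_MASK,
		       MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING);

	return efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MODIFY,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}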
For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_BYTES + * Read XPM memory + */ +#define MC_CMD_XPM_READ_BYTES 0x103 + +#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ +#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +/* Data */ +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_BYTES + * Write XPM memory + */ +#define MC_CMD_XPM_WRITE_BYTES 0x104 + +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +/* Start address (byte) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +/* Data */ +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 + +/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_SECTOR + * Read XPM sector + */ +#define MC_CMD_XPM_READ_SECTOR 0x105 + +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 +/* Sector index */ +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +/* Sector size */ +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 + +/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) +/* Sector type */ +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +/* Sector data */ +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_SECTOR + * Write XPM sector + */ +#define MC_CMD_XPM_WRITE_SECTOR 0x106 + +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) +/* If writing fails due to an uncorrectable error, try up to RETRIES following + * sectors (or until no more space available). If 0, only one write attempt is + * made. 
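Since a single MCDI response body carries at most 252 data bytes, reads of larger XPM regions must be chunked. A sketch (hypothetical wrapper; MCDI_PTR yields a pointer to the variable-length DATA array in the response):

static int example_xpm_read(struct efx_nic *efx, u32 addr, u8 *buf, size_t len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
	size_t outlen, chunk;
	int rc;

	while (len) {
		chunk = min_t(size_t, len, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
		MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
		MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, chunk);
		rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < chunk)
			return -EIO;
		memcpy(buf, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA), chunk);
		addr += chunk;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}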
Note that uncorrectable errors are unlikely, thanks to the XPM self-repair + * mechanism. + */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 +/* Sector type */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +/* Enum values, see field(s): */ +/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* Sector size */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +/* Sector data */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 + +/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 +/* New sector index */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 + + +/***********************************/ +/* MC_CMD_XPM_INVALIDATE_SECTOR + * Invalidate XPM sector + */ +#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 + +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 +/* Sector index */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 + +/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_BLANK_CHECK + * Blank-check XPM memory and report bad locations + */ +#define MC_CMD_XPM_BLANK_CHECK 0x108 + +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ +#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +/* Total number of bad (non-blank) locations */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +/* Addresses of bad locations (may be fewer than BAD_COUNT, if all cannot fit + * into the MCDI response) + */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 + + +/***********************************/ +/* MC_CMD_XPM_REPAIR + * Blank-check and repair XPM memory + */ +#define MC_CMD_XPM_REPAIR 0x109 + +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_REPAIR_IN msgrequest */ +#define MC_CMD_XPM_REPAIR_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_REPAIR_OUT msgresponse */ +#define MC_CMD_XPM_REPAIR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_DECODER_TEST + * Test XPM memory address decoders for gross manufacturing defects. Can only + * be performed on an unprogrammed part. + */ +#define MC_CMD_XPM_DECODER_TEST 0x10a + +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ +#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 + +/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */ +#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_TEST + * XPM memory write test.
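A sketch of a blank-check call (hypothetical wrapper). BAD_COUNT is the total number of bad locations, while the 16-bit BAD_ADDR array may be truncated to the 124 entries that fit in one response:

static int example_xpm_blank_check(struct efx_nic *efx, u32 addr, u32 count)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_COUNT, count);

	rc = efx_mcdi_rpc(efx, MC_CMD_XPM_BLANK_CHECK, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
		return -EIO;

	/* Non-zero BAD_COUNT means the region is not blank */
	return MCDI_DWORD(outbuf, XPM_BLANK_CHECK_OUT_BAD_COUNT) ? -EIO : 0;
}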
Test XPM write logic for gross manufacturing defects + * by writing to a dedicated test row. There are 16 locations in the test row + * and the test can only be performed on locations that have not been + * previously used (i.e. can be run at most 16 times). The test will pick the + * first available location to use, or fail with ENOSPC if none left. + */ +#define MC_CMD_XPM_WRITE_TEST 0x10b + +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ +#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 + +/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 + + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 47d1e3a96522..4d35313a239d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -925,6 +925,7 @@ struct vfdi_status; * @stats_lock: Statistics update lock. Must be held when calling * efx_nic_type::{update,start,stop}_stats. * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * @mc_promisc: Whether in multicast promiscuous mode when last changed * * This is stored in the private area of the &struct net_device. */ @@ -1072,6 +1073,7 @@ struct efx_nic { int last_irq_cpu; spinlock_t stats_lock; atomic_t n_rx_noskb_drops; + bool mc_promisc; }; static inline int efx_dev_registered(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 31ff9084d9a4..0b536e27d3b2 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -506,6 +506,7 @@ enum { * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @workaround_26807: Flag: firmware supports workaround for bug 26807 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated * after MC reboot * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of @@ -535,6 +536,7 @@ struct efx_ef10_nic_data { bool rx_rss_context_exclusive; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; + bool workaround_26807; bool must_check_datapath_caps; u32 datapath_caps; unsigned int rx_dpcpu_fw_id; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index b605dfd5c7bc..9d78830da609 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) if (efx->type->test_nvram) { rc = efx->type->test_nvram(efx); - tests->nvram = rc ? -1 : 1; + if (rc == -EPERM) + rc = 0; + else + tests->nvram = rc ? -1 : 1; } return rc; @@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, mutex_lock(&efx->mac_lock); rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); mutex_unlock(&efx->mac_lock); + if (rc == -EPERM) + rc = 0; + else + netif_info(efx, drv, efx->net_dev, + "%s phy selftest\n", rc ? 
"Failed" : "Passed"); + return rc; } @@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, wmb(); kfree(state); + if (rc == -EPERM) + rc = 0; + return rc; } diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b323b9167526..b2f886d90429 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1042,9 +1042,5 @@ const struct efx_nic_type siena_a0_nic_type = { .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ), + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index e817a1a44379..b1e5f24708c9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -16,6 +16,46 @@ #include "stmmac.h" #include "stmmac_platform.h" +static int dwmac_generic_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + if (pdev->dev.of_node) { + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); + } + } else { + plat_dat = dev_get_platdata(&pdev->dev); + if (!plat_dat) { + dev_err(&pdev->dev, "no platform data provided\n"); + return -EINVAL; + } + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; + } + + /* Custom initialisation (if needed) */ + if (plat_dat->init) { + ret = plat_dat->init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + } + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} + static const struct of_device_id dwmac_generic_match[] = { { .compatible = "st,spear600-gmac"}, { .compatible = "snps,dwmac-3.610"}, @@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = { MODULE_DEVICE_TABLE(of, dwmac_generic_match); static struct platform_driver dwmac_generic_driver = { - .probe = stmmac_pltfr_probe, + .probe = dwmac_generic_probe, .remove = stmmac_pltfr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 7e3129e7f143..333489f0fd24 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) return NULL; } -static void *ipq806x_gmac_setup(struct platform_device *pdev) +static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct ipq806x_gmac *gmac = priv; + + ipq806x_gmac_set_speed(gmac, speed); +} + +static int ipq806x_gmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct device *dev = &pdev->dev; struct ipq806x_gmac *gmac; int val; void *err; + val = stmmac_get_platform_resources(pdev, &stmmac_res); + if (val) + 
return val; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); if (!gmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); - if (err) { + if (IS_ERR(err)) { dev_err(dev, "device tree parsing error\n"); - return err; + return PTR_ERR(err); } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, @@ -285,7 +302,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - return NULL; + return -EINVAL; } regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); @@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - return NULL; + return -EINVAL; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); @@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev) 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); } - return gmac; -} + plat_dat->has_gmac = true; + plat_dat->bsp_priv = gmac; + plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; -static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) -{ - struct ipq806x_gmac *gmac = priv; - - ipq806x_gmac_set_speed(gmac, speed); + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data ipq806x_gmac_data = { - .has_gmac = 1, - .setup = ipq806x_gmac_setup, - .fix_mac_speed = ipq806x_gmac_fix_mac_speed, -}; - static const struct of_device_id ipq806x_gmac_dwmac_match[] = { - { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data }, + { .compatible = "qcom,ipq806x-gmac" }, { } }; MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match); static struct platform_driver ipq806x_gmac_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = ipq806x_gmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "ipq806x-gmac-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index cb888d3ebbdc..78e9d1861896 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -25,66 +25,53 @@ # define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0 # define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4 -struct lpc18xx_dwmac_priv_data { +static int lpc18xx_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct regmap *reg; - int interface; -}; + u8 ethmode; + int ret; -static void *lpc18xx_dwmac_setup(struct platform_device *pdev) -{ - struct lpc18xx_dwmac_priv_data *dwmac; + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; - dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return ERR_PTR(-ENOMEM); + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); - dwmac->interface = of_get_phy_mode(pdev->dev.of_node); - if (dwmac->interface < 0) - return ERR_PTR(dwmac->interface); + plat_dat->has_gmac = true; - dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); - if (IS_ERR(dwmac->reg)) { - dev_err(&pdev->dev, "Syscon lookup failed\n"); - return dwmac->reg; + reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); + if (IS_ERR(reg)) { + dev_err(&pdev->dev, "syscon lookup 
failed\n"); + return PTR_ERR(reg); } - return dwmac; -} - -static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv) -{ - struct lpc18xx_dwmac_priv_data *dwmac = priv; - u8 ethmode; - - if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII; - } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; } else { dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); return -EINVAL; } - regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6, + regmap_update_bits(reg, LPC18XX_CREG_CREG6, LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data lpc18xx_dwmac_data = { - .has_gmac = 1, - .setup = lpc18xx_dwmac_setup, - .init = lpc18xx_dwmac_init, -}; - static const struct of_device_id lpc18xx_dwmac_match[] = { - { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data }, + { .compatible = "nxp,lpc1850-dwmac" }, { } }; MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match); static struct platform_driver lpc18xx_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = lpc18xx_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "lpc18xx-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 61a324a87d09..c1bac1912b37 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, dwmac->reg); } -static void *meson6_dwmac_setup(struct platform_device *pdev) +static int meson6_dwmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct meson_dwmac *dwmac; struct resource *res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->reg)) - return ERR_CAST(dwmac->reg); + return PTR_ERR(dwmac->reg); - return dwmac; -} + plat_dat->bsp_priv = dwmac; + plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; -static const struct stmmac_of_data meson6_dwmac_data = { - .setup = meson6_dwmac_setup, - .fix_mac_speed = meson6_dwmac_fix_mac_speed, -}; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id meson6_dwmac_match[] = { - { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, + { .compatible = "amlogic,meson6-dwmac" }, { } }; MODULE_DEVICE_TABLE(of, meson6_dwmac_match); static struct platform_driver meson6_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = meson6_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "meson6-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 00a1e1e09d4f..11baa4b19779 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -46,7 +46,7 @@ struct rk_priv_data { struct 
platform_device *pdev; int phy_iface; struct regulator *regulator; - struct rk_gmac_ops *ops; + const struct rk_gmac_ops *ops; bool clk_enabled; bool clock_input; @@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) } } -struct rk_gmac_ops rk3288_ops = { +static const struct rk_gmac_ops rk3288_ops = { .set_to_rgmii = rk3288_set_to_rgmii, .set_to_rmii = rk3288_set_to_rmii, .set_rgmii_speed = rk3288_set_rgmii_speed, @@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) } } -struct rk_gmac_ops rk3368_ops = { +static const struct rk_gmac_ops rk3368_ops = { .set_to_rgmii = rk3368_set_to_rgmii, .set_to_rmii = rk3368_set_to_rmii, .set_rgmii_speed = rk3368_set_rgmii_speed, @@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) } static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, - struct rk_gmac_ops *ops) + const struct rk_gmac_ops *ops) { struct rk_priv_data *bsp_priv; struct device *dev = &pdev->dev; @@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, return bsp_priv; } -static void *rk3288_gmac_setup(struct platform_device *pdev) -{ - return rk_gmac_setup(pdev, &rk3288_ops); -} - -static void *rk3368_gmac_setup(struct platform_device *pdev) -{ - return rk_gmac_setup(pdev, &rk3368_ops); -} - static int rk_gmac_init(struct platform_device *pdev, void *priv) { struct rk_priv_data *bsp_priv = priv; @@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed) dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); } -static const struct stmmac_of_data rk3288_gmac_data = { - .has_gmac = 1, - .fix_mac_speed = rk_fix_speed, - .setup = rk3288_gmac_setup, - .init = rk_gmac_init, - .exit = rk_gmac_exit, -}; +static int rk_gmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + const struct rk_gmac_ops *data; + int ret; -static const struct stmmac_of_data rk3368_gmac_data = { - .has_gmac = 1, - .fix_mac_speed = rk_fix_speed, - .setup = rk3368_gmac_setup, - .init = rk_gmac_init, - .exit = rk_gmac_exit, -}; + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "no of match data provided\n"); + return -EINVAL; + } + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->has_gmac = true; + plat_dat->init = rk_gmac_init; + plat_dat->exit = rk_gmac_exit; + plat_dat->fix_mac_speed = rk_fix_speed; + + plat_dat->bsp_priv = rk_gmac_setup(pdev, data); + if (IS_ERR(plat_dat->bsp_priv)) + return PTR_ERR(plat_dat->bsp_priv); + + ret = rk_gmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id rk_gmac_dwmac_match[] = { - { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data}, - { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data}, + { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, + { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { } }; MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match); static struct platform_driver rk_gmac_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = rk_gmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "rk_gmac-dwmac", diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 8141c5b844ae..401383b252a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) return 0; } -static void *socfpga_dwmac_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - int ret; - struct socfpga_dwmac *dwmac; - - dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return ERR_PTR(-ENOMEM); - - ret = socfpga_dwmac_parse_data(dwmac, dev); - if (ret) { - dev_err(dev, "Unable to parse OF data\n"); - return ERR_PTR(ret); - } - - ret = socfpga_dwmac_setup(dwmac); - if (ret) { - dev_err(dev, "couldn't setup SoC glue (%d)\n", ret); - return ERR_PTR(ret); - } - - return dwmac; -} - static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv) { struct socfpga_dwmac *dwmac = priv; @@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) return ret; } -static const struct stmmac_of_data socfpga_gmac_data = { - .setup = socfpga_dwmac_probe, - .init = socfpga_dwmac_init, - .exit = socfpga_dwmac_exit, - .fix_mac_speed = socfpga_dwmac_fix_mac_speed, -}; +static int socfpga_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct device *dev = &pdev->dev; + int ret; + struct socfpga_dwmac *dwmac; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + ret = socfpga_dwmac_parse_data(dwmac, dev); + if (ret) { + dev_err(dev, "Unable to parse OF data\n"); + return ret; + } + + ret = socfpga_dwmac_setup(dwmac); + if (ret) { + dev_err(dev, "couldn't setup SoC glue (%d)\n", ret); + return ret; + } + + plat_dat->bsp_priv = dwmac; + plat_dat->init = socfpga_dwmac_init; + plat_dat->exit = socfpga_dwmac_exit; + plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; + + ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id socfpga_dwmac_match[] = { - { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, + { .compatible = "altr,socfpga-stmmac" }, { } }; MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); static struct platform_driver socfpga_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = socfpga_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index a2e8111c5d14..7f6f4a4fcc70 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -21,6 +21,7 @@ #include <linux/regmap.h> #include <linux/clk.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_net.h> #include "stmmac_platform.h" @@ -128,6 +129,11 @@ struct sti_dwmac { struct device *dev; struct regmap *regmap; u32 speed; + void (*fix_retime_src)(void *priv, unsigned int speed); +}; + +struct sti_dwmac_of_data { + void (*fix_retime_src)(void *priv, unsigned int speed); }; static u32 phy_intf_sels[] = { @@ -222,8 +228,9 @@ static void 
stid127_fix_retime_src(void *priv, u32 spd) regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val); } -static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac) +static int sti_dwmac_init(struct platform_device *pdev, void *priv) { + struct sti_dwmac *dwmac = priv; struct regmap *regmap = dwmac->regmap; int iface = dwmac->interface; struct device *dev = dwmac->dev; @@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac) val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; regmap_update_bits(regmap, reg, ENMII_MASK, val); -} - -static int stix4xx_init(struct platform_device *pdev, void *priv) -{ - struct sti_dwmac *dwmac = priv; - u32 spd = dwmac->speed; - - sti_dwmac_ctrl_init(dwmac); - - stih4xx_fix_retime_src(priv, spd); - - return 0; -} -static int stid127_init(struct platform_device *pdev, void *priv) -{ - struct sti_dwmac *dwmac = priv; - u32 spd = dwmac->speed; - - sti_dwmac_ctrl_init(dwmac); - - stid127_fix_retime_src(priv, spd); + dwmac->fix_retime_src(priv, dwmac->speed); return 0; } @@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, return 0; } -static void *sti_dwmac_setup(struct platform_device *pdev) +static int sti_dwmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + const struct sti_dwmac_of_data *data; + struct stmmac_resources stmmac_res; struct sti_dwmac *dwmac; int ret; + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "No OF match data provided\n"); + return -EINVAL; + } + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; ret = sti_dwmac_parse_data(dwmac, pdev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - return ERR_PTR(ret); + return ret; } - return dwmac; + dwmac->fix_retime_src = data->fix_retime_src; + + plat_dat->bsp_priv = dwmac; + plat_dat->init = sti_dwmac_init; + plat_dat->exit = sti_dwmac_exit; + plat_dat->fix_mac_speed = data->fix_retime_src; + + ret = sti_dwmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data stih4xx_dwmac_data = { - .fix_mac_speed = stih4xx_fix_retime_src, - .setup = sti_dwmac_setup, - .init = stix4xx_init, - .exit = sti_dwmac_exit, +static const struct sti_dwmac_of_data stih4xx_dwmac_data = { + .fix_retime_src = stih4xx_fix_retime_src, }; -static const struct stmmac_of_data stid127_dwmac_data = { - .fix_mac_speed = stid127_fix_retime_src, - .setup = sti_dwmac_setup, - .init = stid127_init, - .exit = sti_dwmac_exit, +static const struct sti_dwmac_of_data stid127_dwmac_data = { + .fix_retime_src = stid127_fix_retime_src, }; static const struct of_device_id sti_dwmac_match[] = { @@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = { MODULE_DEVICE_TABLE(of, sti_dwmac_match); static struct platform_driver sti_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = sti_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "sti-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 15048ca39759..52b8ed9bd87c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -33,35 +33,6 @@ struct sunxi_priv_data { struct regulator *regulator; }; -static void *sun7i_gmac_setup(struct platform_device *pdev) -{ - struct sunxi_priv_data *gmac; - struct device *dev = &pdev->dev; - - gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) - return ERR_PTR(-ENOMEM); - - gmac->interface = of_get_phy_mode(dev->of_node); - - gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); - if (IS_ERR(gmac->tx_clk)) { - dev_err(dev, "could not get tx clock\n"); - return gmac->tx_clk; - } - - /* Optional regulator for PHY */ - gmac->regulator = devm_regulator_get_optional(dev, "phy"); - if (IS_ERR(gmac->regulator)) { - if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) - return ERR_PTR(-EPROBE_DEFER); - dev_info(dev, "no regulator found\n"); - gmac->regulator = NULL; - } - - return gmac; -} - #define SUN7I_GMAC_GMII_RGMII_RATE 125000000 #define SUN7I_GMAC_MII_RATE 25000000 @@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed) } } -/* of_data specifying hardware features and callbacks. - * hardware features were copied from Allwinner drivers. */ -static const struct stmmac_of_data sun7i_gmac_data = { - .has_gmac = 1, - .tx_coe = 1, - .fix_mac_speed = sun7i_fix_speed, - .setup = sun7i_gmac_setup, - .init = sun7i_gmac_init, - .exit = sun7i_gmac_exit, -}; +static int sun7i_gmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct sunxi_priv_data *gmac; + struct device *dev = &pdev->dev; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return -ENOMEM; + + gmac->interface = of_get_phy_mode(dev->of_node); + + gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); + if (IS_ERR(gmac->tx_clk)) { + dev_err(dev, "could not get tx clock\n"); + return PTR_ERR(gmac->tx_clk); + } + + /* Optional regulator for PHY */ + gmac->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(gmac->regulator)) { + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "no regulator found\n"); + gmac->regulator = NULL; + } + + /* platform data specifying hardware features and callbacks. + * hardware features were copied from Allwinner drivers. 
*/ + plat_dat->tx_coe = 1; + plat_dat->has_gmac = true; + plat_dat->bsp_priv = gmac; + plat_dat->init = sun7i_gmac_init; + plat_dat->exit = sun7i_gmac_exit; + plat_dat->fix_mac_speed = sun7i_fix_speed; + + ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); + if (ret) + return ret; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id sun7i_dwmac_match[] = { - { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, + { .compatible = "allwinner,sun7i-a20-gmac" }, { } }; MODULE_DEVICE_TABLE(of, sun7i_dwmac_match); static struct platform_driver sun7i_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = sun7i_gmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "sun7i-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index bcdc8955c719..d02691ba3d7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) * this function is to read the driver parameters from device-tree and * set some private fields that will be used by the main at runtime. */ -static int stmmac_probe_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat, - const char **mac) +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { struct device_node *np = pdev->dev.of_node; + struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; - const struct of_device_id *device; - struct device *dev = &pdev->dev; - - device = of_match_device(dev->driver->of_match_table, dev); - if (device->data) { - const struct stmmac_of_data *data = device->data; - plat->has_gmac = data->has_gmac; - plat->enh_desc = data->enh_desc; - plat->tx_coe = data->tx_coe; - plat->rx_coe = data->rx_coe; - plat->bugged_jumbo = data->bugged_jumbo; - plat->pmt = data->pmt; - plat->riwt_off = data->riwt_off; - plat->fix_mac_speed = data->fix_mac_speed; - plat->bus_setup = data->bus_setup; - plat->setup = data->setup; - plat->free = data->free; - plat->init = data->init; - plat->exit = data->exit; - } + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return ERR_PTR(-ENOMEM); *mac = of_get_mac_address(np); plat->interface = of_get_phy_mode(np); @@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, /* If phy-handle is not specified, check if we have a fixed-phy */ if (!plat->phy_node && of_phy_is_fixed_link(np)) { if ((of_phy_register_fixed_link(np) < 0)) - return -ENODEV; + return ERR_PTR(-ENODEV); plat->phy_node = of_node_get(np); } @@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, */ plat->maxmtu = JUMBO_LEN; + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + /* * Currently only the properties needed on SPEAr600 * are provided. 
All other properties should be added @@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, GFP_KERNEL); if (!dma_cfg) { of_node_put(np); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); @@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } - return 0; + return plat; } #else -static int stmmac_probe_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat, - const char **mac) +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { - return -ENOSYS; + return ERR_PTR(-ENOSYS); } #endif /* CONFIG_OF */ +EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); -/** - * stmmac_pltfr_probe - platform driver probe. - * @pdev: platform device pointer - * Description: platform_device probe function. It is to allocate - * the necessary platform resources, invoke custom helper (if required) and - * invoke the main probe function. - */ -int stmmac_pltfr_probe(struct platform_device *pdev) +int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res) { - struct stmmac_resources stmmac_res; - int ret = 0; struct resource *res; - struct device *dev = &pdev->dev; - struct plat_stmmacenet_data *plat_dat = NULL; - memset(&stmmac_res, 0, sizeof(stmmac_res)); + memset(stmmac_res, 0, sizeof(*stmmac_res)); /* Get IRQ information early to have an ability to ask for deferred * probe if needed before we went too far with resource allocation. */ - stmmac_res.irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res.irq < 0) { - if (stmmac_res.irq != -EPROBE_DEFER) { - dev_err(dev, + stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); + if (stmmac_res->irq < 0) { + if (stmmac_res->irq != -EPROBE_DEFER) { + dev_err(&pdev->dev, "MAC IRQ configuration information not found\n"); } - return stmmac_res.irq; + return stmmac_res->irq; } /* On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq @@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev) * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ - stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (stmmac_res.wol_irq < 0) { - if (stmmac_res.wol_irq == -EPROBE_DEFER) + stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (stmmac_res->wol_irq < 0) { + if (stmmac_res->wol_irq == -EPROBE_DEFER) return -EPROBE_DEFER; - stmmac_res.wol_irq = stmmac_res.irq; + stmmac_res->wol_irq = stmmac_res->irq; } - stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res.lpi_irq == -EPROBE_DEFER) + stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq == -EPROBE_DEFER) return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - stmmac_res.addr = devm_ioremap_resource(dev, res); - if (IS_ERR(stmmac_res.addr)) - return PTR_ERR(stmmac_res.addr); - - plat_dat = dev_get_platdata(&pdev->dev); - - if (!plat_dat) - plat_dat = devm_kzalloc(&pdev->dev, - sizeof(struct plat_stmmacenet_data), - GFP_KERNEL); - if (!plat_dat) { - pr_err("%s: ERROR: no memory", __func__); - return -ENOMEM; - } - - /* Set default value for multicast hash bins */ - plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; - - /* Set default value for unicast filter entries */ - plat_dat->unicast_filter_entries = 1; - - if (pdev->dev.of_node) { - ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac); - if (ret) { - pr_err("%s: main dt probe failed", __func__); - return ret; - } - } + stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); - /* Custom setup (if needed) */ - if (plat_dat->setup) { - plat_dat->bsp_priv = plat_dat->setup(pdev); - if (IS_ERR(plat_dat->bsp_priv)) - return PTR_ERR(plat_dat->bsp_priv); - } - - /* Custom initialisation (if needed)*/ - if (plat_dat->init) { - ret = plat_dat->init(pdev, plat_dat->bsp_priv); - if (unlikely(ret)) - return ret; - } - - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + return PTR_ERR_OR_ZERO(stmmac_res->addr); } -EXPORT_SYMBOL_GPL(stmmac_pltfr_probe); +EXPORT_SYMBOL_GPL(stmmac_get_platform_resources); /** * stmmac_pltfr_remove @@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev) if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); - if (priv->plat->free) - priv->plat->free(pdev, priv->plat->bsp_priv); - return ret; } EXPORT_SYMBOL_GPL(stmmac_pltfr_remove); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 71da86d7bd00..ffeb8d9e2b2e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -19,7 +19,14 @@ #ifndef __STMMAC_PLATFORM_H__ #define __STMMAC_PLATFORM_H__ -int stmmac_pltfr_probe(struct platform_device *pdev); +#include "stmmac.h" + +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); + +int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res); + int stmmac_pltfr_remove(struct platform_device *pdev); extern const struct dev_pm_ops stmmac_pltfr_pm_ops; diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig new file mode 100644 index 000000000000..a8f315106742 --- /dev/null +++ b/drivers/net/ethernet/synopsys/Kconfig @@ -0,0 +1,27 @@ +# 
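With stmmac_probe_config_dt() and stmmac_get_platform_resources() exported above, every dwmac glue layer can follow the same probe shape that the socfpga, sti and sunxi conversions in this patch use. A minimal sketch of such a probe; the foo_* names are purely hypothetical:

	static int foo_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		/* Grab IRQs and the MMIO region first, so probe deferral
		 * is detected before any further allocation.
		 */
		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		/* Allocates plat_dat and fills it from the DT node. */
		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* Glue-specific callbacks go in before registration. */
		plat_dat->fix_mac_speed = foo_fix_mac_speed;	/* hypothetical */

		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	}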
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+	bool "Synopsys devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) device belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Synopsys devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+	tristate "Synopsys DWC Ethernet QoS v4.10a support"
+	select PHYLIB
+	select CRC32
+	select MII
+	depends on OF
+	---help---
+	  This driver supports the DWC Ethernet QoS IP version 4.10a from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 000000000000..7a375723fc18
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 000000000000..85b3326775b8
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/* Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
+ *
+ * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ * This version introduced a lot of changes which break backwards
+ * compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ * drivers). Some fields differ between version 4.00a and 4.10a, mainly the
+ * interrupt bit fields. The driver could be made compatible with 4.00a, if
+ * all relevant HW errata are handled.
+ *
+ * The GMAC is highly configurable at synthesis time. This driver has been
+ * developed for a subset of the total available feature set. Currently
+ * it supports:
+ * - TSO
+ * - Checksum offload for RX and TX.
+ * - Energy-efficient Ethernet.
+ * - GMII PHY interface.
+ * - The statistics module.
+ * - Single RX and TX queue.
+ *
+ * Copyright (C) 2015 Axis Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME "dwceqos"
+#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION "0.9"
+
+#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+	NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN 8
+#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET 0x0000
+#define MAX_MAC_REG_OFFSET 0x0bd0
+#define START_MTL_REG_OFFSET 0x0c00
+#define MAX_MTL_REG_OFFSET 0x0d7c
+#define START_DMA_REG_OFFSET 0x1000
+#define MAX_DMA_REG_OFFSET 0x117C
+
+#define REG_SPACE_SIZE 0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE 0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
+#define REG_DWCEQOS_DMA_IS 0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
+#define REG_DWCEQOS_DMA_CH0_IE 0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUF 0x115c
+#define REG_DWCEQOS_DMA_CH0_STA 0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR BIT(11)
+#define DWCEQOS_DMA_MODE_DA BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+	(((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+	(((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+	(((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
+
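The OSR-limit and burst macros above pack multi-bit fields into the DMA sysbus mode register; dwceqos_configure_bus() later in this file composes the value the same way. A minimal sketch, with the limits (4 outstanding reads, 8 outstanding writes) chosen arbitrarily for illustration:

	/* The hardware fields hold "limit - 1"; in the burst map, bit n
	 * enables bursts of 1 << (n + 2) beats, so 0x7 allows 4-, 8- and
	 * 16-beat bursts.
	 */
	u32 mode = DWCEQOS_DMA_SYSBUS_MODE_AAL |
		   DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(4 - 1) |  /* bits 19:16 */
		   DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(8 - 1) |  /* bits 27:24 */
		   DWCEQOS_DMA_SYSBUS_MODE_BURST(0x7);            /* bits 3:1 */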
+#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) +#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18) + +#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) +#define DWCEQOS_DMA_CH_CTRL_START BIT(0) +#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) +#define DWCEQOS_DMA_CH_TX_OSP BIT(4) +#define DWCEQOS_DMA_CH_TX_TSE BIT(12) + +#define DWCEQOS_DMA_CH0_IE_NIE BIT(15) +#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) +#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) +#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) +#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) +#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) + +#define DWCEQOS_DMA_IS_DC0IS BIT(0) +#define DWCEQOS_DMA_IS_MTLIS BIT(16) +#define DWCEQOS_DMA_IS_MACIS BIT(17) + +#define DWCEQOS_DMA_CH0_IS_TI BIT(0) +#define DWCEQOS_DMA_CH0_IS_RI BIT(6) +#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) +#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) +#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) +#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) + +#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) +#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) +#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) + +#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) +#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) +#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) + +/* DMA descriptor bits for RX normal descriptor (read format) */ +#define DWCEQOS_DMA_RDES3_OWN BIT(31) +#define DWCEQOS_DMA_RDES3_INTE BIT(30) +#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) +#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) + +/* DMA descriptor bits for RX normal descriptor (write back format) */ +#define DWCEQOS_DMA_RDES1_IPCE BIT(7) +#define DWCEQOS_DMA_RDES3_ES BIT(15) +#define DWCEQOS_DMA_RDES3_E_JT BIT(14) +#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) +#define DWCEQOS_DMA_RDES1_PT 0x00000007 +#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) +#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) +#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 + +/* DMA descriptor bits for TX normal descriptor (read format) */ +#define DWCEQOS_DMA_TDES2_IOC BIT(31) +#define DWCEQOS_DMA_TDES3_OWN BIT(31) +#define DWCEQOS_DMA_TDES3_CTXT BIT(30) +#define DWCEQOS_DMA_TDES3_FD BIT(29) +#define DWCEQOS_DMA_TDES3_LD BIT(28) +#define DWCEQOS_DMA_TDES3_CIPH BIT(16) +#define DWCEQOS_DMA_TDES3_CIPP BIT(17) +#define DWCEQOS_DMA_TDES3_CA 0x00030000 +#define DWCEQOS_DMA_TDES3_TSE BIT(18) +#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) +#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) + +#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) + +/* DMA channel states */ +#define DMA_TX_CH_STOPPED 0 +#define DMA_TX_CH_SUSPENDED 6 + +#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) + +/* MTL */ +#define REG_DWCEQOS_MTL_OPER 0x0c00 +#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c +#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 +#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 + +#define REG_DWCEQOS_MTL_IS 0x0c20 +#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 +#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 +#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 +#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c + +#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c + +#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 + +#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) +#define DWCEQOS_MTL_TXQ_TSF BIT(1) +#define DWCEQOS_MTL_TXQ_FTQ BIT(0) +#define DWCEQOS_MTL_TXQ_TTC512 0x00000070 + +#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) + +#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) +#define DWCEQOS_MTL_RXQ_EHFC BIT(7) +#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) +#define DWCEQOS_MTL_RXQ_FEP BIT(4) +#define DWCEQOS_MTL_RXQ_FUP BIT(3) +#define DWCEQOS_MTL_RXQ_RSF BIT(5) +#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) + +/* MAC 
*/ +#define REG_DWCEQOS_MAC_CFG 0x0000 +#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 +#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 +#define REG_DWCEQOS_MAC_WD_TO 0x000c +#define REG_DWCEQOS_HASTABLE_LO 0x0010 +#define REG_DWCEQOS_HASTABLE_HI 0x0014 +#define REG_DWCEQOS_MAC_IS 0x00b0 +#define REG_DWCEQOS_MAC_IE 0x00b4 +#define REG_DWCEQOS_MAC_STAT 0x00b8 +#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 +#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 +#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 +#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 +#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0 +#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c +#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120 +#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124 +#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010 +#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014 +#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0 +#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4 +#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8 +#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc +#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090 +#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070 + +#define DWCEQOS_MAC_CFG_ACS BIT(20) +#define DWCEQOS_MAC_CFG_JD BIT(17) +#define DWCEQOS_MAC_CFG_JE BIT(16) +#define DWCEQOS_MAC_CFG_PS BIT(15) +#define DWCEQOS_MAC_CFG_FES BIT(14) +#define DWCEQOS_MAC_CFG_DM BIT(13) +#define DWCEQOS_MAC_CFG_DO BIT(10) +#define DWCEQOS_MAC_CFG_TE BIT(1) +#define DWCEQOS_MAC_CFG_IPC BIT(27) +#define DWCEQOS_MAC_CFG_RE BIT(0) + +#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8)) +#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8)) + +#define DWCEQOS_MAC_IS_LPI_INT BIT(5) +#define DWCEQOS_MAC_IS_MMC_INT BIT(8) + +#define DWCEQOS_MAC_RXQ_EN BIT(1) +#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31) +#define DWCEQOS_MAC_PKT_FILT_RA BIT(31) +#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10) +#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9) +#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8) +#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5) +#define DWCEQOS_MAC_PKT_FILT_PM BIT(4) +#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3) +#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2) +#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1) +#define DWCEQOS_MAC_PKT_FILT_PR BIT(0) + +#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8) +#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2 +#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3 +#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0 +#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1 +#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4 +#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5 +#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c +#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2) +#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0) + +#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20) +#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) + +#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0)) + +#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \ + DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \ + DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN) + +#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0) + +#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1) +#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16) +#define 
DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+	(1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR BIT(0)
+
+#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
+
+/* MAC Management Counters */
+#define REG_DWCEQOS_MMC_CTRL 0x0700
+#define REG_DWCEQOS_MMC_RXIRQ 0x0704
+#define REG_DWCEQOS_MMC_TXIRQ 0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR 0x07F0
+#define DWC_MMC_TXLPIUSCNTR 0x07EC
+#define DWC_MMC_TXOVERSIZE_G 0x0778
+#define DWC_MMC_TXVLANPACKETS_G 0x0774
+#define DWC_MMC_TXPAUSEPACKETS 0x0770
+#define DWC_MMC_TXEXCESSDEF 0x076C
+#define DWC_MMC_TXPACKETCOUNT_G 0x0768
+#define DWC_MMC_TXOCTETCOUNT_G 0x0764
+#define DWC_MMC_TXCARRIERERROR 0x0760
+#define DWC_MMC_TXEXCESSCOL 0x075C
+#define DWC_MMC_TXLATECOL 0x0758
+#define DWC_MMC_TXDEFERRED 0x0754
+#define DWC_MMC_TXMULTICOL_G 0x0750
+#define DWC_MMC_TXSINGLECOL_G 0x074C
+#define DWC_MMC_TXUNDERFLOWERROR 0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
+#define DWC_MMC_TX64OCTETS_GB 0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR 0x07F8
+#define DWC_MMC_RXLPIUSCNTR 0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
+#define DWC_MMC_RXRCVERROR 0x07E0
+#define DWC_MMC_RXWATCHDOG 0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
+#define DWC_MMC_RXPAUSEPACKETS 0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
+#define DWC_MMC_RXLENGTHERROR 0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
+#define DWC_MMC_RX64OCTETS_GB 0x07AC
+#define DWC_MMC_RXOVERSIZE_G 0x07A8
+#define DWC_MMC_RXUNDERSIZE_G 0x07A4
+#define DWC_MMC_RXJABBERERROR 0x07A0
+#define DWC_MMC_RXRUNTERROR 0x079C
+#define DWC_MMC_RXALIGNMENTERROR 0x0798
+#define DWC_MMC_RXCRCERROR 0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
+#define DWC_MMC_RXOCTETCOUNT_G 0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring
descriptor. These are used as support descriptors for the HW DMA */ +struct ring_desc { + struct sk_buff *skb; + dma_addr_t mapping; + size_t len; +}; + +/* DMA hardware descriptor */ +struct dwceqos_dma_desc { + u32 des0; + u32 des1; + u32 des2; + u32 des3; +} ____cacheline_aligned; + +struct dwceqos_mmc_counters { + __u64 txlpitranscntr; + __u64 txpiuscntr; + __u64 txoversize_g; + __u64 txvlanpackets_g; + __u64 txpausepackets; + __u64 txexcessdef; + __u64 txpacketcount_g; + __u64 txoctetcount_g; + __u64 txcarriererror; + __u64 txexcesscol; + __u64 txlatecol; + __u64 txdeferred; + __u64 txmulticol_g; + __u64 txsinglecol_g; + __u64 txunderflowerror; + __u64 txbroadcastpackets_gb; + __u64 txmulticastpackets_gb; + __u64 txunicastpackets_gb; + __u64 tx1024tomaxoctets_gb; + __u64 tx512to1023octets_gb; + __u64 tx256to511octets_gb; + __u64 tx128to255octets_gb; + __u64 tx65to127octets_gb; + __u64 tx64octets_gb; + __u64 txmulticastpackets_g; + __u64 txbroadcastpackets_g; + __u64 txpacketcount_gb; + __u64 txoctetcount_gb; + + __u64 rxlpitranscntr; + __u64 rxlpiuscntr; + __u64 rxctrlpackets_g; + __u64 rxrcverror; + __u64 rxwatchdog; + __u64 rxvlanpackets_gb; + __u64 rxfifooverflow; + __u64 rxpausepackets; + __u64 rxoutofrangetype; + __u64 rxlengtherror; + __u64 rxunicastpackets_g; + __u64 rx1024tomaxoctets_gb; + __u64 rx512to1023octets_gb; + __u64 rx256to511octets_gb; + __u64 rx128to255octets_gb; + __u64 rx65to127octets_gb; + __u64 rx64octets_gb; + __u64 rxoversize_g; + __u64 rxundersize_g; + __u64 rxjabbererror; + __u64 rxrunterror; + __u64 rxalignmenterror; + __u64 rxcrcerror; + __u64 rxmulticastpackets_g; + __u64 rxbroadcastpackets_g; + __u64 rxoctetcount_g; + __u64 rxoctetcount_gb; + __u64 rxpacketcount_gb; +}; + +/* Ethtool statistics */ + +struct dwceqos_stat { + const char stat_name[ETH_GSTRING_LEN]; + int offset; +}; + +#define STAT_ITEM(name, var) \ + {\ + name,\ + offsetof(struct dwceqos_mmc_counters, var),\ + } + +static const struct dwceqos_stat dwceqos_ethtool_stats[] = { + STAT_ITEM("tx_bytes", txoctetcount_gb), + STAT_ITEM("tx_packets", txpacketcount_gb), + STAT_ITEM("tx_unicst_packets", txunicastpackets_gb), + STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb), + STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb), + STAT_ITEM("tx_pause_packets", txpausepackets), + STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb), + STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb), + STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb), + STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb), + STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb), + STAT_ITEM("tx_underflow_errors", txunderflowerror), + STAT_ITEM("tx_lpi_count", txlpitranscntr), + + STAT_ITEM("rx_bytes", rxoctetcount_gb), + STAT_ITEM("rx_packets", rxpacketcount_gb), + STAT_ITEM("rx_unicast_packets", rxunicastpackets_g), + STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g), + STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g), + STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb), + STAT_ITEM("rx_pause_packets", rxpausepackets), + STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), + STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), + STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), + STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), + STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), + 
STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), + STAT_ITEM("rx_oversize_packets", rxoversize_g), + STAT_ITEM("rx_undersize_packets", rxundersize_g), + STAT_ITEM("rx_jabbers", rxjabbererror), + STAT_ITEM("rx_align_errors", rxalignmenterror), + STAT_ITEM("rx_crc_errors", rxcrcerror), + STAT_ITEM("rx_lpi_count", rxlpitranscntr), +}; + +/* Configuration of AXI bus parameters. + * These values depend on the parameters set on the MAC core as well + * as the AXI interconnect. + */ +struct dwceqos_bus_cfg { + /* Enable AXI low-power interface. */ + bool en_lpi; + /* Limit on number of outstanding AXI write requests. */ + u32 write_requests; + /* Limit on number of outstanding AXI read requests. */ + u32 read_requests; + /* Bitmap of allowed AXI burst lengths, 4-256 beats. */ + u32 burst_map; + /* DMA Programmable burst length*/ + u32 tx_pbl; + u32 rx_pbl; +}; + +struct dwceqos_flowcontrol { + int autoneg; + int rx; + int rx_current; + int tx; + int tx_current; +}; + +struct net_local { + void __iomem *baseaddr; + struct clk *phy_ref_clk; + struct clk *apb_pclk; + + struct device_node *phy_node; + struct net_device *ndev; + struct platform_device *pdev; + + u32 msg_enable; + + struct tasklet_struct tx_bdreclaim_tasklet; + struct workqueue_struct *txtimeout_handler_wq; + struct work_struct txtimeout_reinit; + + phy_interface_t phy_interface; + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + + unsigned int link; + unsigned int speed; + unsigned int duplex; + + struct napi_struct napi; + + /* DMA Descriptor Areas */ + struct ring_desc *rx_skb; + struct ring_desc *tx_skb; + + struct dwceqos_dma_desc *tx_descs; + struct dwceqos_dma_desc *rx_descs; + + /* DMA Mapped Descriptor areas*/ + dma_addr_t tx_descs_addr; + dma_addr_t rx_descs_addr; + dma_addr_t tx_descs_tail_addr; + dma_addr_t rx_descs_tail_addr; + + size_t tx_free; + size_t tx_next; + size_t rx_cur; + size_t tx_cur; + + /* Spinlocks for accessing DMA Descriptors */ + spinlock_t tx_lock; + + /* Spinlock for register read-modify-writes. */ + spinlock_t hw_lock; + + u32 feature0; + u32 feature1; + u32 feature2; + + struct dwceqos_bus_cfg bus_cfg; + bool en_tx_lpi_clockgating; + + int eee_enabled; + int eee_active; + int csr_val; + u32 gso_size; + + struct dwceqos_mmc_counters mmc_counters; + /* Protect the mmc_counter updates. 
*/ + spinlock_t stats_lock; + u32 mmc_rx_counters_mask; + u32 mmc_tx_counters_mask; + + struct dwceqos_flowcontrol flowcontrol; +}; + +static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, + u32 tx_mask); + +static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, + unsigned int reg_n); +static int dwceqos_stop(struct net_device *ndev); +static int dwceqos_open(struct net_device *ndev); +static void dwceqos_tx_poll_demand(struct net_local *lp); + +static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); +static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); + +static void dwceqos_reset_state(struct net_local *lp); + +#define dwceqos_read(lp, reg) \ + readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) +#define dwceqos_write(lp, reg, val) \ + writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) + +static void dwceqos_reset_state(struct net_local *lp) +{ + lp->link = 0; + lp->speed = 0; + lp->duplex = DUPLEX_UNKNOWN; + lp->flowcontrol.rx_current = 0; + lp->flowcontrol.tx_current = 0; + lp->eee_active = 0; + lp->eee_enabled = 0; +} + +static void print_descriptor(struct net_local *lp, int index, int tx) +{ + struct dwceqos_dma_desc *dd; + + if (tx) + dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; + else + dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; + + pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX", + index, dd); + pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, + dd->des3); +} + +static void print_status(struct net_local *lp) +{ + size_t desci, i; + + pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, + lp->tx_cur, lp->tx_next); + + print_descriptor(lp, lp->rx_cur, 0); + + for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; + i < DWCEQOS_TX_DCNT; + ++i) { + print_descriptor(lp, desci, 1); + desci = (desci + 1) % DWCEQOS_TX_DCNT; + } + + pr_info("DMA_Debug_Status0: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); + pr_info("DMA_CH0_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); + pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", + dwceqos_read(lp, 0x1144)); + pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", + dwceqos_read(lp, 0x1154)); + pr_info("MTL_Debug_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); + pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); + pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); + pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", + dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), + dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); +} + +static void dwceqos_mdio_set_csr(struct net_local *lp) +{ + int rate = clk_get_rate(lp->apb_pclk); + + if (rate <= 20000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; + else if (rate <= 35000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; + else if (rate <= 60000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; + else if (rate <= 100000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; + else if (rate <= 150000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; + else if (rate <= 250000000) + lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; +} + +/* Simple MDIO functions implementing mii_bus */ +static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) +{ + struct net_local *lp = bus->priv; + u32 regval; + int i; + int data; + + regval = DWCEQOS_MDIO_PHYADDR(mii_id) | + DWCEQOS_MDIO_PHYREG(phyreg) | + 
DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | + DWCEQOS_MAC_MDIO_ADDR_GB | + DWCEQOS_MAC_MDIO_ADDR_GOC_READ; + dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); + + for (i = 0; i < 5; ++i) { + usleep_range(64, 128); + if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & + DWCEQOS_MAC_MDIO_ADDR_GB)) + break; + } + + data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); + if (i == 5) { + netdev_warn(lp->ndev, "MDIO read timed out\n"); + data = 0xffff; + } + + return data & 0xffff; +} + +static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, + u16 value) +{ + struct net_local *lp = bus->priv; + u32 regval; + int i; + + dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); + + regval = DWCEQOS_MDIO_PHYADDR(mii_id) | + DWCEQOS_MDIO_PHYREG(phyreg) | + DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | + DWCEQOS_MAC_MDIO_ADDR_GB | + DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; + dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); + + for (i = 0; i < 5; ++i) { + usleep_range(64, 128); + if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & + DWCEQOS_MAC_MDIO_ADDR_GB)) + break; + } + if (i == 5) + netdev_warn(lp->ndev, "MDIO write timed out\n"); + return 0; +} + +static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phy_mii_ioctl(phydev, rq, cmd); + default: + dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); + return -EOPNOTSUPP; + } +} + +static void dwceqos_link_down(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + /* Indicate link down to the LPI state machine */ + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_link_up(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + /* Indicate link up to the LPI state machine */ + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); + + lp->eee_active = !phy_init_eee(lp->phy_dev, 0); + + /* Check for changed EEE capability */ + if (!lp->eee_active && lp->eee_enabled) { + lp->eee_enabled = 0; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); + } +} + +static void dwceqos_set_speed(struct net_local *lp) +{ + struct phy_device *phydev = lp->phy_dev; + u32 regval; + + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); + regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | + DWCEQOS_MAC_CFG_DM); + + if (phydev->duplex) + regval |= DWCEQOS_MAC_CFG_DM; + if (phydev->speed == SPEED_10) { + regval |= DWCEQOS_MAC_CFG_PS; + } else if (phydev->speed == SPEED_100) { + regval |= DWCEQOS_MAC_CFG_PS | + DWCEQOS_MAC_CFG_FES; + } else if (phydev->speed != SPEED_1000) { + netdev_err(lp->ndev, + "unknown PHY speed %d\n", + phydev->speed); + return; + } + + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); +} + +static void 
dwceqos_adjust_link(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + int status_change = 0; + + if (phydev->link) { + if ((lp->speed != phydev->speed) || + (lp->duplex != phydev->duplex)) { + dwceqos_set_speed(lp); + + lp->speed = phydev->speed; + lp->duplex = phydev->duplex; + status_change = 1; + } + + if (lp->flowcontrol.autoneg) { + lp->flowcontrol.rx = phydev->pause || + phydev->asym_pause; + lp->flowcontrol.tx = phydev->pause || + phydev->asym_pause; + } + + if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { + if (netif_msg_link(lp)) + netdev_dbg(ndev, "set rx flow to %d\n", + lp->flowcontrol.rx); + dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); + lp->flowcontrol.rx_current = lp->flowcontrol.rx; + } + if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { + if (netif_msg_link(lp)) + netdev_dbg(ndev, "set tx flow to %d\n", + lp->flowcontrol.tx); + dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); + lp->flowcontrol.tx_current = lp->flowcontrol.tx; + } + } + + if (phydev->link != lp->link) { + lp->link = phydev->link; + status_change = 1; + } + + if (status_change) { + if (phydev->link) { + lp->ndev->trans_start = jiffies; + dwceqos_link_up(lp); + } else { + dwceqos_link_down(lp); + } + phy_print_status(phydev); + } +} + +static int dwceqos_mii_probe(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = NULL; + + if (lp->phy_node) { + phydev = of_phy_connect(lp->ndev, + lp->phy_node, + &dwceqos_adjust_link, + 0, + lp->phy_interface); + + if (!phydev) { + netdev_err(ndev, "no PHY found\n"); + return -1; + } + } else { + netdev_err(ndev, "no PHY configured\n"); + return -ENODEV; + } + + if (netif_msg_probe(lp)) + netdev_dbg(lp->ndev, + "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n", + phydev, phydev->phy_id, phydev->addr); + + phydev->supported &= PHY_GBIT_FEATURES; + + lp->link = 0; + lp->speed = 0; + lp->duplex = DUPLEX_UNKNOWN; + lp->phy_dev = phydev; + + if (netif_msg_probe(lp)) { + netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n", + lp->phy_dev->addr, lp->phy_dev->phy_id); + + netdev_dbg(lp->ndev, "attach [%s] phy driver\n", + lp->phy_dev->drv->name); + } + + return 0; +} + +static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) +{ + struct sk_buff *new_skb; + dma_addr_t new_skb_baddr = 0; + + new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); + if (!new_skb) { + netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); + goto err_out; + } + + new_skb_baddr = dma_map_single(lp->ndev->dev.parent, + new_skb->data, DWCEQOS_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { + netdev_err(lp->ndev, "DMA map error\n"); + dev_kfree_skb(new_skb); + new_skb = NULL; + goto err_out; + } + + lp->rx_descs[index].des0 = new_skb_baddr; + lp->rx_descs[index].des1 = 0; + lp->rx_descs[index].des2 = 0; + lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | + DWCEQOS_DMA_RDES3_BUF1V | + DWCEQOS_DMA_RDES3_OWN; + + lp->rx_skb[index].mapping = new_skb_baddr; + lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; + +err_out: + lp->rx_skb[index].skb = new_skb; +} + +static void dwceqos_clean_rings(struct net_local *lp) +{ + int i; + + if (lp->rx_skb) { + for (i = 0; i < DWCEQOS_RX_DCNT; i++) { + if (lp->rx_skb[i].skb) { + dma_unmap_single(lp->ndev->dev.parent, + lp->rx_skb[i].mapping, + lp->rx_skb[i].len, + DMA_FROM_DEVICE); + + dev_kfree_skb(lp->rx_skb[i].skb); + lp->rx_skb[i].skb = NULL; + 
lp->rx_skb[i].mapping = 0; + } + } + } + + if (lp->tx_skb) { + for (i = 0; i < DWCEQOS_TX_DCNT; i++) { + if (lp->tx_skb[i].skb) { + dev_kfree_skb(lp->tx_skb[i].skb); + lp->tx_skb[i].skb = NULL; + } + if (lp->tx_skb[i].mapping) { + dma_unmap_single(lp->ndev->dev.parent, + lp->tx_skb[i].mapping, + lp->tx_skb[i].len, + DMA_TO_DEVICE); + lp->tx_skb[i].mapping = 0; + } + } + } +} + +static void dwceqos_descriptor_free(struct net_local *lp) +{ + int size; + + dwceqos_clean_rings(lp); + + kfree(lp->tx_skb); + lp->tx_skb = NULL; + kfree(lp->rx_skb); + lp->rx_skb = NULL; + + size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); + if (lp->rx_descs) { + dma_free_coherent(lp->ndev->dev.parent, size, + (void *)(lp->rx_descs), lp->rx_descs_addr); + lp->rx_descs = NULL; + } + + size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); + if (lp->tx_descs) { + dma_free_coherent(lp->ndev->dev.parent, size, + (void *)(lp->tx_descs), lp->tx_descs_addr); + lp->tx_descs = NULL; + } +} + +static int dwceqos_descriptor_init(struct net_local *lp) +{ + int size; + u32 i; + + lp->gso_size = 0; + + lp->tx_skb = NULL; + lp->rx_skb = NULL; + lp->rx_descs = NULL; + lp->tx_descs = NULL; + + /* Reset the DMA indexes */ + lp->rx_cur = 0; + lp->tx_cur = 0; + lp->tx_next = 0; + lp->tx_free = DWCEQOS_TX_DCNT; + + /* Allocate Ring descriptors */ + size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); + lp->rx_skb = kzalloc(size, GFP_KERNEL); + if (!lp->rx_skb) + goto err_out; + + size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); + lp->tx_skb = kzalloc(size, GFP_KERNEL); + if (!lp->tx_skb) + goto err_out; + + /* Allocate DMA descriptors */ + size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); + lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, + &lp->rx_descs_addr, 0); + if (!lp->rx_descs) + goto err_out; + lp->rx_descs_tail_addr = lp->rx_descs_addr + + sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; + + size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); + lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, + &lp->tx_descs_addr, 0); + if (!lp->tx_descs) + goto err_out; + lp->tx_descs_tail_addr = lp->tx_descs_addr + + sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; + + /* Initialize RX Ring Descriptors and buffers */ + for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { + dwceqos_alloc_rxring_desc(lp, i); + if (!(lp->rx_skb[lp->rx_cur].skb)) + goto err_out; + } + + /* Initialize TX Descriptors */ + for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { + lp->tx_descs[i].des0 = 0; + lp->tx_descs[i].des1 = 0; + lp->tx_descs[i].des2 = 0; + lp->tx_descs[i].des3 = 0; + } + + /* Make descriptor writes visible to the DMA. 
*/ + wmb(); + + return 0; + +err_out: + dwceqos_descriptor_free(lp); + return -ENOMEM; +} + +static int dwceqos_packet_avail(struct net_local *lp) +{ + return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); +} + +static void dwceqos_get_hwfeatures(struct net_local *lp) +{ + lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); + lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); + lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); +} + +static void dwceqos_dma_enable_txirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval |= DWCEQOS_DMA_CH0_IE_TIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_dma_disable_txirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval &= ~DWCEQOS_DMA_CH0_IE_TIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_dma_enable_rxirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval |= DWCEQOS_DMA_CH0_IE_RIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_dma_disable_rxirq(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); + regval &= ~DWCEQOS_DMA_CH0_IE_RIE; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_enable_mmc_interrupt(struct net_local *lp) +{ + dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); + dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); +} + +static int dwceqos_mii_init(struct net_local *lp) +{ + int ret = -ENXIO, i; + struct resource res; + struct device_node *mdionode; + + mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); + + if (!mdionode) + return 0; + + lp->mii_bus = mdiobus_alloc(); + if (!lp->mii_bus) { + ret = -ENOMEM; + goto err_out; + } + + lp->mii_bus->name = "DWCEQOS MII bus"; + lp->mii_bus->read = &dwceqos_mdio_read; + lp->mii_bus->write = &dwceqos_mdio_write; + lp->mii_bus->priv = lp; + lp->mii_bus->parent = &lp->ndev->dev; + + lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!lp->mii_bus->irq) { + ret = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + lp->mii_bus->irq[i] = PHY_POLL; + of_address_to_resource(lp->pdev->dev.of_node, 0, &res); + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", + (unsigned long long)res.start); + if (of_mdiobus_register(lp->mii_bus, mdionode)) + goto err_out_free_mdio_irq; + + return 0; + +err_out_free_mdio_irq: + kfree(lp->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(lp->mii_bus); +err_out: + of_node_put(mdionode); + return ret; +} + +/* DMA reset. When issued also resets all MTL and MAC registers as well */ +static void dwceqos_reset_hw(struct net_local *lp) +{ + /* Wait (at most) 0.5 seconds for DMA reset*/ + int i = 5000; + u32 reg; + + /* Force gigabit to guarantee a TX clock for GMII. 
*/ + reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); + reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); + reg |= DWCEQOS_MAC_CFG_DM; + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); + + dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); + + do { + udelay(100); + i--; + reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); + } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); + /* We might experience a timeout if the chip clock mux is broken */ + if (!i) + netdev_err(lp->ndev, "DMA reset timed out!\n"); +} + +static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) +{ + if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { + netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", + dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? + "read" : "write", + dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? + "descr" : "data", + dma_status); + + print_status(lp); + } + if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { + netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", + dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? + "read" : "write", + dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? + "descr" : "data", + dma_status); + + print_status(lp); + } +} + +static void dwceqos_mmc_interrupt(struct net_local *lp) +{ + unsigned long flags; + + spin_lock_irqsave(&lp->stats_lock, flags); + + /* A latched mmc interrupt can not be masked, we must read + * all the counters with an interrupt pending. + */ + dwceqos_read_mmc_counters(lp, + dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), + dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); + + spin_unlock_irqrestore(&lp->stats_lock, flags); +} + +static void dwceqos_mac_interrupt(struct net_local *lp) +{ + u32 cause; + + cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); + + if (cause & DWCEQOS_MAC_IS_MMC_INT) + dwceqos_mmc_interrupt(lp); +} + +static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct net_local *lp = netdev_priv(ndev); + + u32 cause; + u32 dma_status; + irqreturn_t ret = IRQ_NONE; + + cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); + /* DMA Channel 0 Interrupt */ + if (cause & DWCEQOS_DMA_IS_DC0IS) { + dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); + + /* Transmit Interrupt */ + if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { + tasklet_schedule(&lp->tx_bdreclaim_tasklet); + dwceqos_dma_disable_txirq(lp); + } + + /* Receive Interrupt */ + if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { + /* Disable RX IRQs */ + dwceqos_dma_disable_rxirq(lp); + napi_schedule(&lp->napi); + } + + /* Fatal Bus Error interrupt */ + if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { + dwceqos_fatal_bus_error(lp, dma_status); + + /* errata 9000831707 */ + dma_status |= DWCEQOS_DMA_CH0_IS_TEB | + DWCEQOS_DMA_CH0_IS_REB; + } + + /* Ack all DMA Channel 0 IRQs */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); + ret = IRQ_HANDLED; + } + + if (cause & DWCEQOS_DMA_IS_MTLIS) { + u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); + + dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); + ret = IRQ_HANDLED; + } + + if (cause & DWCEQOS_DMA_IS_MACIS) { + dwceqos_mac_interrupt(lp); + ret = IRQ_HANDLED; + } + return ret; +} + +static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); + if (enable) + regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; + else + regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; + dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); + + 
spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) +{ + u32 regval; + unsigned long flags; + + spin_lock_irqsave(&lp->hw_lock, flags); + + /* MTL flow control */ + regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); + if (enable) + regval |= DWCEQOS_MTL_RXQ_EHFC; + else + regval &= ~DWCEQOS_MTL_RXQ_EHFC; + + dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); + + /* MAC flow control */ + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); + if (enable) + regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; + else + regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; + dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); + + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_configure_flow_control(struct net_local *lp) +{ + u32 regval; + unsigned long flags; + int RQS, RFD, RFA; + + spin_lock_irqsave(&lp->hw_lock, flags); + + regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); + + /* The queue size is in units of 256 bytes. We want 512 bytes units for + * the threshold fields. + */ + RQS = ((regval >> 20) & 0x3FF) + 1; + RQS /= 2; + + /* The thresholds are relative to a full queue, with a bias + * of 1 KiByte below full. + */ + RFD = RQS / 2 - 2; + RFA = RQS / 8 - 2; + + regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); + + if (RFD >= 0 && RFA >= 0) { + dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); + } else { + netdev_warn(lp->ndev, + "FIFO too small for flow control."); + } + + regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | + DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; + + dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); + + spin_unlock_irqrestore(&lp->hw_lock, flags); +} + +static void dwceqos_configure_clock(struct net_local *lp) +{ + unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; + + BUG_ON(!rate_mhz); + + dwceqos_write(lp, + REG_DWCEQOS_MAC_1US_TIC_COUNTER, + DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); +} + +static void dwceqos_configure_bus(struct net_local *lp) +{ + u32 sysbus_reg; + + /* N.B. We do not support the Fixed Burst mode because it + * opens a race window by making HW access to DMA descriptors + * non-atomic. + */ + + sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; + + if (lp->bus_cfg.en_lpi) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; + + if (lp->bus_cfg.burst_map) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( + lp->bus_cfg.burst_map); + else + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( + DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); + + if (lp->bus_cfg.read_requests) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( + lp->bus_cfg.read_requests - 1); + else + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( + DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); + + if (lp->bus_cfg.write_requests) + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( + lp->bus_cfg.write_requests - 1); + else + sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( + DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); + + if (netif_msg_hw(lp)) + netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); + + dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); +} + +static void dwceqos_init_hw(struct net_local *lp) +{ + u32 regval; + u32 buswidth; + u32 dma_skip; + + /* Software reset */ + dwceqos_reset_hw(lp); + + dwceqos_configure_bus(lp); + + /* Probe data bus width, 32/64/128 bits. */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); + buswidth = (regval ^ 0xF) + 1; + + /* Cache-align dma descriptors. 
*/ + dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, + DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | + DWCEQOS_DMA_CH_CTRL_PBLX8); + + /* Initialize DMA Channel 0 */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, + (u32)lp->tx_descs_addr); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, + (u32)lp->rx_descs_addr); + + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, + lp->tx_descs_tail_addr); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, + lp->rx_descs_tail_addr); + + if (lp->bus_cfg.tx_pbl) + regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); + else + regval = DWCEQOS_DMA_CH_CTRL_PBL(2); + + /* Enable TSO if the HW supports it */ + if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) + regval |= DWCEQOS_DMA_CH_TX_TSE; + + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); + + if (lp->bus_cfg.rx_pbl) + regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); + else + regval = DWCEQOS_DMA_CH_CTRL_PBL(2); + + regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); + + regval |= DWCEQOS_DMA_CH_CTRL_START; + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); + + /* Initialize MTL Queues */ + regval = DWCEQOS_MTL_SCHALG_STRICT; + dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); + + regval = DWCEQOS_MTL_TXQ_SIZE( + DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | + DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | + DWCEQOS_MTL_TXQ_TTC512; + dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); + + regval = DWCEQOS_MTL_RXQ_SIZE( + DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | + DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF; + dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); + + dwceqos_configure_flow_control(lp); + + /* Initialize MAC */ + dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); + + lp->eee_enabled = 0; + + dwceqos_configure_clock(lp); + + /* MMC counters */ + + /* probe implemented counters */ + dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); + dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); + lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); + lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); + + dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | + DWCEQOS_MMC_CTRL_RSTONRD); + dwceqos_enable_mmc_interrupt(lp); + + /* Enable Interrupts */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, + DWCEQOS_DMA_CH0_IE_NIE | + DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | + DWCEQOS_DMA_CH0_IE_AIE | + DWCEQOS_DMA_CH0_IE_FBEE); + + dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); + + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | + DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); + + /* Start TX DMA */ + regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, + regval | DWCEQOS_DMA_CH_CTRL_START); + + /* Enable MAC TX/RX */ + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); + dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, + regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); +} + +static void dwceqos_tx_reclaim(unsigned long data) +{ + struct net_device *ndev = (struct net_device *)data; + struct net_local *lp = netdev_priv(ndev); + unsigned int tx_bytes = 0; + unsigned int tx_packets = 0; + + spin_lock(&lp->tx_lock); + + while (lp->tx_free < DWCEQOS_TX_DCNT)
{ + struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; + struct ring_desc *rd = &lp->tx_skb[lp->tx_cur]; + + /* Descriptor still being held by DMA ? */ + if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) + break; + + if (rd->mapping) + dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, + DMA_TO_DEVICE); + + if (unlikely(rd->skb)) { + ++tx_packets; + tx_bytes += rd->skb->len; + dev_consume_skb_any(rd->skb); + } + + rd->skb = NULL; + rd->mapping = 0; + lp->tx_free++; + lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; + + if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && + (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { + if (netif_msg_tx_err(lp)) + netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", + dd->des3); + if (netif_msg_hw(lp)) + print_status(lp); + } + } + spin_unlock(&lp->tx_lock); + + netdev_completed_queue(ndev, tx_packets, tx_bytes); + + dwceqos_dma_enable_txirq(lp); + netif_wake_queue(ndev); +} + +static int dwceqos_rx(struct net_local *lp, int budget) +{ + struct sk_buff *skb; + u32 tot_size = 0; + unsigned int n_packets = 0; + unsigned int n_descs = 0; + u32 len; + + struct dwceqos_dma_desc *dd; + struct sk_buff *new_skb; + dma_addr_t new_skb_baddr = 0; + + while (n_descs < budget) { + if (!dwceqos_packet_avail(lp)) + break; + + new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); + if (!new_skb) { + netdev_err(lp->ndev, "no memory for new sk_buff\n"); + break; + } + + /* Get dma handle of skb->data */ + new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, + new_skb->data, + DWCEQOS_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { + netdev_err(lp->ndev, "DMA map error\n"); + dev_kfree_skb(new_skb); + break; + } + + /* Read descriptor data after reading owner bit. */ + dma_rmb(); + + dd = &lp->rx_descs[lp->rx_cur]; + len = DWCEQOS_DMA_RDES3_PL(dd->des3); + skb = lp->rx_skb[lp->rx_cur].skb; + + /* Unmap old buffer */ + dma_unmap_single(lp->ndev->dev.parent, + lp->rx_skb[lp->rx_cur].mapping, + lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); + + /* Discard packet on reception error or bad checksum */ + if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || + (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { + dev_kfree_skb(skb); + skb = NULL; + } else { + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, lp->ndev); + switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { + case DWCEQOS_DMA_RDES1_PT_UDP: + case DWCEQOS_DMA_RDES1_PT_TCP: + case DWCEQOS_DMA_RDES1_PT_ICMP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + skb->ip_summed = CHECKSUM_NONE; + break; + } + } + + if (unlikely(!skb)) { + if (netif_msg_rx_err(lp)) + netdev_dbg(lp->ndev, "rx error: des3=%X\n", + lp->rx_descs[lp->rx_cur].des3); + } else { + tot_size += skb->len; + n_packets++; + + netif_receive_skb(skb); + } + + lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; + lp->rx_descs[lp->rx_cur].des1 = 0; + lp->rx_descs[lp->rx_cur].des2 = 0; + /* The DMA must observe des0/1/2 written before des3. */ + wmb(); + lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | + DWCEQOS_DMA_RDES3_OWN | + DWCEQOS_DMA_RDES3_BUF1V; + + lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; + lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; + lp->rx_skb[lp->rx_cur].skb = new_skb; + + n_descs++; + lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; + } + + /* Make sure any ownership update is written to the descriptors before + * DMA wakeup. 
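+ * Without the barrier the tail-pointer write below could be observed + * by the device before the OWN bits above reach memory, letting the + * DMA fetch stale descriptor contents.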
+ */ + wmb(); + + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI); + /* Wake up RX by writing tail pointer */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, + lp->rx_descs_tail_addr); + + return n_descs; +} + +static int dwceqos_rx_poll(struct napi_struct *napi, int budget) +{ + struct net_local *lp = container_of(napi, struct net_local, napi); + int work_done = 0; + + work_done = dwceqos_rx(lp, budget - work_done); + + if (!dwceqos_packet_avail(lp) && work_done < budget) { + napi_complete(napi); + dwceqos_dma_enable_rxirq(lp); + } else { + work_done = budget; + } + + return work_done; +} + +/* Reinitialize the hardware after a TX timeout */ +static void dwceqos_reinit_for_txtimeout(struct work_struct *data) +{ + struct net_local *lp = container_of(data, struct net_local, + txtimeout_reinit); + + netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n", + DWCEQOS_TX_TIMEOUT); + + if (netif_msg_hw(lp)) + print_status(lp); + + rtnl_lock(); + dwceqos_stop(lp->ndev); + dwceqos_open(lp->ndev); + rtnl_unlock(); +} + +/* DT Probing function called by main probe */ +static inline int dwceqos_probe_config_dt(struct platform_device *pdev) +{ + struct net_device *ndev; + struct net_local *lp; + const void *mac_address; + struct dwceqos_bus_cfg *bus_cfg; + struct device_node *np = pdev->dev.of_node; + + ndev = platform_get_drvdata(pdev); + lp = netdev_priv(ndev); + bus_cfg = &lp->bus_cfg; + + /* Set the MAC address. */ + mac_address = of_get_mac_address(pdev->dev.of_node); + if (mac_address) + ether_addr_copy(ndev->dev_addr, mac_address); + + /* These are all optional parameters */ + lp->en_tx_lpi_clockgating = of_property_read_bool(np, + "snps,en-tx-lpi-clockgating"); + bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi"); + of_property_read_u32(np, "snps,write-requests", + &bus_cfg->write_requests); + of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests); + of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map); + of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl); + of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl); + + netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%u\n", + bus_cfg->en_lpi, + bus_cfg->write_requests, + bus_cfg->read_requests, + bus_cfg->burst_map, + bus_cfg->rx_pbl, + bus_cfg->tx_pbl); + + return 0; +} + +static int dwceqos_open(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + int res; + + dwceqos_reset_state(lp); + res = dwceqos_descriptor_init(lp); + if (res) { + netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res); + return res; + } + netdev_reset_queue(ndev); + + napi_enable(&lp->napi); + phy_start(lp->phy_dev); + dwceqos_init_hw(lp); + + netif_start_queue(ndev); + tasklet_enable(&lp->tx_bdreclaim_tasklet); + + return 0; +} + +static bool dweqos_is_tx_dma_suspended(struct net_local *lp) +{ + u32 reg; + + reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0); + reg = DMA_GET_TX_STATE_CH0(reg); + + return reg == DMA_TX_CH_SUSPENDED; +} + +static void dwceqos_drain_dma(struct net_local *lp) +{ + /* Wait for all pending TX buffers to be sent. Upper limit based + * on max frame size on a 10 Mbit link.
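+ * A 1518-byte frame takes roughly 1.2 ms on the wire at 10 Mbit/s and + * each poll below sleeps for about 100 us, so the budget amounts to + * 1250 / 100 = 12.5 polls per queued descriptor.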
+ */ + size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100; + + while (!dweqos_is_tx_dma_suspended(lp) && limit--) + usleep_range(100, 200); +} + +static int dwceqos_stop(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + + phy_stop(lp->phy_dev); + + tasklet_disable(&lp->tx_bdreclaim_tasklet); + netif_stop_queue(ndev); + napi_disable(&lp->napi); + + dwceqos_drain_dma(lp); + + netif_tx_lock(lp->ndev); + dwceqos_reset_hw(lp); + dwceqos_descriptor_free(lp); + netif_tx_unlock(lp->ndev); + + return 0; +} + +static void dwceqos_dmadesc_set_ctx(struct net_local *lp, + unsigned short gso_size) +{ + struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next]; + + dd->des0 = 0; + dd->des1 = 0; + dd->des2 = gso_size; + dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV; + + lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; +} + +static void dwceqos_tx_poll_demand(struct net_local *lp) +{ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, + lp->tx_descs_tail_addr); +} + +struct dwceqos_tx { + size_t nr_descriptors; + size_t initial_descriptor; + size_t last_descriptor; + size_t prev_gso_size; + size_t network_header_len; +}; + +static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + size_t n = 1; + size_t i; + + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) + ++n; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) / + BYTES_PER_DMA_DESC; + } + + tx->nr_descriptors = n; + tx->initial_descriptor = lp->tx_next; + tx->last_descriptor = lp->tx_next; + tx->prev_gso_size = lp->gso_size; + + tx->network_header_len = skb_transport_offset(skb); + if (skb_is_gso(skb)) + tx->network_header_len += tcp_hdrlen(skb); +} + +static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + struct ring_desc *rd; + struct dwceqos_dma_desc *dd; + size_t payload_len; + dma_addr_t dma_handle; + + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) { + dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size); + lp->gso_size = skb_shinfo(skb)->gso_size; + } + + dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + + if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { + netdev_err(lp->ndev, "TX DMA Mapping error\n"); + return -ENOMEM; + } + + rd = &lp->tx_skb[lp->tx_next]; + dd = &lp->tx_descs[lp->tx_next]; + + rd->skb = NULL; + rd->len = skb_headlen(skb); + rd->mapping = dma_handle; + + /* Set up DMA Descriptor */ + dd->des0 = dma_handle; + + if (skb_is_gso(skb)) { + payload_len = skb_headlen(skb) - tx->network_header_len; + + if (payload_len) + dd->des1 = dma_handle + tx->network_header_len; + dd->des2 = tx->network_header_len | + DWCEQOS_DMA_DES2_B2L(payload_len); + dd->des3 = DWCEQOS_DMA_TDES3_TSE | + DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) | + (skb->len - tx->network_header_len); + } else { + dd->des1 = 0; + dd->des2 = skb_headlen(skb); + dd->des3 = skb->len; + + switch (skb->ip_summed) { + case CHECKSUM_PARTIAL: + dd->des3 |= DWCEQOS_DMA_TDES3_CA; + case CHECKSUM_NONE: + case CHECKSUM_UNNECESSARY: + case CHECKSUM_COMPLETE: + default: + break; + } + } + + dd->des3 |= DWCEQOS_DMA_TDES3_FD; + if (lp->tx_next != tx->initial_descriptor) + dd->des3 |= DWCEQOS_DMA_TDES3_OWN; + + tx->last_descriptor = lp->tx_next; + lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; + + return 0; +} + +static int dwceqos_tx_frags(struct sk_buff 
*skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + struct ring_desc *rd = NULL; + struct dwceqos_dma_desc *dd; + dma_addr_t dma_handle; + size_t i; + + /* Set up more ring and DMA descriptors if the packet is fragmented */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + size_t frag_size; + size_t consumed_size; + + /* Map DMA Area */ + dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) { + netdev_err(lp->ndev, "DMA Mapping error\n"); + return -ENOMEM; + } + + /* order-3 fragments span more than one descriptor. */ + frag_size = skb_frag_size(frag); + consumed_size = 0; + while (consumed_size < frag_size) { + size_t dma_size = min_t(size_t, 16376, + frag_size - consumed_size); + + rd = &lp->tx_skb[lp->tx_next]; + memset(rd, 0, sizeof(*rd)); + + dd = &lp->tx_descs[lp->tx_next]; + + /* Set DMA Descriptor fields */ + dd->des0 = dma_handle; + dd->des1 = 0; + dd->des2 = dma_size; + + if (skb_is_gso(skb)) + dd->des3 = (skb->len - tx->network_header_len); + else + dd->des3 = skb->len; + + dd->des3 |= DWCEQOS_DMA_TDES3_OWN; + + tx->last_descriptor = lp->tx_next; + lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT; + consumed_size += dma_size; + } + + rd->len = skb_frag_size(frag); + rd->mapping = dma_handle; + } + + return 0; +} + +static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp, + struct dwceqos_tx *tx) +{ + lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD; + lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC; + + lp->tx_skb[tx->last_descriptor].skb = skb; + + /* Make all descriptor updates visible to the DMA before setting the + * owner bit. + */ + wmb(); + + lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN; + + /* Make the owner bit visible before TX wakeup. 
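+ * dwceqos_tx_linear() deliberately left OWN clear on the first + * descriptor while the rest of the chain was built, so the DMA could + * not start on a half-written chain; the wmb() above ordered the + * chain writes before that hand-over, and the barrier below orders + * the hand-over before the doorbell in dwceqos_tx_poll_demand(). +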
*/ + wmb(); + + dwceqos_tx_poll_demand(lp); +} + +static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx) +{ + size_t i = tx->initial_descriptor; + + while (i != lp->tx_next) { + if (lp->tx_skb[i].mapping) + dma_unmap_single(lp->ndev->dev.parent, + lp->tx_skb[i].mapping, + lp->tx_skb[i].len, + DMA_TO_DEVICE); + + lp->tx_skb[i].mapping = 0; + lp->tx_skb[i].skb = NULL; + + memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i])); + + i = (i + 1) % DWCEQOS_TX_DCNT; + } + + lp->tx_next = tx->initial_descriptor; + lp->gso_size = tx->prev_gso_size; +} + +static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct dwceqos_tx trans; + int err; + + dwceqos_tx_prepare(skb, lp, &trans); + if (lp->tx_free < trans.nr_descriptors) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + err = dwceqos_tx_linear(skb, lp, &trans); + if (err) + goto tx_error; + + err = dwceqos_tx_frags(skb, lp, &trans); + if (err) + goto tx_error; + + WARN_ON(lp->tx_next != + ((trans.initial_descriptor + trans.nr_descriptors) % + DWCEQOS_TX_DCNT)); + + dwceqos_tx_finalize(skb, lp, &trans); + + netdev_sent_queue(ndev, skb->len); + + spin_lock_bh(&lp->tx_lock); + lp->tx_free -= trans.nr_descriptors; + spin_unlock_bh(&lp->tx_lock); + + ndev->trans_start = jiffies; + return 0; + +tx_error: + dwceqos_tx_rollback(lp, &trans); + dev_kfree_skb(skb); + return 0; +} + +/* Set MAC address and then update HW accordingly */ +static int dwceqos_set_mac_address(struct net_device *ndev, void *addr) +{ + struct net_local *lp = netdev_priv(ndev); + struct sockaddr *hwaddr = (struct sockaddr *)addr; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(hwaddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len); + + dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); + return 0; +} + +static void dwceqos_tx_timeout(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + + queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit); +} + +static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, + unsigned int reg_n) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), + data | DWCEQOS_MAC_MAC_ADDR_HI_EN); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data); +} + +static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n) +{ + /* Do not disable MAC address 0 */ + if (reg_n != 0) + dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0); +} + +static void dwceqos_set_rx_mode(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + u32 regval = 0; + u32 mc_filter[2]; + int reg = 1; + struct netdev_hw_addr *ha; + unsigned int max_mac_addr; + + max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); + + if (ndev->flags & IFF_PROMISC) { + regval = DWCEQOS_MAC_PKT_FILT_PR; + } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) || + (ndev->flags & IFF_ALLMULTI))) { + regval = DWCEQOS_MAC_PKT_FILT_PM; + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff); + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff); + } else if (!netdev_mc_empty(ndev)) { + regval = DWCEQOS_MAC_PKT_FILT_HMC; + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, ndev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contents of the hash table + */ + int bit_nr = 
bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; + /* The most significant bit determines the register + * to use (H/L) while the other 5 bits determine + * the bit within the register. + */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]); + dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]); + } + if (netdev_uc_count(ndev) > max_mac_addr) { + regval |= DWCEQOS_MAC_PKT_FILT_PR; + } else { + netdev_for_each_uc_addr(ha, ndev) { + dwceqos_set_umac_addr(lp, ha->addr, reg); + reg++; + } + for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++) + dwceqos_disable_umac_addr(lp, reg); + } + dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void dwceqos_poll_controller(struct net_device *ndev) +{ + disable_irq(ndev->irq); + dwceqos_interrupt(ndev->irq, ndev); + enable_irq(ndev->irq); +} +#endif + +static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, + u32 tx_mask) +{ + if (tx_mask & BIT(27)) + lp->mmc_counters.txlpitranscntr += + dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR); + if (tx_mask & BIT(26)) + lp->mmc_counters.txpiuscntr += + dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR); + if (tx_mask & BIT(25)) + lp->mmc_counters.txoversize_g += + dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G); + if (tx_mask & BIT(24)) + lp->mmc_counters.txvlanpackets_g += + dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G); + if (tx_mask & BIT(23)) + lp->mmc_counters.txpausepackets += + dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS); + if (tx_mask & BIT(22)) + lp->mmc_counters.txexcessdef += + dwceqos_read(lp, DWC_MMC_TXEXCESSDEF); + if (tx_mask & BIT(21)) + lp->mmc_counters.txpacketcount_g += + dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G); + if (tx_mask & BIT(20)) + lp->mmc_counters.txoctetcount_g += + dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G); + if (tx_mask & BIT(19)) + lp->mmc_counters.txcarriererror += + dwceqos_read(lp, DWC_MMC_TXCARRIERERROR); + if (tx_mask & BIT(18)) + lp->mmc_counters.txexcesscol += + dwceqos_read(lp, DWC_MMC_TXEXCESSCOL); + if (tx_mask & BIT(17)) + lp->mmc_counters.txlatecol += + dwceqos_read(lp, DWC_MMC_TXLATECOL); + if (tx_mask & BIT(16)) + lp->mmc_counters.txdeferred += + dwceqos_read(lp, DWC_MMC_TXDEFERRED); + if (tx_mask & BIT(15)) + lp->mmc_counters.txmulticol_g += + dwceqos_read(lp, DWC_MMC_TXMULTICOL_G); + if (tx_mask & BIT(14)) + lp->mmc_counters.txsinglecol_g += + dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G); + if (tx_mask & BIT(13)) + lp->mmc_counters.txunderflowerror += + dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR); + if (tx_mask & BIT(12)) + lp->mmc_counters.txbroadcastpackets_gb += + dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB); + if (tx_mask & BIT(11)) + lp->mmc_counters.txmulticastpackets_gb += + dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB); + if (tx_mask & BIT(10)) + lp->mmc_counters.txunicastpackets_gb += + dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB); + if (tx_mask & BIT(9)) + lp->mmc_counters.tx1024tomaxoctets_gb += + dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB); + if (tx_mask & BIT(8)) + lp->mmc_counters.tx512to1023octets_gb += + dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB); + if (tx_mask & BIT(7)) + lp->mmc_counters.tx256to511octets_gb += + dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB); + if (tx_mask & BIT(6)) + lp->mmc_counters.tx128to255octets_gb += + dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB); + if (tx_mask & BIT(5)) + lp->mmc_counters.tx65to127octets_gb += + dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB); + if (tx_mask & BIT(4)) + lp->mmc_counters.tx64octets_gb 
+= + dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB); + if (tx_mask & BIT(3)) + lp->mmc_counters.txmulticastpackets_g += + dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G); + if (tx_mask & BIT(2)) + lp->mmc_counters.txbroadcastpackets_g += + dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G); + if (tx_mask & BIT(1)) + lp->mmc_counters.txpacketcount_gb += + dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB); + if (tx_mask & BIT(0)) + lp->mmc_counters.txoctetcount_gb += + dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB); + + if (rx_mask & BIT(27)) + lp->mmc_counters.rxlpitranscntr += + dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR); + if (rx_mask & BIT(26)) + lp->mmc_counters.rxlpiuscntr += + dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR); + if (rx_mask & BIT(25)) + lp->mmc_counters.rxctrlpackets_g += + dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G); + if (rx_mask & BIT(24)) + lp->mmc_counters.rxrcverror += + dwceqos_read(lp, DWC_MMC_RXRCVERROR); + if (rx_mask & BIT(23)) + lp->mmc_counters.rxwatchdog += + dwceqos_read(lp, DWC_MMC_RXWATCHDOG); + if (rx_mask & BIT(22)) + lp->mmc_counters.rxvlanpackets_gb += + dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB); + if (rx_mask & BIT(21)) + lp->mmc_counters.rxfifooverflow += + dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW); + if (rx_mask & BIT(20)) + lp->mmc_counters.rxpausepackets += + dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS); + if (rx_mask & BIT(19)) + lp->mmc_counters.rxoutofrangetype += + dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE); + if (rx_mask & BIT(18)) + lp->mmc_counters.rxlengtherror += + dwceqos_read(lp, DWC_MMC_RXLENGTHERROR); + if (rx_mask & BIT(17)) + lp->mmc_counters.rxunicastpackets_g += + dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G); + if (rx_mask & BIT(16)) + lp->mmc_counters.rx1024tomaxoctets_gb += + dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB); + if (rx_mask & BIT(15)) + lp->mmc_counters.rx512to1023octets_gb += + dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB); + if (rx_mask & BIT(14)) + lp->mmc_counters.rx256to511octets_gb += + dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB); + if (rx_mask & BIT(13)) + lp->mmc_counters.rx128to255octets_gb += + dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB); + if (rx_mask & BIT(12)) + lp->mmc_counters.rx65to127octets_gb += + dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB); + if (rx_mask & BIT(11)) + lp->mmc_counters.rx64octets_gb += + dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB); + if (rx_mask & BIT(10)) + lp->mmc_counters.rxoversize_g += + dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G); + if (rx_mask & BIT(9)) + lp->mmc_counters.rxundersize_g += + dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G); + if (rx_mask & BIT(8)) + lp->mmc_counters.rxjabbererror += + dwceqos_read(lp, DWC_MMC_RXJABBERERROR); + if (rx_mask & BIT(7)) + lp->mmc_counters.rxrunterror += + dwceqos_read(lp, DWC_MMC_RXRUNTERROR); + if (rx_mask & BIT(6)) + lp->mmc_counters.rxalignmenterror += + dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR); + if (rx_mask & BIT(5)) + lp->mmc_counters.rxcrcerror += + dwceqos_read(lp, DWC_MMC_RXCRCERROR); + if (rx_mask & BIT(4)) + lp->mmc_counters.rxmulticastpackets_g += + dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G); + if (rx_mask & BIT(3)) + lp->mmc_counters.rxbroadcastpackets_g += + dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G); + if (rx_mask & BIT(2)) + lp->mmc_counters.rxoctetcount_g += + dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G); + if (rx_mask & BIT(1)) + lp->mmc_counters.rxoctetcount_gb += + dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB); + if (rx_mask & BIT(0)) + lp->mmc_counters.rxpacketcount_gb += + dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB); +} + +static struct rtnl_link_stats64* 
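+/* rx_errors below is derived as total received frames (the _gb + * counters count "good and bad") minus the good unicast, broadcast + * and multicast counts, since the MMC block does not appear to expose + * a single aggregate receive-error counter. + */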
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) +{ + unsigned long flags; + struct net_local *lp = netdev_priv(ndev); + struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters; + + spin_lock_irqsave(&lp->stats_lock, flags); + dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, + lp->mmc_tx_counters_mask); + spin_unlock_irqrestore(&lp->stats_lock, flags); + + s->rx_packets = hwstats->rxpacketcount_gb; + s->rx_bytes = hwstats->rxoctetcount_gb; + s->rx_errors = hwstats->rxpacketcount_gb - + hwstats->rxbroadcastpackets_g - + hwstats->rxmulticastpackets_g - + hwstats->rxunicastpackets_g; + s->multicast = hwstats->rxmulticastpackets_g; + s->rx_length_errors = hwstats->rxlengtherror; + s->rx_crc_errors = hwstats->rxcrcerror; + s->rx_fifo_errors = hwstats->rxfifooverflow; + + s->tx_packets = hwstats->txpacketcount_gb; + s->tx_bytes = hwstats->txoctetcount_gb; + + if (lp->mmc_tx_counters_mask & BIT(21)) + s->tx_errors = hwstats->txpacketcount_gb - + hwstats->txpacketcount_g; + else + s->tx_errors = hwstats->txunderflowerror + + hwstats->txcarriererror; + + return s; +} + +static int +dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, ecmd); +} + +static int +dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, ecmd); +} + +static void +dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) +{ + const struct net_local *lp = netdev_priv(ndev); + + strcpy(ed->driver, lp->pdev->dev.driver->name); + strcpy(ed->version, DRIVER_VERSION); +} + +static void dwceqos_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + const struct net_local *lp = netdev_priv(ndev); + + pp->autoneg = lp->flowcontrol.autoneg; + pp->tx_pause = lp->flowcontrol.tx; + pp->rx_pause = lp->flowcontrol.rx; +} + +static int dwceqos_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + struct net_local *lp = netdev_priv(ndev); + int ret = 0; + + lp->flowcontrol.autoneg = pp->autoneg; + if (pp->autoneg) { + lp->phy_dev->advertising |= ADVERTISED_Pause; + lp->phy_dev->advertising |= ADVERTISED_Asym_Pause; + } else { + lp->phy_dev->advertising &= ~ADVERTISED_Pause; + lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause; + lp->flowcontrol.rx = pp->rx_pause; + lp->flowcontrol.tx = pp->tx_pause; + } + + if (netif_running(ndev)) + ret = phy_start_aneg(lp->phy_dev); + + return ret; +} + +static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, + u8 *data) +{ + size_t i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { + memcpy(data, dwceqos_ethtool_stats[i].stat_name, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } +} + +static void dwceqos_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct net_local *lp = netdev_priv(ndev); + unsigned long flags; + size_t i; + u8 *mmcstat = (u8 *)&lp->mmc_counters; + + spin_lock_irqsave(&lp->stats_lock, flags); + dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, + lp->mmc_tx_counters_mask); + spin_unlock_irqrestore(&lp->stats_lock, flags); + + for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { + memcpy(data, + mmcstat + 
dwceqos_ethtool_stats[i].offset, + sizeof(u64)); + data++; + } +} + +static int dwceqos_get_sset_count(struct net_device *ndev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(dwceqos_ethtool_stats); + + return -EOPNOTSUPP; +} + +static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *space) +{ + const struct net_local *lp = netdev_priv(dev); + u32 *reg_space = (u32 *)space; + int reg_offset; + int reg_ix = 0; + + /* MAC registers */ + for (reg_offset = START_MAC_REG_OFFSET; + reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = dwceqos_read(lp, reg_offset); + reg_ix++; + } + /* MTL registers */ + for (reg_offset = START_MTL_REG_OFFSET; + reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = dwceqos_read(lp, reg_offset); + reg_ix++; + } + + /* DMA registers */ + for (reg_offset = START_DMA_REG_OFFSET; + reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = dwceqos_read(lp, reg_offset); + reg_ix++; + } + + BUG_ON(4 * reg_ix > REG_SPACE_SIZE); +} + +static int dwceqos_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) +{ + return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; +} + +static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) +{ + return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; +} + +static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct net_local *lp = netdev_priv(ndev); + u32 lpi_status; + u32 lpi_enabled; + + if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) + return -EOPNOTSUPP; + + edata->eee_active = lp->eee_active; + edata->eee_enabled = lp->eee_enabled; + edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); + lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); + edata->tx_lpi_enabled = lpi_enabled; + + if (netif_msg_hw(lp)) { + u32 regval; + + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + + netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", + dwceqos_get_rx_lpi_state(regval), + dwceqos_get_tx_lpi_state(regval)); + } + + return phy_ethtool_get_eee(lp->phy_dev, edata); +} + +static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct net_local *lp = netdev_priv(ndev); + u32 regval; + unsigned long flags; + + if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) + return -EOPNOTSUPP; + + if (edata->eee_enabled && !lp->eee_active) + return -EOPNOTSUPP; + + if (edata->tx_lpi_enabled) { + if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || + edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) + return -EINVAL; + } + + lp->eee_enabled = edata->eee_enabled; + + if (edata->eee_enabled && edata->tx_lpi_enabled) { + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, + edata->tx_lpi_timer); + + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; + if (lp->en_tx_lpi_clockgating) + regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + spin_unlock_irqrestore(&lp->hw_lock, flags); + } else { + spin_lock_irqsave(&lp->hw_lock, flags); + regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); + regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; + dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); + 
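/* Clearing the enable bits stops the MAC from requesting LPI; + * the PHY side of EEE is updated by phy_ethtool_set_eee() below. + */ +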
spin_unlock_irqrestore(&lp->hw_lock, flags); + } + + return phy_ethtool_set_eee(lp->phy_dev, edata); +} + +static u32 dwceqos_get_msglevel(struct net_device *ndev) +{ + const struct net_local *lp = netdev_priv(ndev); + + return lp->msg_enable; +} + +static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) +{ + struct net_local *lp = netdev_priv(ndev); + + lp->msg_enable = msglevel; +} + +static struct ethtool_ops dwceqos_ethtool_ops = { + .get_settings = dwceqos_get_settings, + .set_settings = dwceqos_set_settings, + .get_drvinfo = dwceqos_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_pauseparam = dwceqos_get_pauseparam, + .set_pauseparam = dwceqos_set_pauseparam, + .get_strings = dwceqos_get_strings, + .get_ethtool_stats = dwceqos_get_ethtool_stats, + .get_sset_count = dwceqos_get_sset_count, + .get_regs = dwceqos_get_regs, + .get_regs_len = dwceqos_get_regs_len, + .get_eee = dwceqos_get_eee, + .set_eee = dwceqos_set_eee, + .get_msglevel = dwceqos_get_msglevel, + .set_msglevel = dwceqos_set_msglevel, +}; + +static struct net_device_ops netdev_ops = { + .ndo_open = dwceqos_open, + .ndo_stop = dwceqos_stop, + .ndo_start_xmit = dwceqos_start_xmit, + .ndo_set_rx_mode = dwceqos_set_rx_mode, + .ndo_set_mac_address = dwceqos_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = dwceqos_poll_controller, +#endif + .ndo_do_ioctl = dwceqos_ioctl, + .ndo_tx_timeout = dwceqos_tx_timeout, + .ndo_get_stats64 = dwceqos_get_stats64, +}; + +static const struct of_device_id dwceq_of_match[] = { + { .compatible = "snps,dwc-qos-ethernet-4.10", }, + {} +}; +MODULE_DEVICE_TABLE(of, dwceq_of_match); + +static int dwceqos_probe(struct platform_device *pdev) +{ + struct resource *r_mem = NULL; + struct net_device *ndev; + struct net_local *lp; + int ret = -ENXIO; + + r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r_mem) { + dev_err(&pdev->dev, "no IO resource defined.\n"); + return -ENXIO; + } + + ndev = alloc_etherdev(sizeof(*lp)); + if (!ndev) { + dev_err(&pdev->dev, "etherdev allocation failed.\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + lp = netdev_priv(ndev); + lp->ndev = ndev; + lp->pdev = pdev; + lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT); + + spin_lock_init(&lp->tx_lock); + spin_lock_init(&lp->hw_lock); + spin_lock_init(&lp->stats_lock); + + lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); + if (IS_ERR(lp->apb_pclk)) { + dev_err(&pdev->dev, "apb_pclk clock not found.\n"); + ret = PTR_ERR(lp->apb_pclk); + goto err_out_free_netdev; + } + + ret = clk_prepare_enable(lp->apb_pclk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable APER clock.\n"); + goto err_out_free_netdev; + } + + lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem); + if (IS_ERR(lp->baseaddr)) { + dev_err(&pdev->dev, "failed to map baseaddress.\n"); + ret = PTR_ERR(lp->baseaddr); + goto err_out_clk_dis_aper; + } + + ndev->irq = platform_get_irq(pdev, 0); + ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ; + ndev->netdev_ops = &netdev_ops; + ndev->ethtool_ops = &dwceqos_ethtool_ops; + ndev->base_addr = r_mem->start; + + dwceqos_get_hwfeatures(lp); + dwceqos_mdio_set_csr(lp); + + ndev->hw_features = NETIF_F_SG; + + if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) + ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + + if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL) + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL) + ndev->hw_features |= NETIF_F_RXCSUM; + + 
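/* hw_features lists the offloads the user may toggle via ethtool -K; + * start out with all supported offloads enabled in features. + */ +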
ndev->features = ndev->hw_features; + + netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_clk_dis_aper; + } + + lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); + if (IS_ERR(lp->phy_ref_clk)) { + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); + ret = PTR_ERR(lp->phy_ref_clk); + goto err_out_unregister_netdev; + } + + ret = clk_prepare_enable(lp->phy_ref_clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable device clock.\n"); + goto err_out_unregister_netdev; + } + + lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, + "phy-handle", 0); + if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) { + ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "invalid fixed-link"); + goto err_out_unregister_netdev; + } + + lp->phy_node = of_node_get(lp->pdev->dev.of_node); + } + + ret = of_get_phy_mode(lp->pdev->dev.of_node); + if (ret < 0) { + dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); + goto err_out_unregister_clk_notifier; + } + + lp->phy_interface = ret; + + ret = dwceqos_mii_init(lp); + if (ret) { + dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); + goto err_out_unregister_clk_notifier; + } + + ret = dwceqos_mii_probe(ndev); + if (ret != 0) { + netdev_err(ndev, "mii_probe fail.\n"); + ret = -ENXIO; + goto err_out_unregister_clk_notifier; + } + + dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); + + tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim, + (unsigned long)ndev); + tasklet_disable(&lp->tx_bdreclaim_tasklet); + + lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME); + INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); + + platform_set_drvdata(pdev, ndev); + ret = dwceqos_probe_config_dt(pdev); + if (ret) { + dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", + ret); + goto err_out_unregister_clk_notifier; + } + dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", + pdev->id, ndev->base_addr, ndev->irq); + + ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0, + ndev->name, ndev); + if (ret) { + dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", + ndev->irq, ret); + goto err_out_unregister_clk_notifier; + } + + if (netif_msg_probe(lp)) + netdev_dbg(ndev, "net_local@%p\n", lp); + + return 0; + +err_out_unregister_clk_notifier: + clk_disable_unprepare(lp->phy_ref_clk); +err_out_unregister_netdev: + unregister_netdev(ndev); +err_out_clk_dis_aper: + clk_disable_unprepare(lp->apb_pclk); +err_out_free_netdev: + if (lp->phy_node) + of_node_put(lp->phy_node); + free_netdev(ndev); + platform_set_drvdata(pdev, NULL); + return ret; +} + +static int dwceqos_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct net_local *lp; + + if (ndev) { + lp = netdev_priv(ndev); + + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + + unregister_netdev(ndev); + + clk_disable_unprepare(lp->phy_ref_clk); + clk_disable_unprepare(lp->apb_pclk); + + free_netdev(ndev); + } + + return 0; +} + +static struct platform_driver dwceqos_driver = { + .probe = dwceqos_probe, + .remove = dwceqos_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = dwceq_of_match, + }, +}; + +module_platform_driver(dwceqos_driver); + +MODULE_DESCRIPTION("DWC 
Ethernet QoS v4.10a driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>"); +MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>"); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9749dfd78c43..29ae672917b7 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -51,6 +51,8 @@ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ NETIF_MSG_RX_STATUS) +#define NETCP_EFUSE_ADDR_SWAP 2 + #define knav_queue_get_id(q) knav_queue_device_control(q, \ KNAV_QUEUE_GET_ID, (unsigned long)NULL) @@ -172,13 +174,22 @@ static void set_words(u32 *words, int num_words, u32 *desc) } /* Read the e-fuse value as 32 bit values to be endian independent */ -static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac) +static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap) { unsigned int addr0, addr1; addr1 = readl(efuse_mac + 4); addr0 = readl(efuse_mac); + switch (swap) { + case NETCP_EFUSE_ADDR_SWAP: + addr0 = addr1; + addr1 = readl(efuse_mac); + break; + default: + break; + } + x[0] = (addr1 & 0x0000ff00) >> 8; x[1] = addr1 & 0x000000ff; x[2] = (addr0 & 0xff000000) >> 24; @@ -1902,7 +1913,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, goto quit; } - emac_arch_get_mac_addr(efuse_mac_addr, efuse); + emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else @@ -2150,7 +2161,6 @@ MODULE_DEVICE_TABLE(of, of_match); static struct platform_driver netcp_driver = { .driver = { .name = "netcp-1.0", - .owner = THIS_MODULE, .of_match_table = of_match, }, .probe = netcp_probe, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 1974a8ae764a..6f16d6aaf7b7 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -295,8 +295,6 @@ struct xgbe_hw_stats { u32 rx_dma_overruns; }; -#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) - struct gbenu_ss_regs { u32 id_ver; u32 synce_count; /* NU */ @@ -480,7 +478,6 @@ struct gbenu_hw_stats { u32 tx_pri7_drop_bcnt; }; -#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32)) #define GBENU_HW_STATS_REG_MAP_SZ 0x200 struct gbe_ss_regs { @@ -615,7 +612,6 @@ struct gbe_hw_stats { u32 rx_dma_overruns; }; -#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) #define GBE_MAX_HW_STAT_MODS 9 #define GBE_HW_STATS_REG_MAP_SZ 0x100 @@ -646,6 +642,7 @@ struct gbe_priv { bool enable_ale; u8 max_num_slaves; u8 max_num_ports; /* max_num_slaves + 1 */ + u8 num_stats_mods; struct netcp_tx_pipe tx_pipe; int host_port; @@ -675,6 +672,7 @@ struct gbe_priv { struct net_device *dummy_ndev; u64 *hw_stats; + u32 *hw_stats_prev; const struct netcp_ethtool_stat *et_stats; int num_et_stats; /* Lock for updating the hwstats */ @@ -874,7 +872,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { }; /* This is the size of entries in GBENU_STATS_HOST */ -#define GBENU_ET_STATS_HOST_SIZE 33 +#define GBENU_ET_STATS_HOST_SIZE 52 #define GBENU_STATS_HOST(field) \ { \ @@ -883,8 +881,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { offsetof(struct gbenu_hw_stats, field) \ } -/* This is the size of entries in GBENU_STATS_HOST */ -#define GBENU_ET_STATS_PORT_SIZE 46 +/* This is the size of entries in GBENU_STATS_PORT */ +#define GBENU_ET_STATS_PORT_SIZE 65 #define 
GBENU_STATS_P1(field) \ { \ @@ -976,7 +974,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_HOST(ale_unknown_mcast_bytes), GBENU_STATS_HOST(ale_unknown_bcast), GBENU_STATS_HOST(ale_unknown_bcast_bytes), + GBENU_STATS_HOST(ale_pol_match), + GBENU_STATS_HOST(ale_pol_match_red), + GBENU_STATS_HOST(ale_pol_match_yellow), GBENU_STATS_HOST(tx_mem_protect_err), + GBENU_STATS_HOST(tx_pri0_drop), + GBENU_STATS_HOST(tx_pri1_drop), + GBENU_STATS_HOST(tx_pri2_drop), + GBENU_STATS_HOST(tx_pri3_drop), + GBENU_STATS_HOST(tx_pri4_drop), + GBENU_STATS_HOST(tx_pri5_drop), + GBENU_STATS_HOST(tx_pri6_drop), + GBENU_STATS_HOST(tx_pri7_drop), + GBENU_STATS_HOST(tx_pri0_drop_bcnt), + GBENU_STATS_HOST(tx_pri1_drop_bcnt), + GBENU_STATS_HOST(tx_pri2_drop_bcnt), + GBENU_STATS_HOST(tx_pri3_drop_bcnt), + GBENU_STATS_HOST(tx_pri4_drop_bcnt), + GBENU_STATS_HOST(tx_pri5_drop_bcnt), + GBENU_STATS_HOST(tx_pri6_drop_bcnt), + GBENU_STATS_HOST(tx_pri7_drop_bcnt), /* GBENU Module 1 */ GBENU_STATS_P1(rx_good_frames), GBENU_STATS_P1(rx_broadcast_frames), @@ -1023,7 +1040,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P1(ale_unknown_mcast_bytes), GBENU_STATS_P1(ale_unknown_bcast), GBENU_STATS_P1(ale_unknown_bcast_bytes), + GBENU_STATS_P1(ale_pol_match), + GBENU_STATS_P1(ale_pol_match_red), + GBENU_STATS_P1(ale_pol_match_yellow), GBENU_STATS_P1(tx_mem_protect_err), + GBENU_STATS_P1(tx_pri0_drop), + GBENU_STATS_P1(tx_pri1_drop), + GBENU_STATS_P1(tx_pri2_drop), + GBENU_STATS_P1(tx_pri3_drop), + GBENU_STATS_P1(tx_pri4_drop), + GBENU_STATS_P1(tx_pri5_drop), + GBENU_STATS_P1(tx_pri6_drop), + GBENU_STATS_P1(tx_pri7_drop), + GBENU_STATS_P1(tx_pri0_drop_bcnt), + GBENU_STATS_P1(tx_pri1_drop_bcnt), + GBENU_STATS_P1(tx_pri2_drop_bcnt), + GBENU_STATS_P1(tx_pri3_drop_bcnt), + GBENU_STATS_P1(tx_pri4_drop_bcnt), + GBENU_STATS_P1(tx_pri5_drop_bcnt), + GBENU_STATS_P1(tx_pri6_drop_bcnt), + GBENU_STATS_P1(tx_pri7_drop_bcnt), /* GBENU Module 2 */ GBENU_STATS_P2(rx_good_frames), GBENU_STATS_P2(rx_broadcast_frames), @@ -1070,7 +1106,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P2(ale_unknown_mcast_bytes), GBENU_STATS_P2(ale_unknown_bcast), GBENU_STATS_P2(ale_unknown_bcast_bytes), + GBENU_STATS_P2(ale_pol_match), + GBENU_STATS_P2(ale_pol_match_red), + GBENU_STATS_P2(ale_pol_match_yellow), GBENU_STATS_P2(tx_mem_protect_err), + GBENU_STATS_P2(tx_pri0_drop), + GBENU_STATS_P2(tx_pri1_drop), + GBENU_STATS_P2(tx_pri2_drop), + GBENU_STATS_P2(tx_pri3_drop), + GBENU_STATS_P2(tx_pri4_drop), + GBENU_STATS_P2(tx_pri5_drop), + GBENU_STATS_P2(tx_pri6_drop), + GBENU_STATS_P2(tx_pri7_drop), + GBENU_STATS_P2(tx_pri0_drop_bcnt), + GBENU_STATS_P2(tx_pri1_drop_bcnt), + GBENU_STATS_P2(tx_pri2_drop_bcnt), + GBENU_STATS_P2(tx_pri3_drop_bcnt), + GBENU_STATS_P2(tx_pri4_drop_bcnt), + GBENU_STATS_P2(tx_pri5_drop_bcnt), + GBENU_STATS_P2(tx_pri6_drop_bcnt), + GBENU_STATS_P2(tx_pri7_drop_bcnt), /* GBENU Module 3 */ GBENU_STATS_P3(rx_good_frames), GBENU_STATS_P3(rx_broadcast_frames), @@ -1117,7 +1172,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P3(ale_unknown_mcast_bytes), GBENU_STATS_P3(ale_unknown_bcast), GBENU_STATS_P3(ale_unknown_bcast_bytes), + GBENU_STATS_P3(ale_pol_match), + GBENU_STATS_P3(ale_pol_match_red), + GBENU_STATS_P3(ale_pol_match_yellow), GBENU_STATS_P3(tx_mem_protect_err), + GBENU_STATS_P3(tx_pri0_drop), + GBENU_STATS_P3(tx_pri1_drop), + GBENU_STATS_P3(tx_pri2_drop), + GBENU_STATS_P3(tx_pri3_drop), + GBENU_STATS_P3(tx_pri4_drop), + 
GBENU_STATS_P3(tx_pri5_drop), + GBENU_STATS_P3(tx_pri6_drop), + GBENU_STATS_P3(tx_pri7_drop), + GBENU_STATS_P3(tx_pri0_drop_bcnt), + GBENU_STATS_P3(tx_pri1_drop_bcnt), + GBENU_STATS_P3(tx_pri2_drop_bcnt), + GBENU_STATS_P3(tx_pri3_drop_bcnt), + GBENU_STATS_P3(tx_pri4_drop_bcnt), + GBENU_STATS_P3(tx_pri5_drop_bcnt), + GBENU_STATS_P3(tx_pri6_drop_bcnt), + GBENU_STATS_P3(tx_pri7_drop_bcnt), /* GBENU Module 4 */ GBENU_STATS_P4(rx_good_frames), GBENU_STATS_P4(rx_broadcast_frames), @@ -1164,7 +1238,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P4(ale_unknown_mcast_bytes), GBENU_STATS_P4(ale_unknown_bcast), GBENU_STATS_P4(ale_unknown_bcast_bytes), + GBENU_STATS_P4(ale_pol_match), + GBENU_STATS_P4(ale_pol_match_red), + GBENU_STATS_P4(ale_pol_match_yellow), GBENU_STATS_P4(tx_mem_protect_err), + GBENU_STATS_P4(tx_pri0_drop), + GBENU_STATS_P4(tx_pri1_drop), + GBENU_STATS_P4(tx_pri2_drop), + GBENU_STATS_P4(tx_pri3_drop), + GBENU_STATS_P4(tx_pri4_drop), + GBENU_STATS_P4(tx_pri5_drop), + GBENU_STATS_P4(tx_pri6_drop), + GBENU_STATS_P4(tx_pri7_drop), + GBENU_STATS_P4(tx_pri0_drop_bcnt), + GBENU_STATS_P4(tx_pri1_drop_bcnt), + GBENU_STATS_P4(tx_pri2_drop_bcnt), + GBENU_STATS_P4(tx_pri3_drop_bcnt), + GBENU_STATS_P4(tx_pri4_drop_bcnt), + GBENU_STATS_P4(tx_pri5_drop_bcnt), + GBENU_STATS_P4(tx_pri6_drop_bcnt), + GBENU_STATS_P4(tx_pri7_drop_bcnt), /* GBENU Module 5 */ GBENU_STATS_P5(rx_good_frames), GBENU_STATS_P5(rx_broadcast_frames), @@ -1211,7 +1304,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P5(ale_unknown_mcast_bytes), GBENU_STATS_P5(ale_unknown_bcast), GBENU_STATS_P5(ale_unknown_bcast_bytes), + GBENU_STATS_P5(ale_pol_match), + GBENU_STATS_P5(ale_pol_match_red), + GBENU_STATS_P5(ale_pol_match_yellow), GBENU_STATS_P5(tx_mem_protect_err), + GBENU_STATS_P5(tx_pri0_drop), + GBENU_STATS_P5(tx_pri1_drop), + GBENU_STATS_P5(tx_pri2_drop), + GBENU_STATS_P5(tx_pri3_drop), + GBENU_STATS_P5(tx_pri4_drop), + GBENU_STATS_P5(tx_pri5_drop), + GBENU_STATS_P5(tx_pri6_drop), + GBENU_STATS_P5(tx_pri7_drop), + GBENU_STATS_P5(tx_pri0_drop_bcnt), + GBENU_STATS_P5(tx_pri1_drop_bcnt), + GBENU_STATS_P5(tx_pri2_drop_bcnt), + GBENU_STATS_P5(tx_pri3_drop_bcnt), + GBENU_STATS_P5(tx_pri4_drop_bcnt), + GBENU_STATS_P5(tx_pri5_drop_bcnt), + GBENU_STATS_P5(tx_pri6_drop_bcnt), + GBENU_STATS_P5(tx_pri7_drop_bcnt), /* GBENU Module 6 */ GBENU_STATS_P6(rx_good_frames), GBENU_STATS_P6(rx_broadcast_frames), @@ -1258,7 +1370,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P6(ale_unknown_mcast_bytes), GBENU_STATS_P6(ale_unknown_bcast), GBENU_STATS_P6(ale_unknown_bcast_bytes), + GBENU_STATS_P6(ale_pol_match), + GBENU_STATS_P6(ale_pol_match_red), + GBENU_STATS_P6(ale_pol_match_yellow), GBENU_STATS_P6(tx_mem_protect_err), + GBENU_STATS_P6(tx_pri0_drop), + GBENU_STATS_P6(tx_pri1_drop), + GBENU_STATS_P6(tx_pri2_drop), + GBENU_STATS_P6(tx_pri3_drop), + GBENU_STATS_P6(tx_pri4_drop), + GBENU_STATS_P6(tx_pri5_drop), + GBENU_STATS_P6(tx_pri6_drop), + GBENU_STATS_P6(tx_pri7_drop), + GBENU_STATS_P6(tx_pri0_drop_bcnt), + GBENU_STATS_P6(tx_pri1_drop_bcnt), + GBENU_STATS_P6(tx_pri2_drop_bcnt), + GBENU_STATS_P6(tx_pri3_drop_bcnt), + GBENU_STATS_P6(tx_pri4_drop_bcnt), + GBENU_STATS_P6(tx_pri5_drop_bcnt), + GBENU_STATS_P6(tx_pri6_drop_bcnt), + GBENU_STATS_P6(tx_pri7_drop_bcnt), /* GBENU Module 7 */ GBENU_STATS_P7(rx_good_frames), GBENU_STATS_P7(rx_broadcast_frames), @@ -1305,7 +1436,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { 
GBENU_STATS_P7(ale_unknown_mcast_bytes), GBENU_STATS_P7(ale_unknown_bcast), GBENU_STATS_P7(ale_unknown_bcast_bytes), + GBENU_STATS_P7(ale_pol_match), + GBENU_STATS_P7(ale_pol_match_red), + GBENU_STATS_P7(ale_pol_match_yellow), GBENU_STATS_P7(tx_mem_protect_err), + GBENU_STATS_P7(tx_pri0_drop), + GBENU_STATS_P7(tx_pri1_drop), + GBENU_STATS_P7(tx_pri2_drop), + GBENU_STATS_P7(tx_pri3_drop), + GBENU_STATS_P7(tx_pri4_drop), + GBENU_STATS_P7(tx_pri5_drop), + GBENU_STATS_P7(tx_pri6_drop), + GBENU_STATS_P7(tx_pri7_drop), + GBENU_STATS_P7(tx_pri0_drop_bcnt), + GBENU_STATS_P7(tx_pri1_drop_bcnt), + GBENU_STATS_P7(tx_pri2_drop_bcnt), + GBENU_STATS_P7(tx_pri3_drop_bcnt), + GBENU_STATS_P7(tx_pri4_drop_bcnt), + GBENU_STATS_P7(tx_pri5_drop_bcnt), + GBENU_STATS_P7(tx_pri6_drop_bcnt), + GBENU_STATS_P7(tx_pri7_drop_bcnt), /* GBENU Module 8 */ GBENU_STATS_P8(rx_good_frames), GBENU_STATS_P8(rx_broadcast_frames), @@ -1352,7 +1502,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P8(ale_unknown_mcast_bytes), GBENU_STATS_P8(ale_unknown_bcast), GBENU_STATS_P8(ale_unknown_bcast_bytes), + GBENU_STATS_P8(ale_pol_match), + GBENU_STATS_P8(ale_pol_match_red), + GBENU_STATS_P8(ale_pol_match_yellow), GBENU_STATS_P8(tx_mem_protect_err), + GBENU_STATS_P8(tx_pri0_drop), + GBENU_STATS_P8(tx_pri1_drop), + GBENU_STATS_P8(tx_pri2_drop), + GBENU_STATS_P8(tx_pri3_drop), + GBENU_STATS_P8(tx_pri4_drop), + GBENU_STATS_P8(tx_pri5_drop), + GBENU_STATS_P8(tx_pri6_drop), + GBENU_STATS_P8(tx_pri7_drop), + GBENU_STATS_P8(tx_pri0_drop_bcnt), + GBENU_STATS_P8(tx_pri1_drop_bcnt), + GBENU_STATS_P8(tx_pri2_drop_bcnt), + GBENU_STATS_P8(tx_pri3_drop_bcnt), + GBENU_STATS_P8(tx_pri4_drop_bcnt), + GBENU_STATS_P8(tx_pri5_drop_bcnt), + GBENU_STATS_P8(tx_pri6_drop_bcnt), + GBENU_STATS_P8(tx_pri7_drop_bcnt), }; #define XGBE_STATS0_INFO(field) \ @@ -1554,70 +1723,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset) } } -static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod) +{ + void __iomem *base = gbe_dev->hw_stats_regs[stats_mod]; + u32 __iomem *p_stats_entry; + int i; + + for (i = 0; i < gbe_dev->num_et_stats; i++) { + if (gbe_dev->et_stats[i].type == stats_mod) { + p_stats_entry = base + gbe_dev->et_stats[i].offset; + gbe_dev->hw_stats[i] = 0; + gbe_dev->hw_stats_prev[i] = readl(p_stats_entry); + } + } +} + +static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev, + int et_stats_entry) { void __iomem *base = NULL; - u32 __iomem *p; - u32 tmp = 0; + u32 __iomem *p_stats_entry; + u32 curr, delta; + + /* The hw_stats_regs pointers are already + * properly set to point to the right base: + */ + base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type]; + p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset; + curr = readl(p_stats_entry); + delta = curr - gbe_dev->hw_stats_prev[et_stats_entry]; + gbe_dev->hw_stats_prev[et_stats_entry] = curr; + gbe_dev->hw_stats[et_stats_entry] += delta; +} + +static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +{ int i; for (i = 0; i < gbe_dev->num_et_stats; i++) { - base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type]; - p = base + gbe_dev->et_stats[i].offset; - tmp = readl(p); - gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp; + gbe_update_hw_stats_entry(gbe_dev, i); + if (data) data[i] = gbe_dev->hw_stats[i]; - /* write-to-decrement: - * new register value = old register value - write value - */ - writel(tmp, p); 
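+ /* Counters are now accumulated as deltas against the hw_stats_prev + * snapshot instead of being cleared via write-to-decrement, so the + * hardware registers are left free-running. + */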
} } -static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev, + int stats_mod) { - void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0]; - void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1]; - u64 *hw_stats = &gbe_dev->hw_stats[0]; - void __iomem *base = NULL; - u32 __iomem *p; - u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2); - int i, j, pair; + u32 val; - for (pair = 0; pair < 2; pair++) { - val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); - if (pair == 0) - val &= ~GBE_STATS_CD_SEL; - else - val |= GBE_STATS_CD_SEL; + switch (stats_mod) { + case GBE_STATSA_MODULE: + case GBE_STATSB_MODULE: + val &= ~GBE_STATS_CD_SEL; + break; + case GBE_STATSC_MODULE: + case GBE_STATSD_MODULE: + val |= GBE_STATS_CD_SEL; + break; + default: + return; + } - /* make the stat modules visible */ - writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + /* make the stat module visible */ + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); +} - for (i = 0; i < pair_size; i++) { - j = pair * pair_size + i; - switch (gbe_dev->et_stats[j].type) { - case GBE_STATSA_MODULE: - case GBE_STATSC_MODULE: - base = gbe_statsa; - break; - case GBE_STATSB_MODULE: - case GBE_STATSD_MODULE: - base = gbe_statsb; - break; - } +static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod) +{ + gbe_stats_mod_visible_ver14(gbe_dev, stats_mod); + gbe_reset_mod_stats(gbe_dev, stats_mod); +} + +static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +{ + u32 half_num_et_stats = (gbe_dev->num_et_stats / 2); + int et_entry, j, pair; + + for (pair = 0; pair < 2; pair++) { + gbe_stats_mod_visible_ver14(gbe_dev, (pair ? 
+ GBE_STATSC_MODULE : + GBE_STATSA_MODULE)); + + for (j = 0; j < half_num_et_stats; j++) { + et_entry = pair * half_num_et_stats + j; + gbe_update_hw_stats_entry(gbe_dev, et_entry); - p = base + gbe_dev->et_stats[j].offset; - tmp = readl(p); - hw_stats[j] += tmp; if (data) - data[j] = hw_stats[j]; - /* write-to-decrement: - * new register value = old register value - write value - */ - writel(tmp, p); + data[et_entry] = gbe_dev->hw_stats[et_entry]; } } } @@ -2207,14 +2403,15 @@ static void netcp_ethss_timer(unsigned long arg) netcp_ethss_update_link_state(gbe_dev, slave, NULL); } - spin_lock_bh(&gbe_dev->hw_stats_lock); + /* A timer runs as a BH, no need to block them */ + spin_lock(&gbe_dev->hw_stats_lock); if (gbe_dev->ss_version == GBE_SS_VERSION_14) gbe_update_stats_ver14(gbe_dev, NULL); else gbe_update_stats(gbe_dev, NULL); - spin_unlock_bh(&gbe_dev->hw_stats_lock); + spin_unlock(&gbe_dev->hw_stats_lock); gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; add_timer(&gbe_dev->timer); @@ -2571,15 +2768,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, } gbe_dev->xgbe_serdes_regs = regs; + gbe_dev->num_stats_mods = gbe_dev->max_num_ports; + gbe_dev->et_stats = xgbe10_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - XGBE10_NUM_STAT_ENTRIES * - (gbe_dev->max_num_ports) * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + gbe_dev->ss_version = XGBE_SS_VERSION_10; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + XGBE10_SGMII_MODULE_OFFSET; @@ -2593,8 +2803,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = XGBE10_HOST_PORT_NUM; gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; - gbe_dev->et_stats = xgbe10_et_stats; - gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ @@ -2679,30 +2887,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, } gbe_dev->switch_regs = regs; + gbe_dev->num_stats_mods = gbe_dev->max_num_slaves; + gbe_dev->et_stats = gbe13_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - GBE13_NUM_HW_STAT_ENTRIES * - gbe_dev->max_num_slaves * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; + /* K2HK has only 2 hw stats modules visible at a time, so + * module 0 & 2 points to one base and + * module 1 & 3 points to the other base + */ for (i = 0; i < gbe_dev->max_num_slaves; i++) { gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + - (GBE_HW_STATS_REG_MAP_SZ * 
i); + (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1)); } gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBE13_HOST_PORT_NUM; gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; - gbe_dev->et_stats = gbe13_et_stats; - gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; /* Subsystem registers */ @@ -2729,15 +2952,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, void __iomem *regs; int i, ret; + gbe_dev->num_stats_mods = gbe_dev->max_num_ports; + gbe_dev->et_stats = gbenu_et_stats; + + if (IS_SS_ID_NU(gbe_dev)) + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); + else + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + GBENU_ET_STATS_PORT_SIZE; + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - GBENU_NUM_HW_STAT_ENTRIES * - (gbe_dev->max_num_ports) * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, @@ -2765,16 +3007,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBENU_HOST_PORT_NUM; gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; - gbe_dev->et_stats = gbenu_et_stats; gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; - if (IS_SS_ID_NU(gbe_dev)) - gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + - (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); - else - gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + - GBENU_ET_STATS_PORT_SIZE; - /* Subsystem registers */ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -2804,7 +3038,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, struct cpsw_ale_params ale_params; struct gbe_priv *gbe_dev; u32 slave_num; - int ret = 0; + int i, ret = 0; if (!node) { dev_err(dev, "device tree info unavailable\n"); @@ -2951,6 +3185,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, /* initialize host port */ gbe_init_host_port(gbe_dev); + spin_lock_bh(&gbe_dev->hw_stats_lock); + for (i = 0; i < gbe_dev->num_stats_mods; i++) { + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + gbe_reset_mod_stats_ver14(gbe_dev, i); + else + gbe_reset_mod_stats(gbe_dev, i); + } + spin_unlock_bh(&gbe_dev->hw_stats_lock); + init_timer(&gbe_dev->timer); gbe_dev->timer.data = (unsigned long)gbe_dev; gbe_dev->timer.function = netcp_ethss_timer; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index dd4544085db3..5ce7020ca530 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -541,6 +541,29 @@ union nvsp_2_message_uber { struct nvsp_2_free_rxbuf free_rxbuf; } __packed; +struct nvsp_4_send_vf_association { + /* 1: allocated, serial number is valid. 
0: not allocated */ + u32 allocated; + + /* Serial number of the VF to team with */ + u32 serial; +} __packed; + +enum nvsp_vm_datapath { + NVSP_DATAPATH_SYNTHETIC = 0, + NVSP_DATAPATH_VF, + NVSP_DATAPATH_MAX +}; + +struct nvsp_4_sw_datapath { + u32 active_datapath; /* active data path in VM */ +} __packed; + +union nvsp_4_message_uber { + struct nvsp_4_send_vf_association vf_assoc; + struct nvsp_4_sw_datapath active_dp; +} __packed; + enum nvsp_subchannel_operation { NVSP_SUBCHANNEL_NONE = 0, NVSP_SUBCHANNEL_ALLOCATE, @@ -578,6 +601,7 @@ union nvsp_all_messages { union nvsp_message_init_uber init_msg; union nvsp_1_message_uber v1_msg; union nvsp_2_message_uber v2_msg; + union nvsp_4_message_uber v4_msg; union nvsp_5_message_uber v5_msg; } __packed; @@ -589,6 +613,7 @@ struct nvsp_message { #define NETVSC_MTU 65536 +#define NETVSC_MTU_MIN 68 #define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ @@ -670,6 +695,8 @@ struct netvsc_device { u32 send_table[VRSS_SEND_TAB_SIZE]; u32 max_chn; u32 num_chn; + spinlock_t sc_lock; /* Protects num_sc_offered variable */ + u32 num_sc_offered; atomic_t queue_sends[NR_CPUS]; /* Holds rndis device info */ @@ -688,6 +715,11 @@ struct netvsc_device { /* The net device context */ struct net_device_context *nd_ctx; + + /* 1: allocated, serial number is valid. 0: not allocated */ + u32 vf_alloc; + /* Serial number of the VF to team with */ + u32 vf_serial; }; /* NdisInitialize message */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 23126a74f357..51e4c0fd0a74 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -453,13 +453,16 @@ static int negotiate_nvsp_ver(struct hv_device *device, if (nvsp_ver == NVSP_PROTOCOL_VERSION_1) return 0; - /* NVSPv2 only: Send NDIS config */ + /* NVSPv2 or later: Send NDIS config */ memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG; init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu + ETH_HLEN; init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1; + if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) + init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1; + ret = vmbus_sendpacket(device->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, @@ -1064,11 +1067,10 @@ static void netvsc_receive(struct netvsc_device *net_device, static void netvsc_send_table(struct hv_device *hdev, - struct vmpacket_descriptor *vmpkt) + struct nvsp_message *nvmsg) { struct netvsc_device *nvscdev; struct net_device *ndev; - struct nvsp_message *nvmsg; int i; u32 count, *tab; @@ -1077,12 +1079,6 @@ static void netvsc_send_table(struct hv_device *hdev, return; ndev = nvscdev->ndev; - nvmsg = (struct nvsp_message *)((unsigned long)vmpkt + - (vmpkt->offset8 << 3)); - - if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE) - return; - count = nvmsg->msg.v5_msg.send_table.count; if (count != VRSS_SEND_TAB_SIZE) { netdev_err(ndev, "Received wrong send-table size:%u\n", count); @@ -1096,6 +1092,28 @@ static void netvsc_send_table(struct hv_device *hdev, nvscdev->send_table[i] = tab[i]; } +static void netvsc_send_vf(struct netvsc_device *nvdev, + struct nvsp_message *nvmsg) +{ + nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; + nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; +} + +static inline void netvsc_receive_inband(struct hv_device *hdev, + struct netvsc_device *nvdev, + struct 
nvsp_message *nvmsg) +{ + switch (nvmsg->hdr.msg_type) { + case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: + netvsc_send_table(hdev, nvmsg); + break; + + case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: + netvsc_send_vf(nvdev, nvmsg); + break; + } +} + void netvsc_channel_cb(void *context) { int ret; @@ -1108,6 +1126,7 @@ void netvsc_channel_cb(void *context) unsigned char *buffer; int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; + struct nvsp_message *nvmsg; if (channel->primary_channel != NULL) device = channel->primary_channel->device_obj; @@ -1126,6 +1145,8 @@ void netvsc_channel_cb(void *context) if (ret == 0) { if (bytes_recvd > 0) { desc = (struct vmpacket_descriptor *)buffer; + nvmsg = (struct nvsp_message *)((unsigned long) + desc + (desc->offset8 << 3)); switch (desc->type) { case VM_PKT_COMP: netvsc_send_completion(net_device, @@ -1138,7 +1159,9 @@ void netvsc_channel_cb(void *context) break; case VM_PKT_DATA_INBAND: - netvsc_send_table(device, desc); + netvsc_receive_inband(device, + net_device, + nvmsg); break; default: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 358475ed9b59..7b36d5fecc1f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_start_all_queues(net); + netif_tx_wake_all_queues(net); nvdev = hv_get_drvdata(device_obj); rdev = nvdev->extension; @@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(device_obj); int ret; + u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; + struct vmbus_channel *chn; netif_tx_disable(net); /* Make sure netvsc_set_multicast_list doesn't re-enable filter! 
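[Annotation] The callback rework above pulls the NVSP message out of the VMBus packet once, up front: offset8 in the descriptor counts 8-byte units from the start of the packet to the payload, hence the << 3. A reduced sketch of that extraction; the field layout mirrors the Hyper-V ring descriptor:

#include <stdint.h>
#include <stddef.h>

struct vmpacket_descriptor {
	uint16_t type;
	uint16_t offset8;	/* payload offset, in 8-byte units */
	uint16_t len8;		/* total packet length, in 8-byte units */
	uint16_t flags;
	uint64_t trans_id;
};

/* The payload begins offset8 * 8 bytes from the descriptor itself. */
static inline void *vmpacket_payload(struct vmpacket_descriptor *desc)
{
	return (uint8_t *)desc + ((size_t)desc->offset8 << 3);
}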
*/ cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(device_obj); - if (ret != 0) + if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); + return ret; + } + + /* Ensure pending bytes in ring are read */ + while (true) { + aread = 0; + for (i = 0; i < nvdev->num_chn; i++) { + chn = nvdev->chn_table[i]; + if (!chn) + continue; + + hv_get_ringbuffer_availbytes(&chn->inbound, &aread, + &awrite); + + if (aread) + break; + + hv_get_ringbuffer_availbytes(&chn->outbound, &aread, + &awrite); + + if (aread) + break; + } + + retry++; + if (retry > retry_max || aread == 0) + break; + + msleep(msec); + + if (msec < 1000) + msec *= 2; + } + + if (aread) { + netdev_err(net, "Ring buffer not empty after closing rndis\n"); + ret = -ETIMEDOUT; + } return ret; } @@ -736,6 +777,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = hv_get_drvdata(hdev); struct netvsc_device_info device_info; int limit = ETH_DATA_LEN; + int ret = 0; if (nvdev == NULL || nvdev->destroy) return -ENODEV; @@ -743,13 +785,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) limit = NETVSC_MTU - ETH_HLEN; - /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */ - if (mtu < ETH_DATA_LEN || mtu > limit) + if (mtu < NETVSC_MTU_MIN || mtu > limit) return -EINVAL; + ret = netvsc_close(ndev); + if (ret) + goto out; + nvdev->start_remove = true; - cancel_work_sync(&ndevctx->work); - netif_tx_disable(ndev); rndis_filter_device_remove(hdev); ndev->mtu = mtu; @@ -759,9 +802,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.ring_size = ring_size; device_info.max_num_vrss_chns = max_num_vrss_chns; rndis_filter_device_add(hdev, &device_info); - netif_tx_wake_all_queues(ndev); - return 0; +out: + netvsc_open(ndev); + + return ret; } static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 236aeb76ef22..9b8263db49cc 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -984,9 +984,16 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) struct netvsc_device *nvscdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; int ret; + unsigned long flags; nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj); + spin_lock_irqsave(&nvscdev->sc_lock, flags); + nvscdev->num_sc_offered--; + spin_unlock_irqrestore(&nvscdev->sc_lock, flags); + if (nvscdev->num_sc_offered == 0) + complete(&nvscdev->channel_init_wait); + if (chn_index >= nvscdev->num_chn) return; @@ -1015,8 +1022,10 @@ int rndis_filter_device_add(struct hv_device *dev, u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; u32 num_rss_qs; + u32 sc_delta; const struct cpumask *node_cpu_mask; u32 num_possible_rss_qs; + unsigned long flags; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1039,6 +1048,8 @@ int rndis_filter_device_add(struct hv_device *dev, net_device->max_chn = 1; net_device->num_chn = 1; + spin_lock_init(&net_device->sc_lock); + net_device->extension = rndis_device; rndis_device->net_dev = net_device; @@ -1054,7 +1065,7 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_query_device(rndis_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); - if (ret == 0 && size == sizeof(u32)) + if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu) net_device->ndev->mtu = mtu; /* Get the mac 
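[Annotation] The new close path above refuses to return until both directions of every sub-ring have drained, polling with a bounded exponential backoff: start at 10 ms, double up to a 1 s cap, give up after 20 rounds and report -ETIMEDOUT. A user-space analogue of the same backoff shape, where done() stands in for the ring-emptiness check:

#include <stdbool.h>
#include <unistd.h>

/* Returns true once done() succeeds, false after the retry budget is
 * spent.  Sleep time doubles each round, capped at 1000 ms, mirroring
 * the msec/retry logic in netvsc_close() above. */
static bool poll_backoff(bool (*done)(void *), void *arg)
{
	unsigned int msec = 10, retry;

	for (retry = 0; retry < 20; retry++) {
		if (done(arg))
			return true;
		usleep(msec * 1000);
		if (msec < 1000)
			msec *= 2;
	}
	return done(arg);
}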
address */ @@ -1116,6 +1127,9 @@ int rndis_filter_device_add(struct hv_device *dev, num_possible_rss_qs = cpumask_weight(node_cpu_mask); net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + num_rss_qs = net_device->num_chn - 1; + net_device->num_sc_offered = num_rss_qs; + if (net_device->num_chn == 1) goto out; @@ -1157,11 +1171,25 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); + /* + * Wait for the host to send us the sub-channel offers. + */ + spin_lock_irqsave(&net_device->sc_lock, flags); + sc_delta = num_rss_qs - (net_device->num_chn - 1); + net_device->num_sc_offered -= sc_delta; + spin_unlock_irqrestore(&net_device->sc_lock, flags); + + while (net_device->num_sc_offered != 0) { + t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ); + if (t == 0) + WARN(1, "Netvsc: Waiting for sub-channel processing"); + } out: if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; } + return 0; /* return 0 because primary channel can be used alone */ err_dev_remv: diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index f7bd9f3ddaac..d0d5bf6cbb68 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -545,7 +545,9 @@ at86rf230_async_state_delay(void *context) } /* Default delay is 1us in the most cases */ - tim = ktime_set(0, NSEC_PER_USEC); + udelay(1); + at86rf230_async_state_timer(&ctx->timer); + return; change: hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL); diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index b6fc29579667..613dae559925 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -1151,7 +1151,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids); static struct spi_driver cc2520_driver = { .driver = { .name = "cc2520", - .bus = &spi_bus_type, .owner = THIS_MODULE, .of_match_table = of_match_ptr(cc2520_of_ids), }, diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 2549760e039f..997724b8e434 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -812,7 +812,6 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids); static struct spi_driver mrf24j40_driver = { .driver = { .name = "mrf24j40", - .bus = &spi_bus_type, .owner = THIS_MODULE, }, .id_table = mrf24j40_ids, diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 94570aace241..cc56fac3c3f8 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -38,69 +38,68 @@ #include <net/net_namespace.h> #define TX_Q_LIMIT 32 -struct ifb_private { +struct ifb_q_private { + struct net_device *dev; struct tasklet_struct ifb_tasklet; - int tasklet_pending; - - struct u64_stats_sync rsync; + int tasklet_pending; + int txqnum; struct sk_buff_head rq; - u64 rx_packets; - u64 rx_bytes; + u64 rx_packets; + u64 rx_bytes; + struct u64_stats_sync rsync; struct u64_stats_sync tsync; + u64 tx_packets; + u64 tx_bytes; struct sk_buff_head tq; - u64 tx_packets; - u64 tx_bytes; -}; +} ____cacheline_aligned_in_smp; -static int numifbs = 2; +struct ifb_dev_private { + struct ifb_q_private *tx_private; +}; -static void ri_tasklet(unsigned long dev); static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); -static void ri_tasklet(unsigned long dev) +static void ifb_ri_tasklet(unsigned long _txp) { - struct net_device *_dev = (struct net_device *)dev; - struct 
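[Annotation] The sub-channel handshake above is a count-down rendezvous: rndis_filter_device_add() primes num_sc_offered with the number of offers it expects, each netvsc_sc_open() invocation decrements it under sc_lock, and the decrement that reaches zero fires the completion the opener is blocked on (the driver additionally re-arms a 10 s wait_for_completion_timeout() in a loop). A simplified user-space analogue of the pattern:

#include <pthread.h>

/* Count-down rendezvous: decrementers call countdown_dec(); the
 * waiter blocks in countdown_wait() until the count reaches zero. */
struct countdown {
	pthread_mutex_t lock;
	pthread_cond_t zero;
	unsigned int remaining;
};

static void countdown_dec(struct countdown *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->remaining && --c->remaining == 0)
		pthread_cond_broadcast(&c->zero);
	pthread_mutex_unlock(&c->lock);
}

static void countdown_wait(struct countdown *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->remaining)
		pthread_cond_wait(&c->zero, &c->lock);
	pthread_mutex_unlock(&c->lock);
}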
ifb_private *dp = netdev_priv(_dev); + struct ifb_q_private *txp = (struct ifb_q_private *)_txp; struct netdev_queue *txq; struct sk_buff *skb; - txq = netdev_get_tx_queue(_dev, 0); - if ((skb = skb_peek(&dp->tq)) == NULL) { - if (__netif_tx_trylock(txq)) { - skb_queue_splice_tail_init(&dp->rq, &dp->tq); - __netif_tx_unlock(txq); - } else { - /* reschedule */ + txq = netdev_get_tx_queue(txp->dev, txp->txqnum); + skb = skb_peek(&txp->tq); + if (!skb) { + if (!__netif_tx_trylock(txq)) goto resched; - } + skb_queue_splice_tail_init(&txp->rq, &txp->tq); + __netif_tx_unlock(txq); } - while ((skb = __skb_dequeue(&dp->tq)) != NULL) { + while ((skb = __skb_dequeue(&txp->tq)) != NULL) { u32 from = G_TC_FROM(skb->tc_verd); skb->tc_verd = 0; skb->tc_verd = SET_TC_NCLS(skb->tc_verd); - u64_stats_update_begin(&dp->tsync); - dp->tx_packets++; - dp->tx_bytes += skb->len; - u64_stats_update_end(&dp->tsync); + u64_stats_update_begin(&txp->tsync); + txp->tx_packets++; + txp->tx_bytes += skb->len; + u64_stats_update_end(&txp->tsync); rcu_read_lock(); - skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif); + skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); - _dev->stats.tx_dropped++; - if (skb_queue_len(&dp->tq) != 0) + txp->dev->stats.tx_dropped++; + if (skb_queue_len(&txp->tq) != 0) goto resched; break; } rcu_read_unlock(); - skb->skb_iif = _dev->ifindex; + skb->skb_iif = txp->dev->ifindex; if (from & AT_EGRESS) { dev_queue_xmit(skb); @@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev) } if (__netif_tx_trylock(txq)) { - if ((skb = skb_peek(&dp->rq)) == NULL) { - dp->tasklet_pending = 0; - if (netif_queue_stopped(_dev)) - netif_wake_queue(_dev); + skb = skb_peek(&txp->rq); + if (!skb) { + txp->tasklet_pending = 0; + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); } else { __netif_tx_unlock(txq); goto resched; @@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev) __netif_tx_unlock(txq); } else { resched: - dp->tasklet_pending = 1; - tasklet_schedule(&dp->ifb_tasklet); + txp->tasklet_pending = 1; + tasklet_schedule(&txp->ifb_tasklet); } } @@ -132,29 +132,58 @@ resched: static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ifb_private *dp = netdev_priv(dev); + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; unsigned int start; - - do { - start = u64_stats_fetch_begin_irq(&dp->rsync); - stats->rx_packets = dp->rx_packets; - stats->rx_bytes = dp->rx_bytes; - } while (u64_stats_fetch_retry_irq(&dp->rsync, start)); - - do { - start = u64_stats_fetch_begin_irq(&dp->tsync); - - stats->tx_packets = dp->tx_packets; - stats->tx_bytes = dp->tx_bytes; - - } while (u64_stats_fetch_retry_irq(&dp->tsync, start)); - + u64 packets, bytes; + int i; + + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + do { + start = u64_stats_fetch_begin_irq(&txp->rsync); + packets = txp->rx_packets; + bytes = txp->rx_bytes; + } while (u64_stats_fetch_retry_irq(&txp->rsync, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + + do { + start = u64_stats_fetch_begin_irq(&txp->tsync); + packets = txp->tx_packets; + bytes = txp->tx_bytes; + } while (u64_stats_fetch_retry_irq(&txp->tsync, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; return stats; } +static int ifb_dev_init(struct net_device *dev) +{ + 
struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp; + int i; + + txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL); + if (!txp) + return -ENOMEM; + dp->tx_private = txp; + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + txp->txqnum = i; + txp->dev = dev; + __skb_queue_head_init(&txp->rq); + __skb_queue_head_init(&txp->tq); + u64_stats_init(&txp->rsync); + u64_stats_init(&txp->tsync); + tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet, + (unsigned long)txp); + netif_tx_start_queue(netdev_get_tx_queue(dev, i)); + } + return 0; +} static const struct net_device_ops ifb_netdev_ops = { .ndo_open = ifb_open, @@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = { .ndo_get_stats64 = ifb_stats64, .ndo_start_xmit = ifb_xmit, .ndo_validate_addr = eth_validate_addr, + .ndo_init = ifb_dev_init, }; #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ @@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = { NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_TX) +static void ifb_dev_free(struct net_device *dev) +{ + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; + int i; + + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + tasklet_kill(&txp->ifb_tasklet); + __skb_queue_purge(&txp->rq); + __skb_queue_purge(&txp->tq); + } + kfree(dp->tx_private); + free_netdev(dev); +} + static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ - dev->destructor = free_netdev; dev->netdev_ops = &ifb_netdev_ops; /* Fill in device structure with ethernet-generic values. */ @@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); eth_hw_addr_random(dev); + dev->destructor = ifb_dev_free; } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); + struct ifb_dev_private *dp = netdev_priv(dev); u32 from = G_TC_FROM(skb->tc_verd); + struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); - u64_stats_update_begin(&dp->rsync); - dp->rx_packets++; - dp->rx_bytes += skb->len; - u64_stats_update_end(&dp->rsync); + u64_stats_update_begin(&txp->rsync); + txp->rx_packets++; + txp->rx_bytes += skb->len; + u64_stats_update_end(&txp->rsync); if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) { dev_kfree_skb(skb); @@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { - netif_stop_queue(dev); - } + if (skb_queue_len(&txp->rq) >= dev->tx_queue_len) + netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum)); - __skb_queue_tail(&dp->rq, skb); - if (!dp->tasklet_pending) { - dp->tasklet_pending = 1; - tasklet_schedule(&dp->ifb_tasklet); + __skb_queue_tail(&txp->rq, skb); + if (!txp->tasklet_pending) { + txp->tasklet_pending = 1; + tasklet_schedule(&txp->ifb_tasklet); } return NETDEV_TX_OK; @@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) static int ifb_close(struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); - - tasklet_kill(&dp->ifb_tasklet); - netif_stop_queue(dev); - __skb_queue_purge(&dp->rq); - __skb_queue_purge(&dp->tq); + netif_tx_stop_all_queues(dev); return 0; } static int ifb_open(struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); - - tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); 
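[Annotation] ifb_dev_init() above sizes one ifb_q_private per TX queue with kcalloc() and records each queue's own index, so ifb_xmit() can go straight from skb_get_queue_mapping() to the right per-queue state with pointer arithmetic. A stripped-down plain-C sketch of that allocation, queue contents elided:

#include <stdlib.h>

struct q_private {
	unsigned int qnum;
	/* per-queue counters, packet queues, ... */
};

/* One private block per TX queue, each remembering its own index, as
 * ifb_dev_init() does with kcalloc(). */
static struct q_private *alloc_q_priv(unsigned int nqueues)
{
	struct q_private *txp = calloc(nqueues, sizeof(*txp));

	if (!txp)
		return NULL;
	for (unsigned int i = 0; i < nqueues; i++)
		txp[i].qnum = i;
	return txp;
}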
- __skb_queue_head_init(&dp->rq); - __skb_queue_head_init(&dp->tq); - netif_start_queue(dev); - + netif_tx_start_all_queues(dev); return 0; } @@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) static struct rtnl_link_ops ifb_link_ops __read_mostly = { .kind = "ifb", - .priv_size = sizeof(struct ifb_private), + .priv_size = sizeof(struct ifb_dev_private), .setup = ifb_setup, .validate = ifb_validate, }; -/* Number of ifb devices to be set up by this module. */ +/* Number of ifb devices to be set up by this module. + * Note that these legacy devices have one queue. + * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb + */ +static int numifbs = 2; module_param(numifbs, int, 0); MODULE_PARM_DESC(numifbs, "Number of ifb devices"); static int __init ifb_init_one(int index) { struct net_device *dev_ifb; - struct ifb_private *dp; int err; - dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d", + dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d", NET_NAME_UNKNOWN, ifb_setup); if (!dev_ifb) return -ENOMEM; - dp = netdev_priv(dev_ifb); - u64_stats_init(&dp->rsync); - u64_stats_init(&dp->tsync); - dev_ifb->rtnl_link_ops = &ifb_link_ops; err = register_netdevice(dev_ifb); if (err < 0) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index cb86d7a01542..c07030dbe748 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -14,6 +14,11 @@ if PHYLIB comment "MII PHY device drivers" +config AQUANTIA_PHY + tristate "Drivers for the Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + config AT803X_PHY tristate "Drivers for Atheros AT803X PHYs" ---help--- @@ -54,6 +59,11 @@ config VITESSE_PHY ---help--- Currently supports the vsc8244 +config TERANETICS_PHY + tristate "Drivers for the Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 + config SMSC_PHY tristate "Drivers for SMSC PHYs" ---help--- @@ -145,13 +155,13 @@ config MDIO_GPIO will be called mdio-gpio. config MDIO_OCTEON - tristate "Support for MDIO buses on Octeon SOCs" - depends on CAVIUM_OCTEON_SOC - default y + tristate "Support for MDIO buses on Octeon and ThunderX SOCs" + depends on 64BIT help - This module provides a driver for the Octeon MDIO busses. - It is required by the Octeon Ethernet device drivers. + This module provides a driver for the Octeon and ThunderX MDIO + busses. It is required by the Octeon and ThunderX ethernet device + drivers. If in doubt, say Y. diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index fcc25a0c45cd..9bb103358c74 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,12 +3,14 @@ libphy-objs := phy.o phy_device.o mdio_bus.o obj-$(CONFIG_PHYLIB) += libphy.o +obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_CICADA_PHY) += cicada.o obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_SMSC_PHY) += smsc.o +obj-$(CONFIG_TERANETICS_PHY) += teranetics.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_BROADCOM_PHY) += broadcom.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c new file mode 100644 index 000000000000..73d347d7cb04 --- /dev/null +++ b/drivers/net/phy/aquantia.c @@ -0,0 +1,152 @@ +/* + * Driver for Aquantia PHY + * + * Author: Shaohui Xie <Shaohui.Xie@freescale.com> + * + * Copyright 2015 Freescale Semiconductor, Inc. 
+ * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/phy.h> +#include <linux/mdio.h> + +#define PHY_ID_AQ1202 0x03a1b445 +#define PHY_ID_AQ2104 0x03a1b460 +#define PHY_ID_AQR105 0x03a1b4a2 +#define PHY_ID_AQR405 0x03a1b4b0 + +#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ + SUPPORTED_1000baseT_Full | \ + SUPPORTED_100baseT_Full | \ + PHY_DEFAULT_FEATURES) + +static int aquantia_config_aneg(struct phy_device *phydev) +{ + phydev->supported = PHY_AQUANTIA_FEATURES; + phydev->advertising = phydev->supported; + + return 0; +} + +static int aquantia_aneg_done(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE); +} + +static int aquantia_read_status(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (reg & MDIO_STAT1_LSTATUS) + phydev->link = 1; + else + phydev->link = 0; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); + mdelay(10); + reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); + + switch (reg) { + case 0x9: + phydev->speed = SPEED_2500; + break; + case 0x5: + phydev->speed = SPEED_1000; + break; + case 0x3: + phydev->speed = SPEED_100; + break; + case 0x7: + default: + phydev->speed = SPEED_10000; + break; + } + phydev->duplex = DUPLEX_FULL; + + return 0; +} + +static struct phy_driver aquantia_driver[] = { +{ + .phy_id = PHY_ID_AQ1202, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQ1202", + .features = PHY_AQUANTIA_FEATURES, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +{ + .phy_id = PHY_ID_AQ2104, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQ2104", + .features = PHY_AQUANTIA_FEATURES, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +{ + .phy_id = PHY_ID_AQR105, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQR105", + .features = PHY_AQUANTIA_FEATURES, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +{ + .phy_id = PHY_ID_AQR405, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQR405", + .features = PHY_AQUANTIA_FEATURES, + .aneg_done = aquantia_aneg_done, + .config_aneg = aquantia_config_aneg, + .read_status = aquantia_read_status, + .driver = { .owner = THIS_MODULE,}, +}, +}; + +static int __init aquantia_init(void) +{ + return phy_drivers_register(aquantia_driver, + ARRAY_SIZE(aquantia_driver)); +} + +static void __exit aquantia_exit(void) +{ + return phy_drivers_unregister(aquantia_driver, + ARRAY_SIZE(aquantia_driver)); +} + +module_init(aquantia_init); +module_exit(aquantia_exit); + +static struct mdio_device_id __maybe_unused aquantia_tbl[] = { + { PHY_ID_AQ1202, 0xfffffff0 }, + { PHY_ID_AQ2104, 0xfffffff0 }, + { PHY_ID_AQR105, 0xfffffff0 }, + { PHY_ID_AQR405, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, aquantia_tbl); + +MODULE_DESCRIPTION("Aquantia PHY driver"); +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>"); 
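[Annotation] aquantia_read_status() above reads MDIO_STAT1 twice in a row on purpose: the clause-45 link-status bit is latched-low, so the first read returns (and clears) any remembered link-down event and only the second read reflects the live state; the same double read brackets the vendor speed register 0xc800 around a 10 ms settle delay. The idiom in isolation, assuming the kernel's <linux/phy.h> and <linux/mdio.h> as the driver does:

/* Latched-low status: read once to flush the stale latch, read again
 * for the current value. */
static int read_link_latched(struct phy_device *phydev)
{
	int reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);	/* clears latch */
	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);	/* live status */
	return (reg < 0) ? reg : !!(reg & MDIO_STAT1_LSTATUS);
}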
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 00cb41e71312..185b03c08e16 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info) info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 8a3bf5469892..32f10662f4ac 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -123,12 +123,8 @@ static int dp83867_of_init(struct phy_device *phydev) if (ret) return ret; - ret = of_property_read_u32(of_node, "ti,fifo-depth", + return of_property_read_u32(of_node, "ti,fifo-depth", &dp83867->fifo_depth); - if (ret) - return ret; - - return 0; } #else static int dp83867_of_init(struct phy_device *phydev) diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 1960b46add65..479b93f9581c 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) u16 lpagb = 0; u16 lpa = 0; + if (!fp->status.link) + goto done; + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; + if (fp->status.duplex) { bmcr |= BMCR_FULLDPLX; @@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) } } - if (fp->status.link) - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - if (fp->status.pause) lpa |= LPA_PAUSE_CAP; if (fp->status.asym_pause) lpa |= LPA_PAUSE_ASYM; +done: fp->regs[MII_PHYSID1] = 0; fp->regs[MII_PHYSID2] = 0; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f721444c2b0a..3320a179ee36 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -48,6 +48,8 @@ #define MII_M1011_IMASK_CLEAR 0x0000 #define MII_M1011_PHY_SCR 0x10 +#define MII_M1011_PHY_SCR_MDI 0x0000 +#define MII_M1011_PHY_SCR_MDI_X 0x0020 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 #define MII_M1145_PHY_EXT_SR 0x1b @@ -159,6 +161,43 @@ static int marvell_config_intr(struct phy_device *phydev) return err; } +static int marvell_set_polarity(struct phy_device *phydev, int polarity) +{ + int reg; + int err; + int val; + + /* get the current settings */ + reg = phy_read(phydev, MII_M1011_PHY_SCR); + if (reg < 0) + return reg; + + val = reg; + val &= ~MII_M1011_PHY_SCR_AUTO_CROSS; + switch (polarity) { + case ETH_TP_MDI: + val |= MII_M1011_PHY_SCR_MDI; + break; + case ETH_TP_MDI_X: + val |= MII_M1011_PHY_SCR_MDI_X; + break; + case ETH_TP_MDI_AUTO: + case ETH_TP_MDI_INVALID: + default: + val |= MII_M1011_PHY_SCR_AUTO_CROSS; + break; + } + + if (val != reg) { + /* Set the new polarity value in the register */ + err = phy_write(phydev, MII_M1011_PHY_SCR, val); + if (err) + return err; + } + + return 0; +} + static int marvell_config_aneg(struct phy_device *phydev) { int err; @@ -191,8 +230,7 @@ static int marvell_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, 
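[Annotation] marvell_set_polarity() above is a classic read-modify-write: fetch the SCR register, clear both crossover-control bits, OR in the requested MDI mode, and write back only if the value actually changed, saving a pointless MDIO cycle. The shape of that guard, with rd()/wr() as hypothetical register accessors:

#include <stdint.h>

/* Clear mask, set field, and skip the bus write when nothing changed,
 * as marvell_set_polarity() does with MII_M1011_PHY_SCR. */
static void rmw_field(uint16_t (*rd)(void), void (*wr)(uint16_t),
		      uint16_t mask, uint16_t field)
{
	uint16_t old = rd();
	uint16_t val = (uint16_t)((old & ~mask) | field);

	if (val != old)
		wr(val);
}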
MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix); if (err < 0) return err; diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index c838ad6155f7..fcf4e4df7cc8 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -7,6 +7,7 @@ */ #include <linux/platform_device.h> +#include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/delay.h> #include <linux/module.h> @@ -14,11 +15,12 @@ #include <linux/phy.h> #include <linux/io.h> +#ifdef CONFIG_CAVIUM_OCTEON_SOC #include <asm/octeon/octeon.h> -#include <asm/octeon/cvmx-smix-defs.h> +#endif -#define DRV_VERSION "1.0" -#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver" +#define DRV_VERSION "1.1" +#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver" #define SMI_CMD 0x0 #define SMI_WR_DAT 0x8 @@ -26,6 +28,79 @@ #define SMI_CLK 0x18 #define SMI_EN 0x20 +#ifdef __BIG_ENDIAN_BITFIELD +#define OCT_MDIO_BITFIELD_FIELD(field, more) \ + field; \ + more + +#else +#define OCT_MDIO_BITFIELD_FIELD(field, more) \ + more \ + field; + +#endif + +union cvmx_smix_clk { + u64 u64; + struct cvmx_smix_clk_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39, + OCT_MDIO_BITFIELD_FIELD(u64 mode:1, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3, + OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5, + OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1, + OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1, + OCT_MDIO_BITFIELD_FIELD(u64 preamble:1, + OCT_MDIO_BITFIELD_FIELD(u64 sample:4, + OCT_MDIO_BITFIELD_FIELD(u64 phase:8, + ;)))))))))) + } s; +}; + +union cvmx_smix_cmd { + u64 u64; + struct cvmx_smix_cmd_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3, + OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3, + OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5, + ;)))))) + } s; +}; + +union cvmx_smix_en { + u64 u64; + struct cvmx_smix_en_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63, + OCT_MDIO_BITFIELD_FIELD(u64 en:1, + ;)) + } s; +}; + +union cvmx_smix_rd_dat { + u64 u64; + struct cvmx_smix_rd_dat_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 pending:1, + OCT_MDIO_BITFIELD_FIELD(u64 val:1, + OCT_MDIO_BITFIELD_FIELD(u64 dat:16, + ;)))) + } s; +}; + +union cvmx_smix_wr_dat { + u64 u64; + struct cvmx_smix_wr_dat_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 pending:1, + OCT_MDIO_BITFIELD_FIELD(u64 val:1, + OCT_MDIO_BITFIELD_FIELD(u64 dat:16, + ;)))) + } s; +}; + enum octeon_mdiobus_mode { UNINIT = 0, C22, @@ -41,6 +116,21 @@ struct octeon_mdiobus { int phy_irq[PHY_MAX_ADDR]; }; +#ifdef CONFIG_CAVIUM_OCTEON_SOC +static void oct_mdio_writeq(u64 val, u64 addr) +{ + cvmx_write_csr(addr, val); +} + +static u64 oct_mdio_readq(u64 addr) +{ + return cvmx_read_csr(addr); +} +#else +#define oct_mdio_writeq(val, addr) writeq_relaxed(val, (void *)addr) +#define oct_mdio_readq(addr) readq_relaxed((void *)addr) +#endif + static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p, enum octeon_mdiobus_mode m) { @@ -49,10 +139,10 @@ static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p, if (m == p->mode) return; - smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK); + smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK); smi_clk.s.mode = (m == C45) ? 
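[Annotation] The OCT_MDIO_BITFIELD_FIELD macro above lets one struct describe a 64-bit hardware register for both bitfield endiannesses: fields are listed most-significant first, and the nested macro emits them in that order on big-endian builds but reversed on little-endian ones, where C bitfields are allocated from the least significant bit up. A two-field reduction of the trick (the stray trailing semicolon from the innermost expansion is a GCC extension, exactly as in the driver):

#include <stdint.h>

#ifdef __BIG_ENDIAN_BITFIELD
#define BF(field, more) field; more
#else
#define BF(field, more) more field;
#endif

/* "hi" occupies the top 32 bits and "lo" the bottom 32, regardless of
 * which way the compiler packs bitfields. */
union reg64 {
	uint64_t u64;
	struct {
		BF(uint64_t hi:32,
		BF(uint64_t lo:32,
		;))
	} s;
};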
1 : 0; smi_clk.s.preamble = 1; - cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64); + oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK); p->mode = m; } @@ -67,7 +157,7 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p, smi_wr.u64 = 0; smi_wr.s.dat = regnum & 0xffff; - cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); regnum = (regnum >> 16) & 0x1f; @@ -75,14 +165,14 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p, smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */ smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); - smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) @@ -114,14 +204,14 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) smi_cmd.s.phy_op = op; smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. */ __delay(1000); - smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT); + smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); } while (smi_rd.s.pending && --timeout); if (smi_rd.s.val) @@ -153,20 +243,20 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, smi_wr.u64 = 0; smi_wr.s.dat = val; - cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); smi_cmd.u64 = 0; smi_cmd.s.phy_op = op; smi_cmd.s.phy_adr = phy_id; smi_cmd.s.reg_adr = regnum; - cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); do { /* Wait 1000 clocks so we don't saturate the RSL bus * doing reads. 
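[Annotation] octeon_mdiobus_c45_addr() above implements the clause-45 address phase, and the regnum it is handed uses the kernel's packed encoding: the MMD device address lives in bits 20:16 and the 16-bit register address in bits 15:0, which is exactly how the function splits it before kicking off the SMI command. The split in isolation:

#include <stdint.h>

/* Clause-45 register numbers pack devad into bits 20:16 and the
 * register address into bits 15:0; the driver above unpacks them
 * with the same shifts and masks. */
static inline unsigned int c45_devad(uint32_t regnum)
{
	return (regnum >> 16) & 0x1f;
}

static inline uint16_t c45_reg(uint32_t regnum)
{
	return regnum & 0xffff;
}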
*/ __delay(1000); - smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); } while (smi_wr.s.pending && --timeout); if (timeout <= 0) @@ -187,30 +277,34 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENOMEM; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem == NULL) { dev_err(&pdev->dev, "found no memory resource\n"); - err = -ENXIO; - goto fail; + return -ENXIO; } + bus->mdio_phys = res_mem->start; bus->regsize = resource_size(res_mem); + if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize, res_mem->name)) { dev_err(&pdev->dev, "request_mem_region failed\n"); - goto fail; + return -ENXIO; } + bus->register_base = (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize); + if (!bus->register_base) { + dev_err(&pdev->dev, "dev_ioremap failed\n"); + return -ENOMEM; + } bus->mii_bus = mdiobus_alloc(); - if (!bus->mii_bus) goto fail; smi_en.u64 = 0; smi_en.s.en = 1; - cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); bus->mii_bus->priv = bus; bus->mii_bus->irq = bus->phy_irq; @@ -234,7 +328,7 @@ fail_register: mdiobus_free(bus->mii_bus); fail: smi_en.u64 = 0; - cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); return err; } @@ -248,7 +342,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev) mdiobus_unregister(bus->mii_bus); mdiobus_free(bus->mii_bus); smi_en.u64 = 0; - cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64); + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); return 0; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b2197b506acb..84b1fba58ac3 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) phydev->duplex = cmd->duplex; + phydev->mdix = cmd->eth_tp_mdix_ctrl; + /* Restart the PHY */ phy_start_aneg(phydev); @@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) cmd->transceiver = phy_is_internal(phydev) ? 
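[Annotation] The probe rework above leans on devm_-managed resources: devm_request_mem_region() and devm_ioremap() are released automatically when probe fails or the device is unbound, so most error paths collapse into plain returns instead of goto-based unwinding. A hedged sketch of that shape, assuming the usual platform-device headers:

#include <linux/platform_device.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), res->name))
		return -EBUSY;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* base stays mapped until probe failure or unbind; no explicit
	 * cleanup is needed on the error paths above. */
	return 0;
}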
XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; + cmd->eth_tp_mdix_ctrl = phydev->mdix; return 0; } diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 46530159256b..f091d691cf6f 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -209,8 +209,6 @@ static int ks8995_reset(struct ks8995_switch *ks) return ks8995_start(ks); } -/* ------------------------------------------------------------------------ */ - static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -220,19 +218,9 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj, dev = container_of(kobj, struct device, kobj); ks8995 = dev_get_drvdata(dev); - if (unlikely(off > ks8995->regs_attr.size)) - return 0; - - if ((off + count) > ks8995->regs_attr.size) - count = ks8995->regs_attr.size - off; - - if (unlikely(!count)) - return count; - return ks8995_read(ks8995, buf, off, count); } - static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -242,19 +230,9 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, dev = container_of(kobj, struct device, kobj); ks8995 = dev_get_drvdata(dev); - if (unlikely(off >= ks8995->regs_attr.size)) - return -EFBIG; - - if ((off + count) > ks8995->regs_attr.size) - count = ks8995->regs_attr.size - off; - - if (unlikely(!count)) - return count; - return ks8995_write(ks8995, buf, off, count); } - static const struct bin_attribute ks8995_registers_attr = { .attr = { .name = "registers", diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c new file mode 100644 index 000000000000..91e1bec6079f --- /dev/null +++ b/drivers/net/phy/teranetics.c @@ -0,0 +1,135 @@ +/* + * Driver for Teranetics PHY + * + * Author: Shaohui Xie <Shaohui.Xie@freescale.com> + * + * Copyright 2015 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/mdio.h> +#include <linux/phy.h> + +MODULE_DESCRIPTION("Teranetics PHY driver"); +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>"); +MODULE_LICENSE("GPL v2"); + +#define PHY_ID_TN2020 0x00a19410 +#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 +#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 +#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 +#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 +#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 + +#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ + MDIO_PHYXS_LNSTAT_SYNC1 | \ + MDIO_PHYXS_LNSTAT_SYNC2 | \ + MDIO_PHYXS_LNSTAT_SYNC3 | \ + MDIO_PHYXS_LNSTAT_ALIGN) + +static int teranetics_config_init(struct phy_device *phydev) +{ + phydev->supported = SUPPORTED_10000baseT_Full; + phydev->advertising = SUPPORTED_10000baseT_Full; + + return 0; +} + +static int teranetics_soft_reset(struct phy_device *phydev) +{ + return 0; +} + +static int teranetics_aneg_done(struct phy_device *phydev) +{ + int reg; + + /* auto negotiation state can only be checked when using copper + * port, if using fiber port, just lie it's done. + */ + if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); + } + + return 1; +} + +static int teranetics_config_aneg(struct phy_device *phydev) +{ + return 0; +} + +static int teranetics_read_status(struct phy_device *phydev) +{ + int reg; + + phydev->link = 1; + + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + + if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { + reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); + if (reg < 0 || + !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { + phydev->link = 0; + return 0; + } + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) + phydev->link = 0; + } + + return 0; +} + +static int teranetics_match_phy_device(struct phy_device *phydev) +{ + return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; +} + +static struct phy_driver teranetics_driver[] = { +{ + .phy_id = PHY_ID_TN2020, + .phy_id_mask = 0xffffffff, + .name = "Teranetics TN2020", + .soft_reset = teranetics_soft_reset, + .aneg_done = teranetics_aneg_done, + .config_init = teranetics_config_init, + .config_aneg = teranetics_config_aneg, + .read_status = teranetics_read_status, + .match_phy_device = teranetics_match_phy_device, + .driver = { .owner = THIS_MODULE,}, +}, +}; + +static int __init teranetics_init(void) +{ + return phy_drivers_register(teranetics_driver, + ARRAY_SIZE(teranetics_driver)); +} + +static void __exit teranetics_exit(void) +{ + return phy_drivers_unregister(teranetics_driver, + ARRAY_SIZE(teranetics_driver)); +} + +module_init(teranetics_init); +module_exit(teranetics_exit); + +static struct mdio_device_id __maybe_unused teranetics_tbl[] = { + { PHY_ID_TN2020, 0xffffffff }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, teranetics_tbl); diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 7ba8d0885f12..1610b79ae386 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -106,6 +106,16 @@ config USB_RTL8152 To compile this driver as a module, choose M here: the module will be called r8152. +config USB_LAN78XX + tristate "Microchip LAN78XX Based USB Ethernet Adapters" + select MII + help + This option adds support for Microchip LAN78XX based USB 2 + & USB 3 10/100/1000 Ethernet adapters. + + To compile this driver as a module, choose M here: the + module will be called lan78xx. + config USB_USBNET tristate "Multi-purpose USB Networking Framework" select MII diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index e2797f1e1b31..cf6a0e610a7f 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o obj-$(CONFIG_USB_RTL8150) += rtl8150.o obj-$(CONFIG_USB_RTL8152) += r8152.o obj-$(CONFIG_USB_HSO) += hso.o +obj-$(CONFIG_USB_LAN78XX) += lan78xx.o obj-$(CONFIG_USB_NET_AX8817X) += asix.o asix-y := asix_devices.o asix_common.o ax88172a.o obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c new file mode 100644 index 000000000000..ec8bd34ce47b --- /dev/null +++ b/drivers/net/usb/lan78xx.c @@ -0,0 +1,3530 @@ +/* + * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
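[Annotation] teranetics_read_status() above declares the link up only when all four XAUI lane-sync bits and the alignment bit are set at once, which is why it compares (reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY rather than testing for any non-zero overlap. The distinction in miniature:

#include <stdbool.h>
#include <stdint.h>

/* A bare (val & mask) is true with a single synced lane; requiring
 * equality means every bit in mask must be set. */
static inline bool all_lanes_ready(uint16_t val, uint16_t mask)
{
	return (val & mask) == mask;
}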
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/version.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/if_vlan.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/mdio.h> +#include <net/ip6_checksum.h> +#include "lan78xx.h" + +#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" +#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" +#define DRIVER_NAME "lan78xx" +#define DRIVER_VERSION "1.0.0" + +#define TX_TIMEOUT_JIFFIES (5 * HZ) +#define THROTTLE_JIFFIES (HZ / 8) +#define UNLINK_TIMEOUT_MS 3 + +#define RX_MAX_QUEUE_MEMORY (60 * 1518) + +#define SS_USB_PKT_SIZE (1024) +#define HS_USB_PKT_SIZE (512) +#define FS_USB_PKT_SIZE (64) + +#define MAX_RX_FIFO_SIZE (12 * 1024) +#define MAX_TX_FIFO_SIZE (12 * 1024) +#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE) +#define DEFAULT_BULK_IN_DELAY (0x0800) +#define MAX_SINGLE_PACKET_SIZE (9000) +#define DEFAULT_TX_CSUM_ENABLE (true) +#define DEFAULT_RX_CSUM_ENABLE (true) +#define DEFAULT_TSO_CSUM_ENABLE (true) +#define DEFAULT_VLAN_FILTER_ENABLE (true) +#define INTERNAL_PHY_ID (2) /* 2: GMII */ +#define TX_OVERHEAD (8) +#define RXW_PADDING 2 + +#define LAN78XX_USB_VENDOR_ID (0x0424) +#define LAN7800_USB_PRODUCT_ID (0x7800) +#define LAN7850_USB_PRODUCT_ID (0x7850) +#define LAN78XX_EEPROM_MAGIC (0x78A5) +#define LAN78XX_OTP_MAGIC (0x78F3) + +#define MII_READ 1 +#define MII_WRITE 0 + +#define EEPROM_INDICATOR (0xA5) +#define EEPROM_MAC_OFFSET (0x01) +#define MAX_EEPROM_SIZE 512 +#define OTP_INDICATOR_1 (0xF3) +#define OTP_INDICATOR_2 (0xF7) + +#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \ + WAKE_MCAST | WAKE_BCAST | \ + WAKE_ARP | WAKE_MAGIC) + +/* USB related defines */ +#define BULK_IN_PIPE 1 +#define BULK_OUT_PIPE 2 + +/* default autosuspend delay (mSec)*/ +#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000) + +static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = { + "RX FCS Errors", + "RX Alignment Errors", + "Rx Fragment Errors", + "RX Jabber Errors", + "RX Undersize Frame Errors", + "RX Oversize Frame Errors", + "RX Dropped Frames", + "RX Unicast Byte Count", + "RX Broadcast Byte Count", + "RX Multicast Byte Count", + "RX Unicast Frames", + "RX Broadcast Frames", + "RX Multicast Frames", + "RX Pause Frames", + "RX 64 Byte Frames", + "RX 65 - 127 Byte Frames", + "RX 128 - 255 Byte Frames", + "RX 256 - 511 Bytes Frames", + "RX 512 - 1023 Byte Frames", + "RX 1024 - 1518 Byte Frames", + "RX Greater 1518 Byte Frames", + "EEE RX LPI Transitions", + "EEE RX LPI Time", + "TX FCS Errors", + "TX Excess Deferral Errors", + "TX Carrier Errors", + "TX Bad Byte Count", + "TX Single Collisions", + "TX Multiple Collisions", + "TX Excessive Collision", + "TX Late Collisions", + "TX Unicast Byte Count", + "TX Broadcast Byte Count", + "TX Multicast Byte Count", + "TX Unicast Frames", + "TX Broadcast Frames", + "TX Multicast Frames", + "TX Pause Frames", + "TX 64 Byte Frames", + "TX 65 - 127 Byte Frames", 
+ "TX 128 - 255 Byte Frames", + "TX 256 - 511 Bytes Frames", + "TX 512 - 1023 Byte Frames", + "TX 1024 - 1518 Byte Frames", + "TX Greater 1518 Byte Frames", + "EEE TX LPI Transitions", + "EEE TX LPI Time", +}; + +struct lan78xx_statstage { + u32 rx_fcs_errors; + u32 rx_alignment_errors; + u32 rx_fragment_errors; + u32 rx_jabber_errors; + u32 rx_undersize_frame_errors; + u32 rx_oversize_frame_errors; + u32 rx_dropped_frames; + u32 rx_unicast_byte_count; + u32 rx_broadcast_byte_count; + u32 rx_multicast_byte_count; + u32 rx_unicast_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; + u32 rx_64_byte_frames; + u32 rx_65_127_byte_frames; + u32 rx_128_255_byte_frames; + u32 rx_256_511_bytes_frames; + u32 rx_512_1023_byte_frames; + u32 rx_1024_1518_byte_frames; + u32 rx_greater_1518_byte_frames; + u32 eee_rx_lpi_transitions; + u32 eee_rx_lpi_time; + u32 tx_fcs_errors; + u32 tx_excess_deferral_errors; + u32 tx_carrier_errors; + u32 tx_bad_byte_count; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_excessive_collision; + u32 tx_late_collisions; + u32 tx_unicast_byte_count; + u32 tx_broadcast_byte_count; + u32 tx_multicast_byte_count; + u32 tx_unicast_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; + u32 tx_64_byte_frames; + u32 tx_65_127_byte_frames; + u32 tx_128_255_byte_frames; + u32 tx_256_511_bytes_frames; + u32 tx_512_1023_byte_frames; + u32 tx_1024_1518_byte_frames; + u32 tx_greater_1518_byte_frames; + u32 eee_tx_lpi_transitions; + u32 eee_tx_lpi_time; +}; + +struct lan78xx_net; + +struct lan78xx_priv { + struct lan78xx_net *dev; + u32 rfe_ctl; + u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */ + u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */ + u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; + struct mutex dataport_mutex; /* for dataport access */ + spinlock_t rfe_ctl_lock; /* for rfe register access */ + struct work_struct set_multicast; + struct work_struct set_vlan; + u32 wol; +}; + +enum skb_state { + illegal = 0, + tx_start, + tx_done, + rx_start, + rx_done, + rx_cleanup, + unlink_start +}; + +struct skb_data { /* skb->cb is one of these */ + struct urb *urb; + struct lan78xx_net *dev; + enum skb_state state; + size_t length; +}; + +struct usb_context { + struct usb_ctrlrequest req; + struct lan78xx_net *dev; +}; + +#define EVENT_TX_HALT 0 +#define EVENT_RX_HALT 1 +#define EVENT_RX_MEMORY 2 +#define EVENT_STS_SPLIT 3 +#define EVENT_LINK_RESET 4 +#define EVENT_RX_PAUSED 5 +#define EVENT_DEV_WAKING 6 +#define EVENT_DEV_ASLEEP 7 +#define EVENT_DEV_OPEN 8 + +struct lan78xx_net { + struct net_device *net; + struct usb_device *udev; + struct usb_interface *intf; + void *driver_priv; + + int rx_qlen; + int tx_qlen; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct sk_buff_head done; + struct sk_buff_head rxq_pause; + struct sk_buff_head txq_pend; + + struct tasklet_struct bh; + struct delayed_work wq; + + struct usb_host_endpoint *ep_blkin; + struct usb_host_endpoint *ep_blkout; + struct usb_host_endpoint *ep_intr; + + int msg_enable; + + struct urb *urb_intr; + struct usb_anchor deferred; + + struct mutex phy_mutex; /* for phy access */ + unsigned pipe_in, pipe_out, pipe_intr; + + u32 hard_mtu; /* count any extra framing */ + size_t rx_urb_size; /* size for rx urbs */ + + unsigned long flags; + + wait_queue_head_t *wait; + unsigned char suspend_count; + + unsigned maxpacket; + struct timer_list delay; + + unsigned long data[5]; + struct mii_if_info mii; + + int link_on; + 
u8 mdix_ctrl; +}; + +/* use ethtool to change the level for any given device */ +static int msg_level = -1; +module_param(msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Override default message level"); + +static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) +{ + u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); + int ret; + + BUG_ON(!dev); + + if (!buf) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_READ_REGISTER, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, buf, 4, USB_CTRL_GET_TIMEOUT); + if (likely(ret >= 0)) { + le32_to_cpus(buf); + *data = *buf; + } else { + netdev_warn(dev->net, + "Failed to read register index 0x%08x. ret = %d", + index, ret); + } + + kfree(buf); + + return ret; +} + +static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) +{ + u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); + int ret; + + BUG_ON(!dev); + + if (!buf) + return -ENOMEM; + + *buf = data; + cpu_to_le32s(buf); + + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_WRITE_REGISTER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, index, buf, 4, USB_CTRL_SET_TIMEOUT); + if (unlikely(ret < 0)) { + netdev_warn(dev->net, + "Failed to write register index 0x%08x. ret = %d", + index, ret); + } + + kfree(buf); + + return ret; +} + +static int lan78xx_read_stats(struct lan78xx_net *dev, + struct lan78xx_statstage *data) +{ + int ret = 0; + int i; + struct lan78xx_statstage *stats; + u32 *src; + u32 *dst; + + BUG_ON(!dev); + BUG_ON(!data); + BUG_ON(sizeof(struct lan78xx_statstage) != 0xBC); + + stats = kmalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; + + ret = usb_control_msg(dev->udev, + usb_rcvctrlpipe(dev->udev, 0), + USB_VENDOR_REQUEST_GET_STATS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, + 0, + (void *)stats, + sizeof(*stats), + USB_CTRL_SET_TIMEOUT); + if (likely(ret >= 0)) { + src = (u32 *)stats; + dst = (u32 *)data; + for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) { + le32_to_cpus(&src[i]); + dst[i] = src[i]; + } + } else { + netdev_warn(dev->net, + "Failed to read stat ret = 0x%x", ret); + } + + kfree(stats); + + return ret; +} + +/* Loop until the read is completed with timeout called with phy_mutex held */ +static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = lan78xx_read_reg(dev, MII_ACC, &val); + if (unlikely(ret < 0)) + return -EIO; + + if (!(val & MII_ACC_MII_BUSY_)) + return 0; + } while (!time_after(jiffies, start_time + HZ)); + + return -EIO; +} + +static inline u32 mii_access(int id, int index, int read) +{ + u32 ret; + + ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_; + ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_; + if (read) + ret |= MII_ACC_MII_READ_; + else + ret |= MII_ACC_MII_WRITE_; + ret |= MII_ACC_MII_BUSY_; + + return ret; +} + +static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* set the address, index & direction (read from PHY) */ + phy_id &= dev->mii.phy_id_mask; + idx &= dev->mii.reg_num_mask; + addr = mii_access(phy_id, idx, MII_READ); + ret = 
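[Annotation] lan78xx_read_reg() and lan78xx_write_reg() above allocate a 4-byte kmalloc() bounce buffer for every transfer instead of handing usb_control_msg() a stack variable: USB transfer buffers must be DMA-able, which on-stack memory is not, and le32_to_cpus()/cpu_to_le32s() then fix the wire byte order in place. The read path's buffer handling, reduced to its skeleton with the same kernel USB calls the driver uses:

u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);	/* DMA-able, never on stack */
int ret;

if (!buf)
	return -ENOMEM;
ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
		      USB_VENDOR_REQUEST_READ_REGISTER,
		      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
if (ret >= 0) {
	le32_to_cpus(buf);	/* device registers are little-endian */
	*data = *buf;
}
kfree(buf);
return ret;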
lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + ret = lan78xx_read_reg(dev, MII_DATA, &val); + + ret = (int)(val & 0xFFFF); + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); + return ret; +} + +static void lan78xx_mdio_write(struct net_device *netdev, int phy_id, + int idx, int regval) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + val = regval; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + /* set the address, index & direction (write to PHY) */ + phy_id &= dev->mii.phy_id_mask; + idx &= dev->mii.reg_num_mask; + addr = mii_access(phy_id, idx, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); +} + +static void lan78xx_mmd_write(struct net_device *netdev, int phy_id, + int mmddev, int mmdidx, int regval) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + mmddev &= 0x1F; + + /* set up device address for MMD */ + ret = lan78xx_write_reg(dev, MII_DATA, mmddev); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register of MMD */ + val = mmdidx; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register data for MMD */ + val = PHY_MMD_CTRL_OP_DNI_ | mmddev; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* write to MMD */ + val = regval; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); +} + +static int lan78xx_mmd_read(struct net_device *netdev, int phy_id, + int mmddev, int mmdidx) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + u32 val, addr; + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + mutex_lock(&dev->phy_mutex); + + /* confirm MII not busy */ + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* set up device address for MMD */ + ret = lan78xx_write_reg(dev, MII_DATA, mmddev); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register of MMD */ + val = mmdidx; + ret = 
lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* select register data for MMD */ + val = PHY_MMD_CTRL_OP_DNI_ | mmddev; + ret = lan78xx_write_reg(dev, MII_DATA, val); + + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* set the address, index & direction (read from PHY) */ + phy_id &= dev->mii.phy_id_mask; + addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ); + ret = lan78xx_write_reg(dev, MII_ACC, addr); + + ret = lan78xx_phy_wait_not_busy(dev); + if (ret < 0) + goto done; + + /* read from MMD */ + ret = lan78xx_read_reg(dev, MII_DATA, &val); + + ret = (int)(val & 0xFFFF); + +done: + mutex_unlock(&dev->phy_mutex); + usb_autopm_put_interface(dev->intf); + return ret; +} + +static int lan78xx_wait_eeprom(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = lan78xx_read_reg(dev, E2P_CMD, &val); + if (unlikely(ret < 0)) + return -EIO; + + if (!(val & E2P_CMD_EPC_BUSY_) || + (val & E2P_CMD_EPC_TIMEOUT_)) + break; + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { + netdev_warn(dev->net, "EEPROM read operation timeout"); + return -EIO; + } + + return 0; +} + +static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev) +{ + unsigned long start_time = jiffies; + u32 val; + int ret; + + do { + ret = lan78xx_read_reg(dev, E2P_CMD, &val); + if (unlikely(ret < 0)) + return -EIO; + + if (!(val & E2P_CMD_EPC_BUSY_)) + return 0; + + usleep_range(40, 100); + } while (!time_after(jiffies, start_time + HZ)); + + netdev_warn(dev->net, "EEPROM is busy"); + return -EIO; +} + +static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u32 val; + int i, ret; + + BUG_ON(!dev); + BUG_ON(!data); + + ret = lan78xx_eeprom_confirm_not_busy(dev); + if (ret) + return ret; + + for (i = 0; i < length; i++) { + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; + val |= (offset & E2P_CMD_EPC_ADDR_MASK_); + ret = lan78xx_write_reg(dev, E2P_CMD, val); + if (unlikely(ret < 0)) + return -EIO; + + ret = lan78xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + ret = lan78xx_read_reg(dev, E2P_DATA, &val); + if (unlikely(ret < 0)) + return -EIO; + + data[i] = val & 0xFF; + offset++; + } + + return 0; +} + +static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u8 sig; + int ret; + + ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); + if ((ret == 0) && (sig == EEPROM_INDICATOR)) + ret = lan78xx_read_raw_eeprom(dev, offset, length, data); + else + ret = -EINVAL; + + return ret; +} + +static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, + u32 length, u8 *data) +{ + u32 val; + int i, ret; + + BUG_ON(!dev); + BUG_ON(!data); + + ret = lan78xx_eeprom_confirm_not_busy(dev); + if (ret) + return ret; + + /* Issue write/erase enable command */ + val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; + ret = lan78xx_write_reg(dev, E2P_CMD, val); + if (unlikely(ret < 0)) + return -EIO; + + ret = lan78xx_wait_eeprom(dev); + if (ret < 0) + return ret; + + for (i = 0; i < length; i++) { + /* Fill data register */ + val = data[i]; + ret = 
lan78xx_write_reg(dev, E2P_DATA, val);
+		if (ret < 0)
+			return ret;
+
+		/* Send "write" command */
+		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+		ret = lan78xx_write_reg(dev, E2P_CMD, val);
+		if (ret < 0)
+			return ret;
+
+		ret = lan78xx_wait_eeprom(dev);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+				u32 length, u8 *data)
+{
+	int i;
+	int ret;
+	u32 buf;
+	unsigned long timeout;
+
+	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+	if (buf & OTP_PWR_DN_PWRDN_N_) {
+		/* clear it and wait to be cleared */
+		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+		timeout = jiffies + HZ;
+		do {
+			usleep_range(1, 10);
+			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+			if (time_after(jiffies, timeout)) {
+				netdev_warn(dev->net,
+					    "timeout on OTP_PWR_DN");
+				return -EIO;
+			}
+		} while (buf & OTP_PWR_DN_PWRDN_N_);
+	}
+
+	for (i = 0; i < length; i++) {
+		ret = lan78xx_write_reg(dev, OTP_ADDR1,
+					((offset + i) >> 8) & OTP_ADDR1_15_11);
+		ret = lan78xx_write_reg(dev, OTP_ADDR2,
+					((offset + i) & OTP_ADDR2_10_3));
+
+		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+		timeout = jiffies + HZ;
+		do {
+			udelay(1);
+			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+			if (time_after(jiffies, timeout)) {
+				netdev_warn(dev->net,
+					    "timeout on OTP_STATUS");
+				return -EIO;
+			}
+		} while (buf & OTP_STATUS_BUSY_);
+
+		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+		data[i] = (u8)(buf & 0xFF);
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+			    u32 length, u8 *data)
+{
+	u8 sig;
+	int ret;
+
+	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+	if (ret == 0) {
+		if (sig == OTP_INDICATOR_2)
+			offset += 0x100;
+		else if (sig != OTP_INDICATOR_1)
+			return -EINVAL;
+		ret = lan78xx_read_raw_otp(dev, offset, length, data);
+	}
+
+	return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < 100; i++) {
+		u32 dp_sel;
+
+		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (dp_sel & DP_SEL_DPRDY_)
+			return 0;
+
+		usleep_range(40, 100);
+	}
+
+	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+	return -EIO;
+}
+
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+				  u32 addr, u32 length, u32 *buf)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 dp_sel;
+	int i, ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return 0;
+
+	mutex_lock(&pdata->dataport_mutex);
+
+	ret = lan78xx_dataport_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+	dp_sel &= ~DP_SEL_RSEL_MASK_;
+	dp_sel |= ram_select;
+	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+	for (i = 0; i < length; i++) {
+		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+		ret = lan78xx_dataport_wait_not_busy(dev);
+		if (ret < 0)
+			goto done;
+	}
+
+done:
+	mutex_unlock(&pdata->dataport_mutex);
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+				    int index, u8 addr[ETH_ALEN])
+{
+	u32 temp;
+
+	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+		temp = addr[3];
+		temp = addr[2] | (temp <<
8); + temp = addr[1] | (temp << 8); + temp = addr[0] | (temp << 8); + pdata->pfilter_table[index][1] = temp; + temp = addr[5]; + temp = addr[4] | (temp << 8); + temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_; + pdata->pfilter_table[index][0] = temp; + } +} + +/* returns hash bit number for given MAC address */ +static inline u32 lan78xx_hash(char addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; +} + +static void lan78xx_deferred_multicast_write(struct work_struct *param) +{ + struct lan78xx_priv *pdata = + container_of(param, struct lan78xx_priv, set_multicast); + struct lan78xx_net *dev = pdata->dev; + int i; + int ret; + + netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", + pdata->rfe_ctl); + + lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, pdata->mchash_table); + + for (i = 1; i < NUM_OF_MAF; i++) { + ret = lan78xx_write_reg(dev, MAF_HI(i), 0); + ret = lan78xx_write_reg(dev, MAF_LO(i), + pdata->pfilter_table[i][1]); + ret = lan78xx_write_reg(dev, MAF_HI(i), + pdata->pfilter_table[i][0]); + } + + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); +} + +static void lan78xx_set_multicast(struct net_device *netdev) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + unsigned long flags; + int i; + + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); + + pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ | + RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); + + for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) + pdata->mchash_table[i] = 0; + /* pfilter_table[0] has own HW address */ + for (i = 1; i < NUM_OF_MAF; i++) { + pdata->pfilter_table[i][0] = + pdata->pfilter_table[i][1] = 0; + } + + pdata->rfe_ctl |= RFE_CTL_BCAST_EN_; + + if (dev->net->flags & IFF_PROMISC) { + netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); + pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_; + } else { + if (dev->net->flags & IFF_ALLMULTI) { + netif_dbg(dev, drv, dev->net, + "receive all multicast enabled"); + pdata->rfe_ctl |= RFE_CTL_MCAST_EN_; + } + } + + if (netdev_mc_count(dev->net)) { + struct netdev_hw_addr *ha; + int i; + + netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); + + pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_; + + i = 1; + netdev_for_each_mc_addr(ha, netdev) { + /* set first 32 into Perfect Filter */ + if (i < 33) { + lan78xx_set_addr_filter(pdata, i, ha->addr); + } else { + u32 bitnum = lan78xx_hash(ha->addr); + + pdata->mchash_table[bitnum / 32] |= + (1 << (bitnum % 32)); + pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_; + } + i++; + } + } + + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_multicast); +} + +static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, + u16 lcladv, u16 rmtadv) +{ + u32 flow = 0, fct_flow = 0; + int ret; + + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_TX) + flow = (FLOW_CR_TX_FCEN_ | 0xFFFF); + + if (cap & FLOW_CTRL_RX) + flow |= FLOW_CR_RX_FCEN_; + + if (dev->udev->speed == USB_SPEED_SUPER) + fct_flow = 0x817; + else if (dev->udev->speed == USB_SPEED_HIGH) + fct_flow = 0x211; + + netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? 
"enabled" : "disabled")); + + ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow); + + /* threshold value should be set before enabling flow */ + ret = lan78xx_write_reg(dev, FLOW, flow); + + return 0; +} + +static int lan78xx_link_reset(struct lan78xx_net *dev) +{ + struct mii_if_info *mii = &dev->mii; + struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + u16 ladv, radv; + int ret; + u32 buf; + + /* clear PHY interrupt status */ + /* VTSE PHY */ + ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS); + if (unlikely(ret < 0)) + return -EIO; + + /* clear LAN78xx interrupt status */ + ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); + if (unlikely(ret < 0)) + return -EIO; + + if (!mii_link_ok(mii) && dev->link_on) { + dev->link_on = false; + netif_carrier_off(dev->net); + + /* reset MAC */ + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + if (unlikely(ret < 0)) + return -EIO; + buf |= MAC_CR_RST_; + ret = lan78xx_write_reg(dev, MAC_CR, buf); + if (unlikely(ret < 0)) + return -EIO; + } else if (mii_link_ok(mii) && !dev->link_on) { + dev->link_on = true; + + mii_check_media(mii, 1, 1); + mii_ethtool_gset(&dev->mii, &ecmd); + + mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS); + + if (dev->udev->speed == USB_SPEED_SUPER) { + if (ethtool_cmd_speed(&ecmd) == 1000) { + /* disable U2 */ + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + buf &= ~USB_CFG1_DEV_U2_INIT_EN_; + ret = lan78xx_write_reg(dev, USB_CFG1, buf); + /* enable U1 */ + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + buf |= USB_CFG1_DEV_U1_INIT_EN_; + ret = lan78xx_write_reg(dev, USB_CFG1, buf); + } else { + /* enable U1 & U2 */ + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + buf |= USB_CFG1_DEV_U2_INIT_EN_; + buf |= USB_CFG1_DEV_U1_INIT_EN_; + ret = lan78xx_write_reg(dev, USB_CFG1, buf); + } + } + + ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); + if (unlikely(ladv < 0)) + return -EIO; + + radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA); + if (unlikely(radv < 0)) + return -EIO; + + netif_dbg(dev, link, dev->net, + "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x", + ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); + + ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); + netif_carrier_on(dev->net); + } + + return ret; +} + +/* some work can't be done in tasklets, so we use keventd + * + * NOTE: annoying asymmetry: if it's active, schedule_work() fails, + * but tasklet_schedule() doesn't. hope the failure is rare. 
+ */ +void lan78xx_defer_kevent(struct lan78xx_net *dev, int work) +{ + set_bit(work, &dev->flags); + if (!schedule_delayed_work(&dev->wq, 0)) + netdev_err(dev->net, "kevent %d may have been dropped\n", work); +} + +static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) +{ + u32 intdata; + + if (urb->actual_length != 4) { + netdev_warn(dev->net, + "unexpected urb length %d", urb->actual_length); + return; + } + + memcpy(&intdata, urb->transfer_buffer, 4); + le32_to_cpus(&intdata); + + if (intdata & INT_ENP_PHY_INT) { + netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); + lan78xx_defer_kevent(dev, EVENT_LINK_RESET); + } else + netdev_warn(dev->net, + "unexpected interrupt: 0x%08x\n", intdata); +} + +static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev) +{ + return MAX_EEPROM_SIZE; +} + +static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + + ee->magic = LAN78XX_EEPROM_MAGIC; + + return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); +} + +static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + + /* Allow entire eeprom update only */ + if ((ee->magic == LAN78XX_EEPROM_MAGIC) && + (ee->offset == 0) && + (ee->len == 512) && + (data[0] == EEPROM_INDICATOR)) + return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + else if ((ee->magic == LAN78XX_OTP_MAGIC) && + (ee->offset == 0) && + (ee->len == 512) && + (data[0] == OTP_INDICATOR_1)) + return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); + + return -EINVAL; +} + +static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings)); +} + +static int lan78xx_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(lan78xx_gstrings); + else + return -EOPNOTSUPP; +} + +static void lan78xx_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_statstage lan78xx_stat; + u32 *p; + int i; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) { + p = (u32 *)&lan78xx_stat; + for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++) + data[i] = p[i]; + } + + usb_autopm_put_interface(dev->intf); +} + +static void lan78xx_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + int ret; + u32 buf; + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + if (unlikely(ret < 0)) { + wol->supported = 0; + wol->wolopts = 0; + } else { + if (buf & USB_CFG_RMT_WKP_) { + wol->supported = WAKE_ALL; + wol->wolopts = pdata->wol; + } else { + wol->supported = 0; + wol->wolopts = 0; + } + } + + usb_autopm_put_interface(dev->intf); +} + +static int lan78xx_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + pdata->wol = 0; + if (wol->wolopts & WAKE_UCAST) + pdata->wol |= WAKE_UCAST; + if 
(wol->wolopts & WAKE_MCAST)
+		pdata->wol |= WAKE_MCAST;
+	if (wol->wolopts & WAKE_BCAST)
+		pdata->wol |= WAKE_BCAST;
+	if (wol->wolopts & WAKE_MAGIC)
+		pdata->wol |= WAKE_MAGIC;
+	if (wol->wolopts & WAKE_PHY)
+		pdata->wol |= WAKE_PHY;
+	if (wol->wolopts & WAKE_ARP)
+		pdata->wol |= WAKE_ARP;
+
+	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+	u32 buf;
+	u32 adv, lpadv;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+	if (buf & MAC_CR_EEE_EN_) {
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+		adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+		edata->eee_enabled = true;
+		edata->supported = true;
+		edata->eee_active = !!(adv & lpadv);
+		edata->advertised = adv;
+		edata->lp_advertised = lpadv;
+		edata->tx_lpi_enabled = true;
+		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
+		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+		edata->tx_lpi_timer = buf;
+	} else {
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->supported = false;
+		edata->advertised = 0;
+		edata->lp_advertised = lpadv;
+		edata->tx_lpi_enabled = false;
+		edata->tx_lpi_timer = 0;
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+	u32 buf;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	if (edata->eee_enabled) {
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		buf |= MAC_CR_EEE_EN_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+		buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+		lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+				  PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+	} else {
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		buf &= ~MAC_CR_EEE_EN_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	return mii_link_ok(&dev->mii);
+}
+
+int lan78xx_nway_reset(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+				struct ethtool_drvinfo *info)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct
net_device *net, struct ethtool_cmd *cmd) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct mii_if_info *mii = &dev->mii; + int ret; + int buf; + + if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write)) + return -EOPNOTSUPP; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + ret = mii_ethtool_gset(&dev->mii, cmd); + + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1); + buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0); + + buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_; + if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) { + cmd->eth_tp_mdix = ETH_TP_MDI_AUTO; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + } else if (buf == PHY_EXT_MODE_CTRL_MDI_) { + cmd->eth_tp_mdix = ETH_TP_MDI; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI; + } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) { + cmd->eth_tp_mdix = ETH_TP_MDI_X; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X; + } + + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct mii_if_info *mii = &dev->mii; + int ret = 0; + int temp; + + if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write)) + return -EOPNOTSUPP; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + return ret; + + if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) { + if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) { + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_MDI_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_0); + } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) { + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_MDI_X_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_0); + } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) { + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, + PHY_EXT_GPIO_PAGE_SPACE_0); + } + } + + /* change speed & duplex */ + ret = mii_ethtool_sset(&dev->mii, cmd); + + if (!cmd->autoneg) { + /* force link down */ + temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, + temp | BMCR_LOOPBACK); + mdelay(1); + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp); + } + + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static const struct ethtool_ops lan78xx_ethtool_ops = { + .get_link = lan78xx_get_link, + .nway_reset = lan78xx_nway_reset, + .get_drvinfo = lan78xx_get_drvinfo, + .get_msglevel = lan78xx_get_msglevel, + .set_msglevel = lan78xx_set_msglevel, + .get_settings = lan78xx_get_settings, + .set_settings = lan78xx_set_settings, + .get_eeprom_len = 
lan78xx_ethtool_get_eeprom_len, + .get_eeprom = lan78xx_ethtool_get_eeprom, + .set_eeprom = lan78xx_ethtool_set_eeprom, + .get_ethtool_stats = lan78xx_get_stats, + .get_sset_count = lan78xx_get_sset_count, + .get_strings = lan78xx_get_strings, + .get_wol = lan78xx_get_wol, + .set_wol = lan78xx_set_wol, + .get_eee = lan78xx_get_eee, + .set_eee = lan78xx_set_eee, +}; + +static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); +} + +static void lan78xx_init_mac_address(struct lan78xx_net *dev) +{ + u32 addr_lo, addr_hi; + int ret; + u8 addr[6]; + + ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); + ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + + addr[0] = addr_lo & 0xFF; + addr[1] = (addr_lo >> 8) & 0xFF; + addr[2] = (addr_lo >> 16) & 0xFF; + addr[3] = (addr_lo >> 24) & 0xFF; + addr[4] = addr_hi & 0xFF; + addr[5] = (addr_hi >> 8) & 0xFF; + + if (!is_valid_ether_addr(addr)) { + /* reading mac address from EEPROM or OTP */ + if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + addr) == 0) || + (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, + addr) == 0)) { + if (is_valid_ether_addr(addr)) { + /* eeprom values are valid so use them */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from EEPROM"); + } else { + /* generate random MAC */ + random_ether_addr(addr); + netif_dbg(dev, ifup, dev->net, + "MAC address set to random addr"); + } + + addr_lo = addr[0] | (addr[1] << 8) | + (addr[2] << 16) | (addr[3] << 24); + addr_hi = addr[4] | (addr[5] << 8); + + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + } else { + /* generate random MAC */ + random_ether_addr(addr); + netif_dbg(dev, ifup, dev->net, + "MAC address set to random addr"); + } + } + + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + + ether_addr_copy(dev->net->dev_addr, addr); +} + +static void lan78xx_mii_init(struct lan78xx_net *dev) +{ + /* Initialize MII structure */ + dev->mii.dev = dev->net; + dev->mii.mdio_read = lan78xx_mdio_read; + dev->mii.mdio_write = lan78xx_mdio_write; + dev->mii.phy_id_mask = 0x1f; + dev->mii.reg_num_mask = 0x1f; + dev->mii.phy_id = INTERNAL_PHY_ID; + dev->mii.supports_gmii = true; +} + +static int lan78xx_phy_init(struct lan78xx_net *dev) +{ + int temp; + struct mii_if_info *mii = &dev->mii; + + if ((!mii->mdio_write) || (!mii->mdio_read)) + return -EOPNOTSUPP; + + temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); + temp |= ADVERTISE_ALL; + mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE, + temp | ADVERTISE_CSMA | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + /* set to AUTOMDIX */ + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1); + temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL); + temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_; + mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL, + temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_); + mii->mdio_write(mii->dev, mii->phy_id, + PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0); + dev->mdix_ctrl = ETH_TP_MDI_AUTO; + + /* MAC doesn't support 1000HD */ + temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000); + mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000, + temp & ~ADVERTISE_1000HALF); + + /* clear interrupt */ + mii->mdio_read(mii->dev, mii->phy_id, 
PHY_VTSE_INT_STS); + mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK, + PHY_VTSE_INT_MASK_MDINTPIN_EN_ | + PHY_VTSE_INT_MASK_LINK_CHANGE_); + + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); + + return 0; +} + +static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) +{ + int ret = 0; + u32 buf; + bool rxenabled; + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + + rxenabled = ((buf & MAC_RX_RXEN_) != 0); + + if (rxenabled) { + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + } + + /* add 4 to size for FCS */ + buf &= ~MAC_RX_MAX_SIZE_MASK_; + buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); + + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + if (rxenabled) { + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + } + + return 0; +} + +static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q) +{ + struct sk_buff *skb; + unsigned long flags; + int count = 0; + + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + struct skb_data *entry; + struct urb *urb; + int ret; + + skb_queue_walk(q, skb) { + entry = (struct skb_data *)skb->cb; + if (entry->state != unlink_start) + goto found; + } + break; +found: + entry->state = unlink_start; + urb = entry->urb; + + /* Get reference count of the URB to avoid it to be + * freed during usb_unlink_urb, which may trigger + * use-after-free problem inside usb_unlink_urb since + * usb_unlink_urb is always racing with .complete + * handler(include defer_bh). + */ + usb_get_urb(urb); + spin_unlock_irqrestore(&q->lock, flags); + /* during some PM-driven resume scenarios, + * these (async) unlinks complete immediately + */ + ret = usb_unlink_urb(urb); + if (ret != -EINPROGRESS && ret != 0) + netdev_dbg(dev->net, "unlink urb err, %d\n", ret); + else + count++; + usb_put_urb(urb); + spin_lock_irqsave(&q->lock, flags); + } + spin_unlock_irqrestore(&q->lock, flags); + return count; +} + +static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + int ll_mtu = new_mtu + netdev->hard_header_len; + int old_hard_mtu = dev->hard_mtu; + int old_rx_urb_size = dev->rx_urb_size; + int ret; + + if (new_mtu > MAX_SINGLE_PACKET_SIZE) + return -EINVAL; + + if (new_mtu <= 0) + return -EINVAL; + /* no second zero-length packet read wanted after mtu-sized packets */ + if ((ll_mtu % dev->maxpacket) == 0) + return -EDOM; + + ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); + + netdev->mtu = new_mtu; + + dev->hard_mtu = netdev->mtu + netdev->hard_header_len; + if (dev->rx_urb_size == old_hard_mtu) { + dev->rx_urb_size = dev->hard_mtu; + if (dev->rx_urb_size > old_rx_urb_size) { + if (netif_running(dev->net)) { + unlink_urbs(dev, &dev->rxq); + tasklet_schedule(&dev->bh); + } + } + } + + return 0; +} + +int lan78xx_set_mac_addr(struct net_device *netdev, void *p) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct sockaddr *addr = p; + u32 addr_lo, addr_hi; + int ret; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + addr_lo = netdev->dev_addr[0] | + netdev->dev_addr[1] << 8 | + netdev->dev_addr[2] << 16 | + netdev->dev_addr[3] << 24; + addr_hi = netdev->dev_addr[4] | + netdev->dev_addr[5] << 8; + + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + + return 0; +} + +/* Enable or disable Rx 
checksum offload engine */ +static int lan78xx_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); + + if (features & NETIF_F_RXCSUM) { + pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_; + pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_; + } else { + pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_); + pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_); + } + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; + else + pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; + + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); + + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + + return 0; +} + +static void lan78xx_deferred_vlan_write(struct work_struct *param) +{ + struct lan78xx_priv *pdata = + container_of(param, struct lan78xx_priv, set_vlan); + struct lan78xx_net *dev = pdata->dev; + + lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0, + DP_SEL_VHF_VLAN_LEN, pdata->vlan_table); +} + +static int lan78xx_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u16 vid_bit_index; + u16 vid_dword_index; + + vid_dword_index = (vid >> 5) & 0x7F; + vid_bit_index = vid & 0x1F; + + pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_vlan); + + return 0; +} + +static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u16 vid_bit_index; + u16 vid_dword_index; + + vid_dword_index = (vid >> 5) & 0x7F; + vid_bit_index = vid & 0x1F; + + pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index); + + /* defer register writes to a sleepable context */ + schedule_work(&pdata->set_vlan); + + return 0; +} + +static void lan78xx_init_ltm(struct lan78xx_net *dev) +{ + int ret; + u32 buf; + u32 regs[6] = { 0 }; + + ret = lan78xx_read_reg(dev, USB_CFG1, &buf); + if (buf & USB_CFG1_LTM_ENABLE_) { + u8 temp[2]; + /* Get values from EEPROM first */ + if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) { + if (temp[0] == 24) { + ret = lan78xx_read_raw_eeprom(dev, + temp[1] * 2, + 24, + (u8 *)regs); + if (ret < 0) + return; + } + } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) { + if (temp[0] == 24) { + ret = lan78xx_read_raw_otp(dev, + temp[1] * 2, + 24, + (u8 *)regs); + if (ret < 0) + return; + } + } + } + + lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]); + lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]); + lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]); + lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]); + lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]); + lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); +} + +static int lan78xx_reset(struct lan78xx_net *dev) +{ + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u32 buf; + int ret = 0; + unsigned long timeout; + + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_LRST_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + + timeout = jiffies + HZ; + do { + mdelay(1); + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, + 
"timeout on completion of LiteReset"); + return -EIO; + } + } while (buf & HW_CFG_LRST_); + + lan78xx_init_mac_address(dev); + + /* Respond to the IN token with a NAK */ + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + buf |= USB_CFG_BIR_; + ret = lan78xx_write_reg(dev, USB_CFG0, buf); + + /* Init LTM */ + lan78xx_init_ltm(dev); + + dev->net->hard_header_len += TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + if (dev->udev->speed == USB_SPEED_SUPER) { + buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; + dev->rx_qlen = 4; + dev->tx_qlen = 4; + } else if (dev->udev->speed == USB_SPEED_HIGH) { + buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; + dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size; + dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu; + } else { + buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; + dev->rx_qlen = 4; + } + + ret = lan78xx_write_reg(dev, BURST_CAP, buf); + ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); + + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_MEF_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + + ret = lan78xx_read_reg(dev, USB_CFG0, &buf); + buf |= USB_CFG_BCE_; + ret = lan78xx_write_reg(dev, USB_CFG0, buf); + + /* set FIFO sizes */ + buf = (MAX_RX_FIFO_SIZE - 512) / 512; + ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf); + + buf = (MAX_TX_FIFO_SIZE - 512) / 512; + ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf); + + ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); + ret = lan78xx_write_reg(dev, FLOW, 0); + ret = lan78xx_write_reg(dev, FCT_FLOW, 0); + + /* Don't need rfe_ctl_lock during initialisation */ + ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); + pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_; + ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + + /* Enable or disable checksum offload engines */ + lan78xx_set_features(dev->net, dev->net->features); + + lan78xx_set_multicast(dev->net); + + /* reset PHY */ + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + buf |= PMT_CTL_PHY_RST_; + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + timeout = jiffies + HZ; + do { + mdelay(1); + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + if (time_after(jiffies, timeout)) { + netdev_warn(dev->net, "timeout waiting for PHY Reset"); + return -EIO; + } + } while (buf & PMT_CTL_PHY_RST_); + + lan78xx_mii_init(dev); + + ret = lan78xx_phy_init(dev); + + ret = lan78xx_read_reg(dev, MAC_CR, &buf); + + buf |= MAC_CR_GMII_EN_; + buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; + + ret = lan78xx_write_reg(dev, MAC_CR, buf); + + /* enable on PHY */ + if (buf & MAC_CR_EEE_EN_) + lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06); + + /* enable PHY interrupts */ + ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + buf |= INT_ENP_PHY_INT; + ret = lan78xx_write_reg(dev, INT_EP_CTL, buf); + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf |= MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + + ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf); + buf |= FCT_TX_CTL_EN_; + ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); + + ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf); + buf |= FCT_RX_CTL_EN_; + ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf); + + if 
(!mii_nway_restart(&dev->mii)) + netif_dbg(dev, link, dev->net, "autoneg initiated"); + + return 0; +} + +static int lan78xx_open(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + int ret; + + ret = usb_autopm_get_interface(dev->intf); + if (ret < 0) + goto out; + + ret = lan78xx_reset(dev); + if (ret < 0) + goto done; + + /* for Link Check */ + if (dev->urb_intr) { + ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL); + if (ret < 0) { + netif_err(dev, ifup, dev->net, + "intr submit %d\n", ret); + goto done; + } + } + + set_bit(EVENT_DEV_OPEN, &dev->flags); + + netif_start_queue(net); + + dev->link_on = false; + + lan78xx_defer_kevent(dev, EVENT_LINK_RESET); +done: + usb_autopm_put_interface(dev->intf); + +out: + return ret; +} + +static void lan78xx_terminate_urbs(struct lan78xx_net *dev) +{ + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); + DECLARE_WAITQUEUE(wait, current); + int temp; + + /* ensure there are no more active urbs */ + add_wait_queue(&unlink_wakeup, &wait); + set_current_state(TASK_UNINTERRUPTIBLE); + dev->wait = &unlink_wakeup; + temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); + + /* maybe wait for deletions to finish. */ + while (!skb_queue_empty(&dev->rxq) && + !skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->done)) { + schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); + } + set_current_state(TASK_RUNNING); + dev->wait = NULL; + remove_wait_queue(&unlink_wakeup, &wait); +} + +int lan78xx_stop(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + clear_bit(EVENT_DEV_OPEN, &dev->flags); + netif_stop_queue(net); + + netif_info(dev, ifdown, dev->net, + "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", + net->stats.rx_packets, net->stats.tx_packets, + net->stats.rx_errors, net->stats.tx_errors); + + lan78xx_terminate_urbs(dev); + + usb_kill_urb(dev->urb_intr); + + skb_queue_purge(&dev->rxq_pause); + + /* deferred work (task, timer, softirq) must also stop. + * can't flush_scheduled_work() until we drop rtnl (later), + * else workers could deadlock; so make workers a NOP. 
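+ * (dev->flags is zeroed below, which is what turns lan78xx_defer_kevent()
+ * and the deferred EVENT_* handlers into no-ops before the work and
+ * tasklet are cancelled)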
+ */ + dev->flags = 0; + cancel_delayed_work_sync(&dev->wq); + tasklet_kill(&dev->bh); + + usb_autopm_put_interface(dev->intf); + + return 0; +} + +static int lan78xx_linearize(struct sk_buff *skb) +{ + return skb_linearize(skb); +} + +static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev, + struct sk_buff *skb, gfp_t flags) +{ + u32 tx_cmd_a, tx_cmd_b; + + if (skb_headroom(skb) < TX_OVERHEAD) { + struct sk_buff *skb2; + + skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return NULL; + } + + if (lan78xx_linearize(skb) < 0) + return NULL; + + tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_; + + tx_cmd_b = 0; + if (skb_is_gso(skb)) { + u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_); + + tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_; + + tx_cmd_a |= TX_CMD_A_LSO_; + } + + if (skb_vlan_tag_present(skb)) { + tx_cmd_a |= TX_CMD_A_IVTG_; + tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; + } + + skb_push(skb, 4); + cpu_to_le32s(&tx_cmd_b); + memcpy(skb->data, &tx_cmd_b, 4); + + skb_push(skb, 4); + cpu_to_le32s(&tx_cmd_a); + memcpy(skb->data, &tx_cmd_a, 4); + + return skb; +} + +static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, + struct sk_buff_head *list, enum skb_state state) +{ + unsigned long flags; + enum skb_state old_state; + struct skb_data *entry = (struct skb_data *)skb->cb; + + spin_lock_irqsave(&list->lock, flags); + old_state = entry->state; + entry->state = state; + if (!list->prev) + BUG_ON(!list->prev); + if (!list->next) + BUG_ON(!list->next); + if (!skb->prev || !skb->next) + BUG_ON(true); + + __skb_unlink(skb, list); + spin_unlock(&list->lock); + spin_lock(&dev->done.lock); + if (!dev->done.prev) + BUG_ON(!dev->done.prev); + if (!dev->done.next) + BUG_ON(!dev->done.next); + + __skb_queue_tail(&dev->done, skb); + if (skb_queue_len(&dev->done) == 1) + tasklet_schedule(&dev->bh); + spin_unlock_irqrestore(&dev->done.lock, flags); + + return old_state; +} + +static void tx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct skb_data *entry = (struct skb_data *)skb->cb; + struct lan78xx_net *dev = entry->dev; + + if (urb->status == 0) { + dev->net->stats.tx_packets++; + dev->net->stats.tx_bytes += entry->length; + } else { + dev->net->stats.tx_errors++; + + switch (urb->status) { + case -EPIPE: + lan78xx_defer_kevent(dev, EVENT_TX_HALT); + break; + + /* software-driven interface shutdown */ + case -ECONNRESET: + case -ESHUTDOWN: + break; + + case -EPROTO: + case -ETIME: + case -EILSEQ: + netif_stop_queue(dev->net); + break; + default: + netif_dbg(dev, tx_err, dev->net, + "tx err %d\n", entry->urb->status); + break; + } + } + + usb_autopm_put_interface_async(dev->intf); + + if (skb) + defer_bh(dev, skb, &dev->txq, tx_done); +} + +static void lan78xx_queue_skb(struct sk_buff_head *list, + struct sk_buff *newsk, enum skb_state state) +{ + struct skb_data *entry = (struct skb_data *)newsk->cb; + + __skb_queue_tail(list, newsk); + entry->state = state; +} + +netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + if (skb) + skb_tx_timestamp(skb); + + skb = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); + if (skb) { + skb_queue_tail(&dev->txq_pend, skb); + + if (skb_queue_len(&dev->txq_pend) > 10) + netif_stop_queue(net); + } else { + netif_dbg(dev, 
tx_err, dev->net, + "lan78xx_tx_prep return NULL\n"); + dev->net->stats.tx_errors++; + dev->net->stats.tx_dropped++; + } + + tasklet_schedule(&dev->bh); + + return NETDEV_TX_OK; +} + +int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) +{ + int tmp; + struct usb_host_interface *alt = NULL; + struct usb_host_endpoint *in = NULL, *out = NULL; + struct usb_host_endpoint *status = NULL; + + for (tmp = 0; tmp < intf->num_altsetting; tmp++) { + unsigned ep; + + in = NULL; + out = NULL; + status = NULL; + alt = intf->altsetting + tmp; + + for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { + struct usb_host_endpoint *e; + int intr = 0; + + e = alt->endpoint + ep; + switch (e->desc.bmAttributes) { + case USB_ENDPOINT_XFER_INT: + if (!usb_endpoint_dir_in(&e->desc)) + continue; + intr = 1; + /* FALLTHROUGH */ + case USB_ENDPOINT_XFER_BULK: + break; + default: + continue; + } + if (usb_endpoint_dir_in(&e->desc)) { + if (!intr && !in) + in = e; + else if (intr && !status) + status = e; + } else { + if (!out) + out = e; + } + } + if (in && out) + break; + } + if (!alt || !in || !out) + return -EINVAL; + + dev->pipe_in = usb_rcvbulkpipe(dev->udev, + in->desc.bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK); + dev->pipe_out = usb_sndbulkpipe(dev->udev, + out->desc.bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK); + dev->ep_intr = status; + + return 0; +} + +static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) +{ + struct lan78xx_priv *pdata = NULL; + int ret; + int i; + + ret = lan78xx_get_endpoints(dev, intf); + + dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); + + pdata = (struct lan78xx_priv *)(dev->data[0]); + if (!pdata) { + netdev_warn(dev->net, "Unable to allocate lan78xx_priv"); + return -ENOMEM; + } + + pdata->dev = dev; + + spin_lock_init(&pdata->rfe_ctl_lock); + mutex_init(&pdata->dataport_mutex); + + INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write); + + for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++) + pdata->vlan_table[i] = 0; + + INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write); + + dev->net->features = 0; + + if (DEFAULT_TX_CSUM_ENABLE) + dev->net->features |= NETIF_F_HW_CSUM; + + if (DEFAULT_RX_CSUM_ENABLE) + dev->net->features |= NETIF_F_RXCSUM; + + if (DEFAULT_TSO_CSUM_ENABLE) + dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; + + dev->net->hw_features = dev->net->features; + + /* Init all registers */ + ret = lan78xx_reset(dev); + + dev->net->flags |= IFF_MULTICAST; + + pdata->wol = WAKE_MAGIC; + + return 0; +} + +static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) +{ + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + + if (pdata) { + netif_dbg(dev, ifdown, dev->net, "free pdata"); + kfree(pdata); + pdata = NULL; + dev->data[0] = 0; + } +} + +static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, + struct sk_buff *skb, + u32 rx_cmd_a, u32 rx_cmd_b) +{ + if (!(dev->net->features & NETIF_F_RXCSUM) || + unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { + skb->ip_summed = CHECKSUM_NONE; + } else { + skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); + skb->ip_summed = CHECKSUM_COMPLETE; + } +} + +void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) +{ + int status; + + if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { + skb_queue_tail(&dev->rxq_pause, skb); + return; + } + + skb->protocol = eth_type_trans(skb, dev->net); + dev->net->stats.rx_packets++; + dev->net->stats.rx_bytes += skb->len; + + netif_dbg(dev, rx_status, 
dev->net, "< rx, len %zu, type 0x%x\n", + skb->len + sizeof(struct ethhdr), skb->protocol); + memset(skb->cb, 0, sizeof(struct skb_data)); + + if (skb_defer_rx_timestamp(skb)) + return; + + status = netif_rx(skb); + if (status != NET_RX_SUCCESS) + netif_dbg(dev, rx_err, dev->net, + "netif_rx status %d\n", status); +} + +static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb) +{ + if (skb->len < dev->net->hard_header_len) + return 0; + + while (skb->len > 0) { + u32 rx_cmd_a, rx_cmd_b, align_count, size; + u16 rx_cmd_c; + struct sk_buff *skb2; + unsigned char *packet; + + memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); + le32_to_cpus(&rx_cmd_a); + skb_pull(skb, sizeof(rx_cmd_a)); + + memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); + le32_to_cpus(&rx_cmd_b); + skb_pull(skb, sizeof(rx_cmd_b)); + + memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c)); + le16_to_cpus(&rx_cmd_c); + skb_pull(skb, sizeof(rx_cmd_c)); + + packet = skb->data; + + /* get the packet length */ + size = (rx_cmd_a & RX_CMD_A_LEN_MASK_); + align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + + if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) { + netif_dbg(dev, rx_err, dev->net, + "Error rx_cmd_a=0x%08x", rx_cmd_a); + } else { + /* last frame in this batch */ + if (skb->len == size) { + lan78xx_rx_csum_offload(dev, skb, + rx_cmd_a, rx_cmd_b); + + skb_trim(skb, skb->len - 4); /* remove fcs */ + skb->truesize = size + sizeof(struct sk_buff); + + return 1; + } + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) { + netdev_warn(dev->net, "Error allocating skb"); + return 0; + } + + skb2->len = size; + skb2->data = packet; + skb_set_tail_pointer(skb2, size); + + lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); + + skb_trim(skb2, skb2->len - 4); /* remove fcs */ + skb2->truesize = size + sizeof(struct sk_buff); + + lan78xx_skb_return(dev, skb2); + } + + skb_pull(skb, size); + + /* padding bytes before the next frame starts */ + if (skb->len) + skb_pull(skb, align_count); + } + + if (unlikely(skb->len < 0)) { + netdev_warn(dev->net, "invalid rx length<0 %d", skb->len); + return 0; + } + + return 1; +} + +static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb) +{ + if (!lan78xx_rx(dev, skb)) { + dev->net->stats.rx_errors++; + goto done; + } + + if (skb->len) { + lan78xx_skb_return(dev, skb); + return; + } + + netif_dbg(dev, rx_err, dev->net, "drop\n"); + dev->net->stats.rx_errors++; +done: + skb_queue_tail(&dev->done, skb); +} + +static void rx_complete(struct urb *urb); + +static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags) +{ + struct sk_buff *skb; + struct skb_data *entry; + unsigned long lockflags; + size_t size = dev->rx_urb_size; + int ret = 0; + + skb = netdev_alloc_skb_ip_align(dev->net, size); + if (!skb) { + usb_free_urb(urb); + return -ENOMEM; + } + + entry = (struct skb_data *)skb->cb; + entry->urb = urb; + entry->dev = dev; + entry->length = 0; + + usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in, + skb->data, size, rx_complete, skb); + + spin_lock_irqsave(&dev->rxq.lock, lockflags); + + if (netif_device_present(dev->net) && + netif_running(dev->net) && + !test_bit(EVENT_RX_HALT, &dev->flags) && + !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + ret = usb_submit_urb(urb, GFP_ATOMIC); + switch (ret) { + case 0: + lan78xx_queue_skb(&dev->rxq, skb, rx_start); + break; + case -EPIPE: + lan78xx_defer_kevent(dev, EVENT_RX_HALT); + break; + case -ENODEV: + netif_dbg(dev, ifdown, dev->net, "device gone\n"); + netif_device_detach(dev->net); + break; + case -EHOSTUNREACH: + 
ret = -ENOLINK; + break; + default: + netif_dbg(dev, rx_err, dev->net, + "rx submit, %d\n", ret); + tasklet_schedule(&dev->bh); + } + } else { + netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); + ret = -ENOLINK; + } + spin_unlock_irqrestore(&dev->rxq.lock, lockflags); + if (ret) { + dev_kfree_skb_any(skb); + usb_free_urb(urb); + } + return ret; +} + +static void rx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct skb_data *entry = (struct skb_data *)skb->cb; + struct lan78xx_net *dev = entry->dev; + int urb_status = urb->status; + enum skb_state state; + + skb_put(skb, urb->actual_length); + state = rx_done; + entry->urb = NULL; + + switch (urb_status) { + case 0: + if (skb->len < dev->net->hard_header_len) { + state = rx_cleanup; + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + netif_dbg(dev, rx_err, dev->net, + "rx length %d\n", skb->len); + } + usb_mark_last_busy(dev->udev); + break; + case -EPIPE: + dev->net->stats.rx_errors++; + lan78xx_defer_kevent(dev, EVENT_RX_HALT); + /* FALLTHROUGH */ + case -ECONNRESET: /* async unlink */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "rx shutdown, code %d\n", urb_status); + state = rx_cleanup; + entry->urb = urb; + urb = NULL; + break; + case -EPROTO: + case -ETIME: + case -EILSEQ: + dev->net->stats.rx_errors++; + state = rx_cleanup; + entry->urb = urb; + urb = NULL; + break; + + /* data overrun ... flush fifo? */ + case -EOVERFLOW: + dev->net->stats.rx_over_errors++; + /* FALLTHROUGH */ + + default: + state = rx_cleanup; + dev->net->stats.rx_errors++; + netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); + break; + } + + state = defer_bh(dev, skb, &dev->rxq, state); + + if (urb) { + if (netif_running(dev->net) && + !test_bit(EVENT_RX_HALT, &dev->flags) && + state != unlink_start) { + rx_submit(dev, urb, GFP_ATOMIC); + return; + } + usb_free_urb(urb); + } + netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n"); +} + +static void lan78xx_tx_bh(struct lan78xx_net *dev) +{ + int length; + struct urb *urb = NULL; + struct skb_data *entry; + unsigned long flags; + struct sk_buff_head *tqp = &dev->txq_pend; + struct sk_buff *skb, *skb2; + int ret; + int count, pos; + int skb_totallen, pkt_cnt; + + skb_totallen = 0; + pkt_cnt = 0; + for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { + if (skb_is_gso(skb)) { + if (pkt_cnt) { + /* handle previous packets first */ + break; + } + length = skb->len; + skb2 = skb_dequeue(tqp); + goto gso_skb; + } + + if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE) + break; + skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); + pkt_cnt++; + } + + /* copy to a single skb */ + skb = alloc_skb(skb_totallen, GFP_ATOMIC); + if (!skb) + goto drop; + + skb_put(skb, skb_totallen); + + for (count = pos = 0; count < pkt_cnt; count++) { + skb2 = skb_dequeue(tqp); + if (skb2) { + memcpy(skb->data + pos, skb2->data, skb2->len); + pos += roundup(skb2->len, sizeof(u32)); + dev_kfree_skb(skb2); + } else { + BUG_ON(true); + } + } + + length = skb_totallen; + +gso_skb: + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + netif_dbg(dev, tx_err, dev->net, "no urb\n"); + goto drop; + } + + entry = (struct skb_data *)skb->cb; + entry->urb = urb; + entry->dev = dev; + entry->length = length; + + spin_lock_irqsave(&dev->txq.lock, flags); + ret = usb_autopm_get_interface_async(dev->intf); + if (ret < 0) { + spin_unlock_irqrestore(&dev->txq.lock, flags); + goto drop; + } + + usb_fill_bulk_urb(urb, dev->udev, 
dev->pipe_out, + skb->data, skb->len, tx_complete, skb); + + if (length % dev->maxpacket == 0) { + /* send USB_ZERO_PACKET */ + urb->transfer_flags |= URB_ZERO_PACKET; + } + +#ifdef CONFIG_PM + /* if this triggers, the device is still asleep */ + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + /* transmission will be done in resume */ + usb_anchor_urb(urb, &dev->deferred); + /* no point in processing more packets */ + netif_stop_queue(dev->net); + usb_put_urb(urb); + spin_unlock_irqrestore(&dev->txq.lock, flags); + netdev_dbg(dev->net, "Delaying transmission for resumption\n"); + return; + } +#endif + + ret = usb_submit_urb(urb, GFP_ATOMIC); + switch (ret) { + case 0: + dev->net->trans_start = jiffies; + lan78xx_queue_skb(&dev->txq, skb, tx_start); + if (skb_queue_len(&dev->txq) >= dev->tx_qlen) + netif_stop_queue(dev->net); + break; + case -EPIPE: + netif_stop_queue(dev->net); + lan78xx_defer_kevent(dev, EVENT_TX_HALT); + usb_autopm_put_interface_async(dev->intf); + break; + default: + usb_autopm_put_interface_async(dev->intf); + netif_dbg(dev, tx_err, dev->net, + "tx: submit urb err %d\n", ret); + break; + } + + spin_unlock_irqrestore(&dev->txq.lock, flags); + + if (ret) { + netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret); +drop: + dev->net->stats.tx_dropped++; + if (skb) + dev_kfree_skb_any(skb); + usb_free_urb(urb); + } else + netif_dbg(dev, tx_queued, dev->net, + "> tx, len %d, type 0x%x\n", length, skb->protocol); +} + +static void lan78xx_rx_bh(struct lan78xx_net *dev) +{ + struct urb *urb; + int i; + + if (skb_queue_len(&dev->rxq) < dev->rx_qlen) { + for (i = 0; i < 10; i++) { + if (skb_queue_len(&dev->rxq) >= dev->rx_qlen) + break; + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (urb) + if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK) + return; + } + + if (skb_queue_len(&dev->rxq) < dev->rx_qlen) + tasklet_schedule(&dev->bh); + } + if (skb_queue_len(&dev->txq) < dev->tx_qlen) + netif_wake_queue(dev->net); +} + +static void lan78xx_bh(unsigned long param) +{ + struct lan78xx_net *dev = (struct lan78xx_net *)param; + struct sk_buff *skb; + struct skb_data *entry; + + BUG_ON(!dev->done.prev); + BUG_ON(!dev->done.next); + + while ((skb = skb_dequeue(&dev->done))) { + entry = (struct skb_data *)(skb->cb); + switch (entry->state) { + case rx_done: + entry->state = rx_cleanup; + rx_process(dev, skb); + continue; + case tx_done: + usb_free_urb(entry->urb); + dev_kfree_skb(skb); + continue; + case rx_cleanup: + usb_free_urb(entry->urb); + dev_kfree_skb(skb); + continue; + default: + netdev_dbg(dev->net, "skb state %d\n", entry->state); + return; + } + BUG_ON(!dev->done.prev); + BUG_ON(!dev->done.next); + } + + if (netif_device_present(dev->net) && netif_running(dev->net)) { + if (!skb_queue_empty(&dev->txq_pend)) + lan78xx_tx_bh(dev); + + if (!timer_pending(&dev->delay) && + !test_bit(EVENT_RX_HALT, &dev->flags)) + lan78xx_rx_bh(dev); + } +} + +static void lan78xx_delayedwork(struct work_struct *work) +{ + int status; + struct lan78xx_net *dev; + + dev = container_of(work, struct lan78xx_net, wq.work);
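+ + /* Endpoint-halt recovery below follows the usual usbnet pattern: unlink + * any in-flight URBs first, clear the stall from process context, and + * only then restart the affected queue. + */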
+ if (test_bit(EVENT_TX_HALT, &dev->flags)) { + unlink_urbs(dev, &dev->txq); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_pipe; + status = usb_clear_halt(dev->udev, dev->pipe_out); + usb_autopm_put_interface(dev->intf); + if (status < 0 && + status != -EPIPE && + status != -ESHUTDOWN) { + if (netif_msg_tx_err(dev)) +fail_pipe: + netdev_err(dev->net, + "can't clear tx halt, status %d\n", + status); + } else { + clear_bit(EVENT_TX_HALT, &dev->flags); + if (status != -ESHUTDOWN) + netif_wake_queue(dev->net); + } + } + if (test_bit(EVENT_RX_HALT, &dev->flags)) { + unlink_urbs(dev, &dev->rxq); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto fail_halt; + status = usb_clear_halt(dev->udev, dev->pipe_in); + usb_autopm_put_interface(dev->intf); + if (status < 0 && + status != -EPIPE && + status != -ESHUTDOWN) { + if (netif_msg_rx_err(dev)) +fail_halt: + netdev_err(dev->net, + "can't clear rx halt, status %d\n", + status); + } else { + clear_bit(EVENT_RX_HALT, &dev->flags); + tasklet_schedule(&dev->bh); + } + } + + if (test_bit(EVENT_LINK_RESET, &dev->flags)) { + int ret = 0; + + clear_bit(EVENT_LINK_RESET, &dev->flags); + status = usb_autopm_get_interface(dev->intf); + if (status < 0) + goto skip_reset; + ret = lan78xx_link_reset(dev); + if (ret < 0) { + usb_autopm_put_interface(dev->intf); +skip_reset: + netdev_info(dev->net, "link reset failed (%d)\n", + ret); + } else { + usb_autopm_put_interface(dev->intf); + } + } +} + +static void intr_complete(struct urb *urb) +{ + struct lan78xx_net *dev = urb->context; + int status = urb->status; + + switch (status) { + /* success */ + case 0: + lan78xx_status(dev, urb); + break; + + /* software-driven interface shutdown */ + case -ENOENT: /* urb killed */ + case -ESHUTDOWN: /* hardware gone */ + netif_dbg(dev, ifdown, dev->net, + "intr shutdown, code %d\n", status); + return; + + /* NOTE: not throttling like RX/TX, since this endpoint + * already polls infrequently + */ + default: + netdev_dbg(dev->net, "intr status %d\n", status); + break; + } + + if (!netif_running(dev->net)) + return; + + memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); + status = usb_submit_urb(urb, GFP_ATOMIC); + if (status != 0) + netif_err(dev, timer, dev->net, + "intr resubmit --> %d\n", status); +} + +static void lan78xx_disconnect(struct usb_interface *intf) +{ + struct lan78xx_net *dev; + struct usb_device *udev; + struct net_device *net; + + dev = usb_get_intfdata(intf); + usb_set_intfdata(intf, NULL); + if (!dev) + return; + + udev = interface_to_usbdev(intf); + + net = dev->net; + unregister_netdev(net); + + cancel_delayed_work_sync(&dev->wq); + + usb_scuttle_anchored_urbs(&dev->deferred); + + lan78xx_unbind(dev, intf); + + usb_kill_urb(dev->urb_intr); + usb_free_urb(dev->urb_intr); + + free_netdev(net); + usb_put_dev(udev); +} + +void lan78xx_tx_timeout(struct net_device *net) +{ + struct lan78xx_net *dev = netdev_priv(net); + + unlink_urbs(dev, &dev->txq); + tasklet_schedule(&dev->bh); +} + +static const struct net_device_ops lan78xx_netdev_ops = { + .ndo_open = lan78xx_open, + .ndo_stop = lan78xx_stop, + .ndo_start_xmit = lan78xx_start_xmit, + .ndo_tx_timeout = lan78xx_tx_timeout, + .ndo_change_mtu = lan78xx_change_mtu, + .ndo_set_mac_address = lan78xx_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = lan78xx_ioctl, + .ndo_set_rx_mode = lan78xx_set_multicast, + .ndo_set_features = lan78xx_set_features, + .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, +};
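+ +/* The probe path below assumes the fixed endpoint layout of these parts: + * endpoint descriptor 0 of the altsetting is bulk-in, 1 is bulk-out and + * 2 is the interrupt endpoint. BULK_IN_PIPE and BULK_OUT_PIPE are the + * matching endpoint numbers, defined elsewhere in this driver. + */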
+static int lan78xx_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct lan78xx_net *dev; + struct net_device *netdev; + struct usb_device *udev; + int ret; + unsigned maxp; + unsigned period; + u8 *buf = NULL; + + udev = interface_to_usbdev(intf); + udev = usb_get_dev(udev); + + ret = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct lan78xx_net)); + if (!netdev) { + dev_err(&intf->dev, "Error: OOM\n"); + goto out1; + } + + /* netdev_printk() needs this */ + SET_NETDEV_DEV(netdev, &intf->dev); + + dev = netdev_priv(netdev); + dev->udev = udev; + dev->intf = intf; + dev->net = netdev; + dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV + | NETIF_MSG_PROBE | NETIF_MSG_LINK); + + skb_queue_head_init(&dev->rxq); + skb_queue_head_init(&dev->txq); + skb_queue_head_init(&dev->done); + skb_queue_head_init(&dev->rxq_pause); + skb_queue_head_init(&dev->txq_pend); + mutex_init(&dev->phy_mutex); + + tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev); + INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); + init_usb_anchor(&dev->deferred); + + netdev->netdev_ops = &lan78xx_netdev_ops; + netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES; + netdev->ethtool_ops = &lan78xx_ethtool_ops; + + ret = lan78xx_bind(dev, intf); + if (ret < 0) + goto out2; + strcpy(netdev->name, "eth%d"); + + if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len)) + netdev->mtu = dev->hard_mtu - netdev->hard_header_len; + + dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; + dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; + dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + dev->ep_intr->desc.bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK); + period = dev->ep_intr->desc.bInterval; + + maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); + buf = kmalloc(maxp, GFP_KERNEL); + if (buf) { + dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->urb_intr) { + kfree(buf); + goto out3; + } else { + usb_fill_int_urb(dev->urb_intr, dev->udev, + dev->pipe_intr, buf, maxp, + intr_complete, dev, period); + } + } + + dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1); + + /* driver requires remote-wakeup capability during autosuspend. */ + intf->needs_remote_wakeup = 1; + + ret = register_netdev(netdev); + if (ret != 0) { + netif_err(dev, probe, netdev, "couldn't register the device\n"); + goto out3; + } + + usb_set_intfdata(intf, dev); + + ret = device_set_wakeup_enable(&udev->dev, true); + + /* Default delay of 2sec has more overhead than advantage. + * Set to 10sec as default. 
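+ * (DEFAULT_AUTOSUSPEND_DELAY is assumed to expand to 10 * 1000 ms + * elsewhere in this driver.)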
+ */ + pm_runtime_set_autosuspend_delay(&udev->dev, + DEFAULT_AUTOSUSPEND_DELAY); + + return 0; + +out3: + lan78xx_unbind(dev, intf); +out2: + free_netdev(netdev); +out1: + usb_put_dev(udev); + + return ret; +} + +/* Bit-serial CRC-16 (polynomial 0x8005, bytes fed LSB first), matching the + * CRC that the wake-up frame filter hardware computes over selected bytes. + */ +static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) +{ + const u16 crc16poly = 0x8005; + int i; + u16 bit, crc, msb; + u8 data; + + crc = 0xFFFF; + for (i = 0; i < len; i++) { + data = *buf++; + for (bit = 0; bit < 8; bit++) { + msb = crc >> 15; + crc <<= 1; + + if (msb ^ (u16)(data & 1)) { + crc ^= crc16poly; + crc |= (u16)0x0001U; + } + data >>= 1; + } + } + + return crc; +} + +static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) +{ + u32 buf; + int ret; + int mask_index; + u16 crc; + u32 temp_wucsr; + u32 temp_pmt_ctl; + const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; + const u8 ipv6_multicast[3] = { 0x33, 0x33 }; + const u8 arp_type[2] = { 0x08, 0x06 }; + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf &= ~MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + ret = lan78xx_write_reg(dev, WUCSR, 0); + ret = lan78xx_write_reg(dev, WUCSR2, 0); + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + + temp_wucsr = 0; + + temp_pmt_ctl = 0; + ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); + temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; + temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; + + for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); + + mask_index = 0; + if (wol & WAKE_PHY) { + temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_MAGIC) { + temp_wucsr |= WUCSR_MPEN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_; + } + if (wol & WAKE_BCAST) { + temp_wucsr |= WUCSR_BCST_EN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_MCAST) { + temp_wucsr |= WUCSR_WAKE_EN_; + + /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ + crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + WUF_CFGX_EN_ | + WUF_CFGX_TYPE_MCAST_ | + (0 << WUF_CFGX_OFFSET_SHIFT_) | + (crc & WUF_CFGX_CRC16_MASK_)); + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + mask_index++; + + /* for IPv6 Multicast */ + crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + WUF_CFGX_EN_ | + WUF_CFGX_TYPE_MCAST_ | + (0 << WUF_CFGX_OFFSET_SHIFT_) | + (crc & WUF_CFGX_CRC16_MASK_)); + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + mask_index++; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + if (wol & WAKE_UCAST) { + temp_wucsr |= WUCSR_PFDA_EN_; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + }
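+ /* Each set bit in WUF_MASKn appears to select one byte of the frame, + * counted from the configured offset, for the CRC-16 match: 0x3000 with + * offset 0 covers bytes 12-13, i.e. the EtherType, compared below against + * the CRC of 0x0806 (ARP). + */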
+ if (wol & WAKE_ARP) { + temp_wucsr |= WUCSR_WAKE_EN_; + + /* set WUF_CFG & WUF_MASK + * for packet type (offset 12,13) = ARP (0x0806) + */ + crc = lan78xx_wakeframe_crc16(arp_type, 2); + ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + WUF_CFGX_EN_ | + WUF_CFGX_TYPE_ALL_ | + (0 << WUF_CFGX_OFFSET_SHIFT_) | + (crc & WUF_CFGX_CRC16_MASK_)); + + ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); + ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + mask_index++; + + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + + ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); + + /* when multiple WOL bits are set */ + if (hweight_long((unsigned long)wol) > 1) { + temp_pmt_ctl |= PMT_CTL_WOL_EN_; + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; + } + ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); + + /* clear WUPS */ + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + buf |= PMT_CTL_WUPS_MASK_; + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf |= MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + return 0; +} + +int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct lan78xx_net *dev = usb_get_intfdata(intf); + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); + u32 buf; + int ret; + int event; + + ret = 0; + event = message.event; + + if (!dev->suspend_count++) { + spin_lock_irq(&dev->txq.lock); + /* don't autosuspend while transmitting */ + if ((skb_queue_len(&dev->txq) || + skb_queue_len(&dev->txq_pend)) && + PMSG_IS_AUTO(message)) { + spin_unlock_irq(&dev->txq.lock); + ret = -EBUSY; + goto out; + } else { + set_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); + } + + /* stop TX & RX */ + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf &= ~MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + /* empty out the rx and tx queues */ + netif_device_detach(dev->net); + lan78xx_terminate_urbs(dev); + usb_kill_urb(dev->urb_intr); + + /* reattach */ + netif_device_attach(dev->net); + } + + if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + if (PMSG_IS_AUTO(message)) { + /* auto suspend (selective suspend) */ + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf &= ~MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf &= ~MAC_RX_RXEN_; + ret = lan78xx_write_reg(dev, MAC_RX, buf); + + ret = lan78xx_write_reg(dev, WUCSR, 0); + ret = lan78xx_write_reg(dev, WUCSR2, 0); + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + + /* set good-frame wakeup */ + ret = lan78xx_read_reg(dev, WUCSR, &buf); + + buf |= WUCSR_RFE_WAKE_EN_; + buf |= WUCSR_STORE_WAKE_; + + ret = lan78xx_write_reg(dev, WUCSR, buf); + + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + + buf &= ~PMT_CTL_RES_CLR_WKP_EN_; + buf |= PMT_CTL_RES_CLR_WKP_STS_; + + buf |= PMT_CTL_PHY_WAKE_EN_; + buf |= PMT_CTL_WOL_EN_; + buf &= ~PMT_CTL_SUS_MODE_MASK_; + buf |= PMT_CTL_SUS_MODE_3_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + + buf |= PMT_CTL_WUPS_MASK_; + + ret = lan78xx_write_reg(dev, PMT_CTL, buf); + + ret = lan78xx_read_reg(dev, MAC_RX, &buf); + buf |= MAC_RX_RXEN_; + ret = 
lan78xx_write_reg(dev, MAC_RX, buf); + } else { + lan78xx_set_suspend(dev, pdata->wol); + } + } + +out: + return ret; +} + +int lan78xx_resume(struct usb_interface *intf) +{ + struct lan78xx_net *dev = usb_get_intfdata(intf); + struct sk_buff *skb; + struct urb *res; + int ret; + u32 buf; + + if (!--dev->suspend_count) { + /* resume interrupt URBs */ + if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags)) + usb_submit_urb(dev->urb_intr, GFP_NOIO); + + spin_lock_irq(&dev->txq.lock); + while ((res = usb_get_from_anchor(&dev->deferred))) { + skb = (struct sk_buff *)res->context; + ret = usb_submit_urb(res, GFP_ATOMIC); + if (ret < 0) { + dev_kfree_skb_any(skb); + usb_free_urb(res); + usb_autopm_put_interface_async(dev->intf); + } else { + dev->net->trans_start = jiffies; + lan78xx_queue_skb(&dev->txq, skb, tx_start); + } + } + + clear_bit(EVENT_DEV_ASLEEP, &dev->flags); + spin_unlock_irq(&dev->txq.lock); + + if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { + if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen)) + netif_start_queue(dev->net); + tasklet_schedule(&dev->bh); + } + } + + ret = lan78xx_write_reg(dev, WUCSR2, 0); + ret = lan78xx_write_reg(dev, WUCSR, 0); + ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + + ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ | + WUCSR2_ARP_RCD_ | + WUCSR2_IPV6_TCPSYN_RCD_ | + WUCSR2_IPV4_TCPSYN_RCD_); + + ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ | + WUCSR_EEE_RX_WAKE_ | + WUCSR_PFDA_FR_ | + WUCSR_RFE_WAKE_FR_ | + WUCSR_WUFR_ | + WUCSR_MPR_ | + WUCSR_BCST_FR_); + + ret = lan78xx_read_reg(dev, MAC_TX, &buf); + buf |= MAC_TX_TXEN_; + ret = lan78xx_write_reg(dev, MAC_TX, buf); + + return 0; +} + +int lan78xx_reset_resume(struct usb_interface *intf) +{ + struct lan78xx_net *dev = usb_get_intfdata(intf); + + lan78xx_reset(dev); + return lan78xx_resume(intf); +} + +static const struct usb_device_id products[] = { + { + /* LAN7800 USB Gigabit Ethernet Device */ + USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID), + }, + { + /* LAN7850 USB Gigabit Ethernet Device */ + USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID), + }, + {}, +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver lan78xx_driver = { + .name = DRIVER_NAME, + .id_table = products, + .probe = lan78xx_probe, + .disconnect = lan78xx_disconnect, + .suspend = lan78xx_suspend, + .resume = lan78xx_resume, + .reset_resume = lan78xx_reset_resume, + .supports_autosuspend = 1, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(lan78xx_driver); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h new file mode 100644 index 000000000000..ae7562ee72ad --- /dev/null +++ b/drivers/net/usb/lan78xx.h @@ -0,0 +1,1069 @@ +/* + * Copyright (C) 2015 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef _LAN78XX_H +#define _LAN78XX_H + +/* USB Vendor Requests */ +#define USB_VENDOR_REQUEST_WRITE_REGISTER 0xA0 +#define USB_VENDOR_REQUEST_READ_REGISTER 0xA1 +#define USB_VENDOR_REQUEST_GET_STATS 0xA2 + +/* Interrupt Endpoint status word bitfields */ +#define INT_ENP_EEE_START_TX_LPI_INT BIT(26) +#define INT_ENP_EEE_STOP_TX_LPI_INT BIT(25) +#define INT_ENP_EEE_RX_LPI_INT BIT(24) +#define INT_ENP_RDFO_INT BIT(22) +#define INT_ENP_TXE_INT BIT(21) +#define INT_ENP_TX_DIS_INT BIT(19) +#define INT_ENP_RX_DIS_INT BIT(18) +#define INT_ENP_PHY_INT BIT(17) +#define INT_ENP_DP_INT BIT(16) +#define INT_ENP_MAC_ERR_INT BIT(15) +#define INT_ENP_TDFU_INT BIT(14) +#define INT_ENP_TDFO_INT BIT(13) +#define INT_ENP_UTX_FP_INT BIT(12) + +#define TX_PKT_ALIGNMENT 4 +#define RX_PKT_ALIGNMENT 4 + +/* Tx Command A */ +#define TX_CMD_A_IGE_ (0x20000000) +#define TX_CMD_A_ICE_ (0x10000000) +#define TX_CMD_A_LSO_ (0x08000000) +#define TX_CMD_A_IPE_ (0x04000000) +#define TX_CMD_A_TPE_ (0x02000000) +#define TX_CMD_A_IVTG_ (0x01000000) +#define TX_CMD_A_RVTG_ (0x00800000) +#define TX_CMD_A_FCS_ (0x00400000) +#define TX_CMD_A_LEN_MASK_ (0x000FFFFF) + +/* Tx Command B */ +#define TX_CMD_B_MSS_SHIFT_ (16) +#define TX_CMD_B_MSS_MASK_ (0x3FFF0000) +#define TX_CMD_B_MSS_MIN_ ((unsigned short)8) +#define TX_CMD_B_VTAG_MASK_ (0x0000FFFF) +#define TX_CMD_B_VTAG_PRI_MASK_ (0x0000E000) +#define TX_CMD_B_VTAG_CFI_MASK_ (0x00001000) +#define TX_CMD_B_VTAG_VID_MASK_ (0x00000FFF) + +/* Rx Command A */ +#define RX_CMD_A_ICE_ (0x80000000) +#define RX_CMD_A_TCE_ (0x40000000) +#define RX_CMD_A_CSE_MASK_ (0xC0000000) +#define RX_CMD_A_IPV_ (0x20000000) +#define RX_CMD_A_PID_MASK_ (0x18000000) +#define RX_CMD_A_PID_NONE_IP_ (0x00000000) +#define RX_CMD_A_PID_TCP_IP_ (0x08000000) +#define RX_CMD_A_PID_UDP_IP_ (0x10000000) +#define RX_CMD_A_PID_IP_ (0x18000000) +#define RX_CMD_A_PFF_ (0x04000000) +#define RX_CMD_A_BAM_ (0x02000000) +#define RX_CMD_A_MAM_ (0x01000000) +#define RX_CMD_A_FVTG_ (0x00800000) +#define RX_CMD_A_RED_ (0x00400000) +#define RX_CMD_A_RX_ERRS_MASK_ (0xC03F0000) +#define RX_CMD_A_RWT_ (0x00200000) +#define RX_CMD_A_RUNT_ (0x00100000) +#define RX_CMD_A_LONG_ (0x00080000) +#define RX_CMD_A_RXE_ (0x00040000) +#define RX_CMD_A_DRB_ (0x00020000) +#define RX_CMD_A_FCS_ (0x00010000) +#define RX_CMD_A_UAM_ (0x00008000) +#define RX_CMD_A_ICSM_ (0x00004000) +#define RX_CMD_A_LEN_MASK_ (0x00003FFF) + +/* Rx Command B */ +#define RX_CMD_B_CSUM_SHIFT_ (16) +#define RX_CMD_B_CSUM_MASK_ (0xFFFF0000) +#define RX_CMD_B_VTAG_MASK_ (0x0000FFFF) +#define RX_CMD_B_VTAG_PRI_MASK_ (0x0000E000) +#define RX_CMD_B_VTAG_CFI_MASK_ (0x00001000) +#define RX_CMD_B_VTAG_VID_MASK_ (0x00000FFF) + +/* Rx Command C */ +#define RX_CMD_C_WAKE_SHIFT_ (15) +#define RX_CMD_C_WAKE_ (0x8000) +#define RX_CMD_C_REF_FAIL_SHIFT_ (14) +#define RX_CMD_C_REF_FAIL_ (0x4000) + +/* SCSRs */ +#define NUMBER_OF_REGS (193) + +#define ID_REV (0x00) +#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) +#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) +#define ID_REV_CHIP_ID_7800_ (0x7800) + +#define FPGA_REV (0x04) +#define FPGA_REV_MINOR_MASK_ (0x0000FF00) +#define FPGA_REV_MAJOR_MASK_ (0x000000FF) + +#define INT_STS (0x0C) +#define INT_STS_CLEAR_ALL_ (0xFFFFFFFF) +#define INT_STS_EEE_TX_LPI_STRT_ (0x04000000) +#define INT_STS_EEE_TX_LPI_STOP_ (0x02000000) +#define INT_STS_EEE_RX_LPI_ (0x01000000) +#define INT_STS_RDFO_ (0x00400000) +#define INT_STS_TXE_ (0x00200000) +#define INT_STS_TX_DIS_ (0x00080000) +#define INT_STS_RX_DIS_ (0x00040000) +#define INT_STS_PHY_INT_ (0x00020000) 
+#define INT_STS_DP_INT_ (0x00010000) +#define INT_STS_MAC_ERR_ (0x00008000) +#define INT_STS_TDFU_ (0x00004000) +#define INT_STS_TDFO_ (0x00002000) +#define INT_STS_UFX_FP_ (0x00001000) +#define INT_STS_GPIO_MASK_ (0x00000FFF) +#define INT_STS_GPIO11_ (0x00000800) +#define INT_STS_GPIO10_ (0x00000400) +#define INT_STS_GPIO9_ (0x00000200) +#define INT_STS_GPIO8_ (0x00000100) +#define INT_STS_GPIO7_ (0x00000080) +#define INT_STS_GPIO6_ (0x00000040) +#define INT_STS_GPIO5_ (0x00000020) +#define INT_STS_GPIO4_ (0x00000010) +#define INT_STS_GPIO3_ (0x00000008) +#define INT_STS_GPIO2_ (0x00000004) +#define INT_STS_GPIO1_ (0x00000002) +#define INT_STS_GPIO0_ (0x00000001) + +#define HW_CFG (0x010) +#define HW_CFG_CLK125_EN_ (0x02000000) +#define HW_CFG_REFCLK25_EN_ (0x01000000) +#define HW_CFG_LED3_EN_ (0x00800000) +#define HW_CFG_LED2_EN_ (0x00400000) +#define HW_CFG_LED1_EN_ (0x00200000) +#define HW_CFG_LED0_EN_ (0x00100000) +#define HW_CFG_EEE_PHY_LUSU_ (0x00020000) +#define HW_CFG_EEE_TSU_ (0x00010000) +#define HW_CFG_NETDET_STS_ (0x00008000) +#define HW_CFG_NETDET_EN_ (0x00004000) +#define HW_CFG_EEM_ (0x00002000) +#define HW_CFG_RST_PROTECT_ (0x00001000) +#define HW_CFG_CONNECT_BUF_ (0x00000400) +#define HW_CFG_CONNECT_EN_ (0x00000200) +#define HW_CFG_CONNECT_POL_ (0x00000100) +#define HW_CFG_SUSPEND_N_SEL_MASK_ (0x000000C0) +#define HW_CFG_SUSPEND_N_SEL_2 (0x00000000) +#define HW_CFG_SUSPEND_N_SEL_12N (0x00000040) +#define HW_CFG_SUSPEND_N_SEL_012N (0x00000080) +#define HW_CFG_SUSPEND_N_SEL_0123N (0x000000C0) +#define HW_CFG_SUSPEND_N_POL_ (0x00000020) +#define HW_CFG_MEF_ (0x00000010) +#define HW_CFG_ETC_ (0x00000008) +#define HW_CFG_LRST_ (0x00000002) +#define HW_CFG_SRST_ (0x00000001) + +#define PMT_CTL (0x014) +#define PMT_CTL_EEE_WAKEUP_EN_ (0x00002000) +#define PMT_CTL_EEE_WUPS_ (0x00001000) +#define PMT_CTL_MAC_SRST_ (0x00000800) +#define PMT_CTL_PHY_PWRUP_ (0x00000400) +#define PMT_CTL_RES_CLR_WKP_MASK_ (0x00000300) +#define PMT_CTL_RES_CLR_WKP_STS_ (0x00000200) +#define PMT_CTL_RES_CLR_WKP_EN_ (0x00000100) +#define PMT_CTL_READY_ (0x00000080) +#define PMT_CTL_SUS_MODE_MASK_ (0x00000060) +#define PMT_CTL_SUS_MODE_0_ (0x00000000) +#define PMT_CTL_SUS_MODE_1_ (0x00000020) +#define PMT_CTL_SUS_MODE_2_ (0x00000040) +#define PMT_CTL_SUS_MODE_3_ (0x00000060) +#define PMT_CTL_PHY_RST_ (0x00000010) +#define PMT_CTL_WOL_EN_ (0x00000008) +#define PMT_CTL_PHY_WAKE_EN_ (0x00000004) +#define PMT_CTL_WUPS_MASK_ (0x00000003) +#define PMT_CTL_WUPS_MLT_ (0x00000003) +#define PMT_CTL_WUPS_MAC_ (0x00000002) +#define PMT_CTL_WUPS_PHY_ (0x00000001) + +#define GPIO_CFG0 (0x018) +#define GPIO_CFG0_GPIOEN_MASK_ (0x0000F000) +#define GPIO_CFG0_GPIOEN3_ (0x00008000) +#define GPIO_CFG0_GPIOEN2_ (0x00004000) +#define GPIO_CFG0_GPIOEN1_ (0x00002000) +#define GPIO_CFG0_GPIOEN0_ (0x00001000) +#define GPIO_CFG0_GPIOBUF_MASK_ (0x00000F00) +#define GPIO_CFG0_GPIOBUF3_ (0x00000800) +#define GPIO_CFG0_GPIOBUF2_ (0x00000400) +#define GPIO_CFG0_GPIOBUF1_ (0x00000200) +#define GPIO_CFG0_GPIOBUF0_ (0x00000100) +#define GPIO_CFG0_GPIODIR_MASK_ (0x000000F0) +#define GPIO_CFG0_GPIODIR3_ (0x00000080) +#define GPIO_CFG0_GPIODIR2_ (0x00000040) +#define GPIO_CFG0_GPIODIR1_ (0x00000020) +#define GPIO_CFG0_GPIODIR0_ (0x00000010) +#define GPIO_CFG0_GPIOD_MASK_ (0x0000000F) +#define GPIO_CFG0_GPIOD3_ (0x00000008) +#define GPIO_CFG0_GPIOD2_ (0x00000004) +#define GPIO_CFG0_GPIOD1_ (0x00000002) +#define GPIO_CFG0_GPIOD0_ (0x00000001) + +#define GPIO_CFG1 (0x01C) +#define GPIO_CFG1_GPIOEN_MASK_ (0xFF000000) +#define GPIO_CFG1_GPIOEN11_ 
(0x80000000) +#define GPIO_CFG1_GPIOEN10_ (0x40000000) +#define GPIO_CFG1_GPIOEN9_ (0x20000000) +#define GPIO_CFG1_GPIOEN8_ (0x10000000) +#define GPIO_CFG1_GPIOEN7_ (0x08000000) +#define GPIO_CFG1_GPIOEN6_ (0x04000000) +#define GPIO_CFG1_GPIOEN5_ (0x02000000) +#define GPIO_CFG1_GPIOEN4_ (0x01000000) +#define GPIO_CFG1_GPIOBUF_MASK_ (0x00FF0000) +#define GPIO_CFG1_GPIOBUF11_ (0x00800000) +#define GPIO_CFG1_GPIOBUF10_ (0x00400000) +#define GPIO_CFG1_GPIOBUF9_ (0x00200000) +#define GPIO_CFG1_GPIOBUF8_ (0x00100000) +#define GPIO_CFG1_GPIOBUF7_ (0x00080000) +#define GPIO_CFG1_GPIOBUF6_ (0x00040000) +#define GPIO_CFG1_GPIOBUF5_ (0x00020000) +#define GPIO_CFG1_GPIOBUF4_ (0x00010000) +#define GPIO_CFG1_GPIODIR_MASK_ (0x0000FF00) +#define GPIO_CFG1_GPIODIR11_ (0x00008000) +#define GPIO_CFG1_GPIODIR10_ (0x00004000) +#define GPIO_CFG1_GPIODIR9_ (0x00002000) +#define GPIO_CFG1_GPIODIR8_ (0x00001000) +#define GPIO_CFG1_GPIODIR7_ (0x00000800) +#define GPIO_CFG1_GPIODIR6_ (0x00000400) +#define GPIO_CFG1_GPIODIR5_ (0x00000200) +#define GPIO_CFG1_GPIODIR4_ (0x00000100) +#define GPIO_CFG1_GPIOD_MASK_ (0x000000FF) +#define GPIO_CFG1_GPIOD11_ (0x00000080) +#define GPIO_CFG1_GPIOD10_ (0x00000040) +#define GPIO_CFG1_GPIOD9_ (0x00000020) +#define GPIO_CFG1_GPIOD8_ (0x00000010) +#define GPIO_CFG1_GPIOD7_ (0x00000008) +#define GPIO_CFG1_GPIOD6_ (0x00000004) +#define GPIO_CFG1_GPIOD5_ (0x00000002) +#define GPIO_CFG1_GPIOD4_ (0x00000001) + +#define GPIO_WAKE (0x020) +#define GPIO_WAKE_GPIOPOL_MASK_ (0x0FFF0000) +#define GPIO_WAKE_GPIOPOL11_ (0x08000000) +#define GPIO_WAKE_GPIOPOL10_ (0x04000000) +#define GPIO_WAKE_GPIOPOL9_ (0x02000000) +#define GPIO_WAKE_GPIOPOL8_ (0x01000000) +#define GPIO_WAKE_GPIOPOL7_ (0x00800000) +#define GPIO_WAKE_GPIOPOL6_ (0x00400000) +#define GPIO_WAKE_GPIOPOL5_ (0x00200000) +#define GPIO_WAKE_GPIOPOL4_ (0x00100000) +#define GPIO_WAKE_GPIOPOL3_ (0x00080000) +#define GPIO_WAKE_GPIOPOL2_ (0x00040000) +#define GPIO_WAKE_GPIOPOL1_ (0x00020000) +#define GPIO_WAKE_GPIOPOL0_ (0x00010000) +#define GPIO_WAKE_GPIOWK_MASK_ (0x00000FFF) +#define GPIO_WAKE_GPIOWK11_ (0x00000800) +#define GPIO_WAKE_GPIOWK10_ (0x00000400) +#define GPIO_WAKE_GPIOWK9_ (0x00000200) +#define GPIO_WAKE_GPIOWK8_ (0x00000100) +#define GPIO_WAKE_GPIOWK7_ (0x00000080) +#define GPIO_WAKE_GPIOWK6_ (0x00000040) +#define GPIO_WAKE_GPIOWK5_ (0x00000020) +#define GPIO_WAKE_GPIOWK4_ (0x00000010) +#define GPIO_WAKE_GPIOWK3_ (0x00000008) +#define GPIO_WAKE_GPIOWK2_ (0x00000004) +#define GPIO_WAKE_GPIOWK1_ (0x00000002) +#define GPIO_WAKE_GPIOWK0_ (0x00000001) + +#define DP_SEL (0x024) +#define DP_SEL_DPRDY_ (0x80000000) +#define DP_SEL_RSEL_MASK_ (0x0000000F) +#define DP_SEL_RSEL_USB_PHY_CSRS_ (0x0000000F) +#define DP_SEL_RSEL_OTP_64BIT_ (0x00000009) +#define DP_SEL_RSEL_OTP_8BIT_ (0x00000008) +#define DP_SEL_RSEL_UTX_BUF_RAM_ (0x00000007) +#define DP_SEL_RSEL_DESC_RAM_ (0x00000005) +#define DP_SEL_RSEL_TXFIFO_ (0x00000004) +#define DP_SEL_RSEL_RXFIFO_ (0x00000003) +#define DP_SEL_RSEL_LSO_ (0x00000002) +#define DP_SEL_RSEL_VLAN_DA_ (0x00000001) +#define DP_SEL_RSEL_URXBUF_ (0x00000000) +#define DP_SEL_VHF_HASH_LEN (16) +#define DP_SEL_VHF_VLAN_LEN (128) + +#define DP_CMD (0x028) +#define DP_CMD_WRITE_ (0x00000001) +#define DP_CMD_READ_ (0x00000000) + +#define DP_ADDR (0x02C) +#define DP_ADDR_MASK_ (0x00003FFF) + +#define DP_DATA (0x030) + +#define E2P_CMD (0x040) +#define E2P_CMD_EPC_BUSY_ (0x80000000) +#define E2P_CMD_EPC_CMD_MASK_ (0x70000000) +#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) +#define 
E2P_CMD_EPC_CMD_ERAL_ (0x60000000) +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_TIMEOUT_ (0x00000400) +#define E2P_CMD_EPC_DL_ (0x00000200) +#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF) + +#define E2P_DATA (0x044) +#define E2P_DATA_EEPROM_DATA_MASK_ (0x000000FF) + +#define BOS_ATTR (0x050) +#define BOS_ATTR_BLOCK_SIZE_MASK_ (0x000000FF) + +#define SS_ATTR (0x054) +#define SS_ATTR_POLL_INT_MASK_ (0x00FF0000) +#define SS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00) +#define SS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF) + +#define HS_ATTR (0x058) +#define HS_ATTR_POLL_INT_MASK_ (0x00FF0000) +#define HS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00) +#define HS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF) + +#define FS_ATTR (0x05C) +#define FS_ATTR_POLL_INT_MASK_ (0x00FF0000) +#define FS_ATTR_DEV_DESC_SIZE_MASK_ (0x0000FF00) +#define FS_ATTR_CFG_BLK_SIZE_MASK_ (0x000000FF) + +#define STR_ATTR0 (0x060) +#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_ (0xFF000000) +#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_ (0x00FF0000) +#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_ (0x0000FF00) +#define STR_ATTR0_MANUF_DESC_SIZE_MASK_ (0x000000FF) + +#define STR_ATTR1 (0x064) +#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_ (0x000000FF) + +#define STR_FLAG_ATTR (0x068) +#define STR_FLAG_ATTR_PME_FLAGS_MASK_ (0x000000FF) + +#define USB_CFG0 (0x080) +#define USB_CFG_LPM_RESPONSE_ (0x80000000) +#define USB_CFG_LPM_CAPABILITY_ (0x40000000) +#define USB_CFG_LPM_ENBL_SLPM_ (0x20000000) +#define USB_CFG_HIRD_THR_MASK_ (0x1F000000) +#define USB_CFG_HIRD_THR_960_ (0x1C000000) +#define USB_CFG_HIRD_THR_885_ (0x1B000000) +#define USB_CFG_HIRD_THR_810_ (0x1A000000) +#define USB_CFG_HIRD_THR_735_ (0x19000000) +#define USB_CFG_HIRD_THR_660_ (0x18000000) +#define USB_CFG_HIRD_THR_585_ (0x17000000) +#define USB_CFG_HIRD_THR_510_ (0x16000000) +#define USB_CFG_HIRD_THR_435_ (0x15000000) +#define USB_CFG_HIRD_THR_360_ (0x14000000) +#define USB_CFG_HIRD_THR_285_ (0x13000000) +#define USB_CFG_HIRD_THR_210_ (0x12000000) +#define USB_CFG_HIRD_THR_135_ (0x11000000) +#define USB_CFG_HIRD_THR_60_ (0x10000000) +#define USB_CFG_MAX_BURST_BI_MASK_ (0x00F00000) +#define USB_CFG_MAX_BURST_BO_MASK_ (0x000F0000) +#define USB_CFG_MAX_DEV_SPEED_MASK_ (0x0000E000) +#define USB_CFG_MAX_DEV_SPEED_SS_ (0x00008000) +#define USB_CFG_MAX_DEV_SPEED_HS_ (0x00000000) +#define USB_CFG_MAX_DEV_SPEED_FS_ (0x00002000) +#define USB_CFG_PHY_BOOST_MASK_ (0x00000180) +#define USB_CFG_PHY_BOOST_PLUS_12_ (0x00000180) +#define USB_CFG_PHY_BOOST_PLUS_8_ (0x00000100) +#define USB_CFG_PHY_BOOST_PLUS_4_ (0x00000080) +#define USB_CFG_PHY_BOOST_NORMAL_ (0x00000000) +#define USB_CFG_BIR_ (0x00000040) +#define USB_CFG_BCE_ (0x00000020) +#define USB_CFG_PORT_SWAP_ (0x00000010) +#define USB_CFG_LPM_EN_ (0x00000008) +#define USB_CFG_RMT_WKP_ (0x00000004) +#define USB_CFG_PWR_SEL_ (0x00000002) +#define USB_CFG_STALL_BO_DIS_ (0x00000001) + +#define USB_CFG1 (0x084) +#define USB_CFG1_U1_TIMEOUT_MASK_ (0xFF000000) +#define USB_CFG1_U2_TIMEOUT_MASK_ (0x00FF0000) +#define USB_CFG1_HS_TOUT_CAL_MASK_ (0x0000E000) +#define USB_CFG1_DEV_U2_INIT_EN_ (0x00001000) +#define USB_CFG1_DEV_U2_EN_ (0x00000800) +#define USB_CFG1_DEV_U1_INIT_EN_ (0x00000400) +#define USB_CFG1_DEV_U1_EN_ (0x00000200) +#define USB_CFG1_LTM_ENABLE_ (0x00000100) +#define USB_CFG1_FS_TOUT_CAL_MASK_ (0x00000070) +#define 
USB_CFG1_SCALE_DOWN_MASK_ (0x00000003) +#define USB_CFG1_SCALE_DOWN_MODE3_ (0x00000003) +#define USB_CFG1_SCALE_DOWN_MODE2_ (0x00000002) +#define USB_CFG1_SCALE_DOWN_MODE1_ (0x00000001) +#define USB_CFG1_SCALE_DOWN_MODE0_ (0x00000000) + +#define USB_CFG2 (0x088) +#define USB_CFG2_SS_DETACH_TIME_MASK_ (0xFFFF0000) +#define USB_CFG2_HS_DETACH_TIME_MASK_ (0x0000FFFF) + +#define BURST_CAP (0x090) +#define BURST_CAP_SIZE_MASK_ (0x000000FF) + +#define BULK_IN_DLY (0x094) +#define BULK_IN_DLY_MASK_ (0x0000FFFF) + +#define INT_EP_CTL (0x098) +#define INT_EP_INTEP_ON_ (0x80000000) +#define INT_STS_EEE_TX_LPI_STRT_EN_ (0x04000000) +#define INT_STS_EEE_TX_LPI_STOP_EN_ (0x02000000) +#define INT_STS_EEE_RX_LPI_EN_ (0x01000000) +#define INT_EP_RDFO_EN_ (0x00400000) +#define INT_EP_TXE_EN_ (0x00200000) +#define INT_EP_TX_DIS_EN_ (0x00080000) +#define INT_EP_RX_DIS_EN_ (0x00040000) +#define INT_EP_PHY_INT_EN_ (0x00020000) +#define INT_EP_DP_INT_EN_ (0x00010000) +#define INT_EP_MAC_ERR_EN_ (0x00008000) +#define INT_EP_TDFU_EN_ (0x00004000) +#define INT_EP_TDFO_EN_ (0x00002000) +#define INT_EP_UTX_FP_EN_ (0x00001000) +#define INT_EP_GPIO_EN_MASK_ (0x00000FFF) + +#define PIPE_CTL (0x09C) +#define PIPE_CTL_TXSWING_ (0x00000040) +#define PIPE_CTL_TXMARGIN_MASK_ (0x00000038) +#define PIPE_CTL_TXDEEMPHASIS_MASK_ (0x00000006) +#define PIPE_CTL_ELASTICITYBUFFERMODE_ (0x00000001) + +#define U1_LATENCY (0xA0) +#define U2_LATENCY (0xA4) + +#define USB_STATUS (0x0A8) +#define USB_STATUS_REMOTE_WK_ (0x00100000) +#define USB_STATUS_FUNC_REMOTE_WK_ (0x00080000) +#define USB_STATUS_LTM_ENABLE_ (0x00040000) +#define USB_STATUS_U2_ENABLE_ (0x00020000) +#define USB_STATUS_U1_ENABLE_ (0x00010000) +#define USB_STATUS_SET_SEL_ (0x00000020) +#define USB_STATUS_REMOTE_WK_STS_ (0x00000010) +#define USB_STATUS_FUNC_REMOTE_WK_STS_ (0x00000008) +#define USB_STATUS_LTM_ENABLE_STS_ (0x00000004) +#define USB_STATUS_U2_ENABLE_STS_ (0x00000002) +#define USB_STATUS_U1_ENABLE_STS_ (0x00000001) + +#define USB_CFG3 (0x0AC) +#define USB_CFG3_EN_U2_LTM_ (0x40000000) +#define USB_CFG3_BULK_OUT_NUMP_OVR_ (0x20000000) +#define USB_CFG3_DIS_FAST_U1_EXIT_ (0x10000000) +#define USB_CFG3_LPM_NYET_THR_ (0x0F000000) +#define USB_CFG3_RX_DET_2_POL_LFPS_ (0x00800000) +#define USB_CFG3_LFPS_FILT_ (0x00400000) +#define USB_CFG3_SKIP_RX_DET_ (0x00200000) +#define USB_CFG3_DELAY_P1P2P3_ (0x001C0000) +#define USB_CFG3_DELAY_PHY_PWR_CHG_ (0x00020000) +#define USB_CFG3_U1U2_EXIT_FR_ (0x00010000) +#define USB_CFG3_REQ_P1P2P3 (0x00008000) +#define USB_CFG3_HST_PRT_CMPL_ (0x00004000) +#define USB_CFG3_DIS_SCRAMB_ (0x00002000) +#define USB_CFG3_PWR_DN_SCALE_ (0x00001FFF) + +#define RFE_CTL (0x0B0) +#define RFE_CTL_IGMP_COE_ (0x00004000) +#define RFE_CTL_ICMP_COE_ (0x00002000) +#define RFE_CTL_TCPUDP_COE_ (0x00001000) +#define RFE_CTL_IP_COE_ (0x00000800) +#define RFE_CTL_BCAST_EN_ (0x00000400) +#define RFE_CTL_MCAST_EN_ (0x00000200) +#define RFE_CTL_UCAST_EN_ (0x00000100) +#define RFE_CTL_VLAN_STRIP_ (0x00000080) +#define RFE_CTL_DISCARD_UNTAGGED_ (0x00000040) +#define RFE_CTL_VLAN_FILTER_ (0x00000020) +#define RFE_CTL_SA_FILTER_ (0x00000010) +#define RFE_CTL_MCAST_HASH_ (0x00000008) +#define RFE_CTL_DA_HASH_ (0x00000004) +#define RFE_CTL_DA_PERFECT_ (0x00000002) +#define RFE_CTL_RST_ (0x00000001) + +#define VLAN_TYPE (0x0B4) +#define VLAN_TYPE_MASK_ (0x0000FFFF) + +#define FCT_RX_CTL (0x0C0) +#define FCT_RX_CTL_EN_ (0x80000000) +#define FCT_RX_CTL_RST_ (0x40000000) +#define FCT_RX_CTL_SBF_ (0x02000000) +#define FCT_RX_CTL_OVFL_ (0x01000000) +#define 
FCT_RX_CTL_DROP_ (0x00800000) +#define FCT_RX_CTL_NOT_EMPTY_ (0x00400000) +#define FCT_RX_CTL_EMPTY_ (0x00200000) +#define FCT_RX_CTL_DIS_ (0x00100000) +#define FCT_RX_CTL_USED_MASK_ (0x0000FFFF) + +#define FCT_TX_CTL (0x0C4) +#define FCT_TX_CTL_EN_ (0x80000000) +#define FCT_TX_CTL_RST_ (0x40000000) +#define FCT_TX_CTL_NOT_EMPTY_ (0x00400000) +#define FCT_TX_CTL_EMPTY_ (0x00200000) +#define FCT_TX_CTL_DIS_ (0x00100000) +#define FCT_TX_CTL_USED_MASK_ (0x0000FFFF) + +#define FCT_RX_FIFO_END (0x0C8) +#define FCT_RX_FIFO_END_MASK_ (0x0000007F) + +#define FCT_TX_FIFO_END (0x0CC) +#define FCT_TX_FIFO_END_MASK_ (0x0000003F) + +#define FCT_FLOW (0x0D0) +#define FCT_FLOW_OFF_MASK_ (0x00007F00) +#define FCT_FLOW_ON_MASK_ (0x0000007F) + +#define RX_DP_STOR (0x0D4) +#define RX_DP_STORE_TOT_RXUSED_MASK_ (0xFFFF0000) +#define RX_DP_STORE_UTX_RXUSED_MASK_ (0x0000FFFF) + +#define TX_DP_STOR (0x0D8) +#define TX_DP_STORE_TOT_TXUSED_MASK_ (0xFFFF0000) +#define TX_DP_STORE_URX_TXUSED_MASK_ (0x0000FFFF) + +#define LTM_BELT_IDLE0 (0x0E0) +#define LTM_BELT_IDLE0_IDLE1000_ (0x0FFF0000) +#define LTM_BELT_IDLE0_IDLE100_ (0x00000FFF) + +#define LTM_BELT_IDLE1 (0x0E4) +#define LTM_BELT_IDLE1_IDLE10_ (0x00000FFF) + +#define LTM_BELT_ACT0 (0x0E8) +#define LTM_BELT_ACT0_ACT1000_ (0x0FFF0000) +#define LTM_BELT_ACT0_ACT100_ (0x00000FFF) + +#define LTM_BELT_ACT1 (0x0EC) +#define LTM_BELT_ACT1_ACT10_ (0x00000FFF) + +#define LTM_INACTIVE0 (0x0F0) +#define LTM_INACTIVE0_TIMER1000_ (0xFFFF0000) +#define LTM_INACTIVE0_TIMER100_ (0x0000FFFF) + +#define LTM_INACTIVE1 (0x0F4) +#define LTM_INACTIVE1_TIMER10_ (0x0000FFFF) + +#define MAC_CR (0x100) +#define MAC_CR_GMII_EN_ (0x00080000) +#define MAC_CR_EEE_TX_CLK_STOP_EN_ (0x00040000) +#define MAC_CR_EEE_EN_ (0x00020000) +#define MAC_CR_EEE_TLAR_EN_ (0x00010000) +#define MAC_CR_ADP_ (0x00002000) +#define MAC_CR_AUTO_DUPLEX_ (0x00001000) +#define MAC_CR_AUTO_SPEED_ (0x00000800) +#define MAC_CR_LOOPBACK_ (0x00000400) +#define MAC_CR_BOLMT_MASK_ (0x000000C0) +#define MAC_CR_FULL_DUPLEX_ (0x00000008) +#define MAC_CR_SPEED_MASK_ (0x00000006) +#define MAC_CR_SPEED_1000_ (0x00000004) +#define MAC_CR_SPEED_100_ (0x00000002) +#define MAC_CR_SPEED_10_ (0x00000000) +#define MAC_CR_RST_ (0x00000001) + +#define MAC_RX (0x104) +#define MAC_RX_MAX_SIZE_SHIFT_ (16) +#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000) +#define MAC_RX_FCS_STRIP_ (0x00000010) +#define MAC_RX_VLAN_FSE_ (0x00000004) +#define MAC_RX_RXD_ (0x00000002) +#define MAC_RX_RXEN_ (0x00000001) + +#define MAC_TX (0x108) +#define MAC_TX_BAD_FCS_ (0x00000004) +#define MAC_TX_TXD_ (0x00000002) +#define MAC_TX_TXEN_ (0x00000001) + +#define FLOW (0x10C) +#define FLOW_CR_FORCE_FC_ (0x80000000) +#define FLOW_CR_TX_FCEN_ (0x40000000) +#define FLOW_CR_RX_FCEN_ (0x20000000) +#define FLOW_CR_FPF_ (0x10000000) +#define FLOW_CR_FCPT_MASK_ (0x0000FFFF) + +#define RAND_SEED (0x110) +#define RAND_SEED_MASK_ (0x0000FFFF) + +#define ERR_STS (0x114) +#define ERR_STS_FERR_ (0x00000100) +#define ERR_STS_LERR_ (0x00000080) +#define ERR_STS_RFERR_ (0x00000040) +#define ERR_STS_ECERR_ (0x00000010) +#define ERR_STS_ALERR_ (0x00000008) +#define ERR_STS_URERR_ (0x00000004) + +#define RX_ADDRH (0x118) +#define RX_ADDRH_MASK_ (0x0000FFFF) + +#define RX_ADDRL (0x11C) +#define RX_ADDRL_MASK_ (0xFFFFFFFF) + +#define MII_ACC (0x120) +#define MII_ACC_PHY_ADDR_SHIFT_ (11) +#define MII_ACC_PHY_ADDR_MASK_ (0x0000F800) +#define MII_ACC_MIIRINDA_SHIFT_ (6) +#define MII_ACC_MIIRINDA_MASK_ (0x000007C0) +#define MII_ACC_MII_READ_ (0x00000000) +#define MII_ACC_MII_WRITE_ (0x00000002) 
+#define MII_ACC_MII_BUSY_ (0x00000001) + +#define MII_DATA (0x124) +#define MII_DATA_MASK_ (0x0000FFFF) + +#define MAC_RGMII_ID (0x128) +#define MAC_RGMII_ID_TXC_DELAY_EN_ (0x00000002) +#define MAC_RGMII_ID_RXC_DELAY_EN_ (0x00000001) + +#define EEE_TX_LPI_REQ_DLY (0x130) +#define EEE_TX_LPI_REQ_DLY_CNT_MASK_ (0xFFFFFFFF) + +#define EEE_TW_TX_SYS (0x134) +#define EEE_TW_TX_SYS_CNT1G_MASK_ (0xFFFF0000) +#define EEE_TW_TX_SYS_CNT100M_MASK_ (0x0000FFFF) + +#define EEE_TX_LPI_REM_DLY (0x138) +#define EEE_TX_LPI_REM_DLY_CNT_ (0x00FFFFFF) + +#define WUCSR (0x140) +#define WUCSR_TESTMODE_ (0x80000000) +#define WUCSR_RFE_WAKE_EN_ (0x00004000) +#define WUCSR_EEE_TX_WAKE_ (0x00002000) +#define WUCSR_EEE_TX_WAKE_EN_ (0x00001000) +#define WUCSR_EEE_RX_WAKE_ (0x00000800) +#define WUCSR_EEE_RX_WAKE_EN_ (0x00000400) +#define WUCSR_RFE_WAKE_FR_ (0x00000200) +#define WUCSR_STORE_WAKE_ (0x00000100) +#define WUCSR_PFDA_FR_ (0x00000080) +#define WUCSR_WUFR_ (0x00000040) +#define WUCSR_MPR_ (0x00000020) +#define WUCSR_BCST_FR_ (0x00000010) +#define WUCSR_PFDA_EN_ (0x00000008) +#define WUCSR_WAKE_EN_ (0x00000004) +#define WUCSR_MPEN_ (0x00000002) +#define WUCSR_BCST_EN_ (0x00000001) + +#define WK_SRC (0x144) +#define WK_SRC_GPIOX_INT_WK_SHIFT_ (20) +#define WK_SRC_GPIOX_INT_WK_MASK_ (0xFFF00000) +#define WK_SRC_IPV6_TCPSYN_RCD_WK_ (0x00010000) +#define WK_SRC_IPV4_TCPSYN_RCD_WK_ (0x00008000) +#define WK_SRC_EEE_TX_WK_ (0x00004000) +#define WK_SRC_EEE_RX_WK_ (0x00002000) +#define WK_SRC_GOOD_FR_WK_ (0x00001000) +#define WK_SRC_PFDA_FR_WK_ (0x00000800) +#define WK_SRC_MP_FR_WK_ (0x00000400) +#define WK_SRC_BCAST_FR_WK_ (0x00000200) +#define WK_SRC_WU_FR_WK_ (0x00000100) +#define WK_SRC_WUFF_MATCH_MASK_ (0x0000001F) + +#define WUF_CFG0 (0x150) +#define NUM_OF_WUF_CFG (32) +#define WUF_CFG_BEGIN (WUF_CFG0) +#define WUF_CFG(index) (WUF_CFG_BEGIN + (4 * (index))) +#define WUF_CFGX_EN_ (0x80000000) +#define WUF_CFGX_TYPE_MASK_ (0x03000000) +#define WUF_CFGX_TYPE_MCAST_ (0x02000000) +#define WUF_CFGX_TYPE_ALL_ (0x01000000) +#define WUF_CFGX_TYPE_UCAST_ (0x00000000) +#define WUF_CFGX_OFFSET_SHIFT_ (16) +#define WUF_CFGX_OFFSET_MASK_ (0x00FF0000) +#define WUF_CFGX_CRC16_MASK_ (0x0000FFFF) + +#define WUF_MASK0_0 (0x200) +#define WUF_MASK0_1 (0x204) +#define WUF_MASK0_2 (0x208) +#define WUF_MASK0_3 (0x20C) +#define NUM_OF_WUF_MASK (32) +#define WUF_MASK0_BEGIN (WUF_MASK0_0) +#define WUF_MASK1_BEGIN (WUF_MASK0_1) +#define WUF_MASK2_BEGIN (WUF_MASK0_2) +#define WUF_MASK3_BEGIN (WUF_MASK0_3) +#define WUF_MASK0(index) (WUF_MASK0_BEGIN + (0x10 * (index))) +#define WUF_MASK1(index) (WUF_MASK1_BEGIN + (0x10 * (index))) +#define WUF_MASK2(index) (WUF_MASK2_BEGIN + (0x10 * (index))) +#define WUF_MASK3(index) (WUF_MASK3_BEGIN + (0x10 * (index))) + +#define MAF_BASE (0x400) +#define MAF_HIX (0x00) +#define MAF_LOX (0x04) +#define NUM_OF_MAF (33) +#define MAF_HI_BEGIN (MAF_BASE + MAF_HIX) +#define MAF_LO_BEGIN (MAF_BASE + MAF_LOX) +#define MAF_HI(index) (MAF_BASE + (8 * (index)) + (MAF_HIX)) +#define MAF_LO(index) (MAF_BASE + (8 * (index)) + (MAF_LOX)) +#define MAF_HI_VALID_ (0x80000000) +#define MAF_HI_TYPE_MASK_ (0x40000000) +#define MAF_HI_TYPE_SRC_ (0x40000000) +#define MAF_HI_TYPE_DST_ (0x00000000) +#define MAF_HI_ADDR_MASK (0x0000FFFF) +#define MAF_LO_ADDR_MASK (0xFFFFFFFF) + +#define WUCSR2 (0x600) +#define WUCSR2_CSUM_DISABLE_ (0x80000000) +#define WUCSR2_NA_SA_SEL_ (0x00000100) +#define WUCSR2_NS_RCD_ (0x00000080) +#define WUCSR2_ARP_RCD_ (0x00000040) +#define WUCSR2_IPV6_TCPSYN_RCD_ (0x00000020) +#define 
WUCSR2_IPV4_TCPSYN_RCD_ (0x00000010) +#define WUCSR2_NS_OFFLOAD_EN_ (0x00000008) +#define WUCSR2_ARP_OFFLOAD_EN_ (0x00000004) +#define WUCSR2_IPV6_TCPSYN_WAKE_EN_ (0x00000002) +#define WUCSR2_IPV4_TCPSYN_WAKE_EN_ (0x00000001) + +#define NS1_IPV6_ADDR_DEST0 (0x610) +#define NS1_IPV6_ADDR_DEST1 (0x614) +#define NS1_IPV6_ADDR_DEST2 (0x618) +#define NS1_IPV6_ADDR_DEST3 (0x61C) + +#define NS1_IPV6_ADDR_SRC0 (0x620) +#define NS1_IPV6_ADDR_SRC1 (0x624) +#define NS1_IPV6_ADDR_SRC2 (0x628) +#define NS1_IPV6_ADDR_SRC3 (0x62C) + +#define NS1_ICMPV6_ADDR0_0 (0x630) +#define NS1_ICMPV6_ADDR0_1 (0x634) +#define NS1_ICMPV6_ADDR0_2 (0x638) +#define NS1_ICMPV6_ADDR0_3 (0x63C) + +#define NS1_ICMPV6_ADDR1_0 (0x640) +#define NS1_ICMPV6_ADDR1_1 (0x644) +#define NS1_ICMPV6_ADDR1_2 (0x648) +#define NS1_ICMPV6_ADDR1_3 (0x64C) + +#define NS2_IPV6_ADDR_DEST0 (0x650) +#define NS2_IPV6_ADDR_DEST1 (0x654) +#define NS2_IPV6_ADDR_DEST2 (0x658) +#define NS2_IPV6_ADDR_DEST3 (0x65C) + +#define NS2_IPV6_ADDR_SRC0 (0x660) +#define NS2_IPV6_ADDR_SRC1 (0x664) +#define NS2_IPV6_ADDR_SRC2 (0x668) +#define NS2_IPV6_ADDR_SRC3 (0x66C) + +#define NS2_ICMPV6_ADDR0_0 (0x670) +#define NS2_ICMPV6_ADDR0_1 (0x674) +#define NS2_ICMPV6_ADDR0_2 (0x678) +#define NS2_ICMPV6_ADDR0_3 (0x67C) + +#define NS2_ICMPV6_ADDR1_0 (0x680) +#define NS2_ICMPV6_ADDR1_1 (0x684) +#define NS2_ICMPV6_ADDR1_2 (0x688) +#define NS2_ICMPV6_ADDR1_3 (0x68C) + +#define SYN_IPV4_ADDR_SRC (0x690) +#define SYN_IPV4_ADDR_DEST (0x694) +#define SYN_IPV4_TCP_PORTS (0x698) +#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_ (16) +#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_ (0xFFFF0000) +#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_ (0x0000FFFF) + +#define SYN_IPV6_ADDR_SRC0 (0x69C) +#define SYN_IPV6_ADDR_SRC1 (0x6A0) +#define SYN_IPV6_ADDR_SRC2 (0x6A4) +#define SYN_IPV6_ADDR_SRC3 (0x6A8) + +#define SYN_IPV6_ADDR_DEST0 (0x6AC) +#define SYN_IPV6_ADDR_DEST1 (0x6B0) +#define SYN_IPV6_ADDR_DEST2 (0x6B4) +#define SYN_IPV6_ADDR_DEST3 (0x6B8) + +#define SYN_IPV6_TCP_PORTS (0x6BC) +#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_ (16) +#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_ (0xFFFF0000) +#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_ (0x0000FFFF) + +#define ARP_SPA (0x6C0) +#define ARP_TPA (0x6C4) + +#define PHY_DEV_ID (0x700) +#define PHY_DEV_ID_REV_SHIFT_ (28) +#define PHY_DEV_ID_REV_MASK_ (0xF0000000) +#define PHY_DEV_ID_MODEL_SHIFT_ (22) +#define PHY_DEV_ID_MODEL_MASK_ (0x0FC00000) +#define PHY_DEV_ID_OUI_MASK_ (0x003FFFFF) + +#define OTP_BASE_ADDR (0x00001000) +#define OTP_ADDR_RANGE_ (0x1FF) + +#define OTP_PWR_DN (OTP_BASE_ADDR + 4 * 0x00) +#define OTP_PWR_DN_PWRDN_N_ (0x01) + +#define OTP_ADDR1 (OTP_BASE_ADDR + 4 * 0x01) +#define OTP_ADDR1_15_11 (0x1F) + +#define OTP_ADDR2 (OTP_BASE_ADDR + 4 * 0x02) +#define OTP_ADDR2_10_3 (0xFF) + +#define OTP_ADDR3 (OTP_BASE_ADDR + 4 * 0x03) +#define OTP_ADDR3_2_0 (0x03) + +#define OTP_PRGM_DATA (OTP_BASE_ADDR + 4 * 0x04) + +#define OTP_PRGM_MODE (OTP_BASE_ADDR + 4 * 0x05) +#define OTP_PRGM_MODE_BYTE_ (0x01) + +#define OTP_RD_DATA (OTP_BASE_ADDR + 4 * 0x06) + +#define OTP_FUNC_CMD (OTP_BASE_ADDR + 4 * 0x08) +#define OTP_FUNC_CMD_RESET_ (0x04) +#define OTP_FUNC_CMD_PROGRAM_ (0x02) +#define OTP_FUNC_CMD_READ_ (0x01) + +#define OTP_TST_CMD (OTP_BASE_ADDR + 4 * 0x09) +#define OTP_TST_CMD_TEST_DEC_SEL_ (0x10) +#define OTP_TST_CMD_PRGVRFY_ (0x08) +#define OTP_TST_CMD_WRTEST_ (0x04) +#define OTP_TST_CMD_TESTDEC_ (0x02) +#define OTP_TST_CMD_BLANKCHECK_ (0x01) + +#define OTP_CMD_GO (OTP_BASE_ADDR + 4 * 
0x0A) +#define OTP_CMD_GO_GO_ (0x01) + +#define OTP_PASS_FAIL (OTP_BASE_ADDR + 4 * 0x0B) +#define OTP_PASS_FAIL_PASS_ (0x02) +#define OTP_PASS_FAIL_FAIL_ (0x01) + +#define OTP_STATUS (OTP_BASE_ADDR + 4 * 0x0C) +#define OTP_STATUS_OTP_LOCK_ (0x10) +#define OTP_STATUS_WEB_ (0x08) +#define OTP_STATUS_PGMEN (0x04) +#define OTP_STATUS_CPUMPEN_ (0x02) +#define OTP_STATUS_BUSY_ (0x01) + +#define OTP_MAX_PRG (OTP_BASE_ADDR + 4 * 0x0D) +#define OTP_MAX_PRG_MAX_PROG (0x1F) + +#define OTP_INTR_STATUS (OTP_BASE_ADDR + 4 * 0x10) +#define OTP_INTR_STATUS_READY_ (0x01) + +#define OTP_INTR_MASK (OTP_BASE_ADDR + 4 * 0x11) +#define OTP_INTR_MASK_READY_ (0x01) + +#define OTP_RSTB_PW1 (OTP_BASE_ADDR + 4 * 0x14) +#define OTP_RSTB_PW2 (OTP_BASE_ADDR + 4 * 0x15) +#define OTP_PGM_PW1 (OTP_BASE_ADDR + 4 * 0x18) +#define OTP_PGM_PW2 (OTP_BASE_ADDR + 4 * 0x19) +#define OTP_READ_PW1 (OTP_BASE_ADDR + 4 * 0x1C) +#define OTP_READ_PW2 (OTP_BASE_ADDR + 4 * 0x1D) +#define OTP_TCRST (OTP_BASE_ADDR + 4 * 0x20) +#define OTP_RSRD (OTP_BASE_ADDR + 4 * 0x21) +#define OTP_TREADEN_VAL (OTP_BASE_ADDR + 4 * 0x22) +#define OTP_TDLES_VAL (OTP_BASE_ADDR + 4 * 0x23) +#define OTP_TWWL_VAL (OTP_BASE_ADDR + 4 * 0x24) +#define OTP_TDLEH_VAL (OTP_BASE_ADDR + 4 * 0x25) +#define OTP_TWPED_VAL (OTP_BASE_ADDR + 4 * 0x26) +#define OTP_TPES_VAL (OTP_BASE_ADDR + 4 * 0x27) +#define OTP_TCPS_VAL (OTP_BASE_ADDR + 4 * 0x28) +#define OTP_TCPH_VAL (OTP_BASE_ADDR + 4 * 0x29) +#define OTP_TPGMVFY_VAL (OTP_BASE_ADDR + 4 * 0x2A) +#define OTP_TPEH_VAL (OTP_BASE_ADDR + 4 * 0x2B) +#define OTP_TPGRST_VAL (OTP_BASE_ADDR + 4 * 0x2C) +#define OTP_TCLES_VAL (OTP_BASE_ADDR + 4 * 0x2D) +#define OTP_TCLEH_VAL (OTP_BASE_ADDR + 4 * 0x2E) +#define OTP_TRDES_VAL (OTP_BASE_ADDR + 4 * 0x2F) +#define OTP_TBCACC_VAL (OTP_BASE_ADDR + 4 * 0x30) +#define OTP_TAAC_VAL (OTP_BASE_ADDR + 4 * 0x31) +#define OTP_TACCT_VAL (OTP_BASE_ADDR + 4 * 0x32) +#define OTP_TRDEP_VAL (OTP_BASE_ADDR + 4 * 0x38) +#define OTP_TPGSV_VAL (OTP_BASE_ADDR + 4 * 0x39) +#define OTP_TPVSR_VAL (OTP_BASE_ADDR + 4 * 0x3A) +#define OTP_TPVHR_VAL (OTP_BASE_ADDR + 4 * 0x3B) +#define OTP_TPVSA_VAL (OTP_BASE_ADDR + 4 * 0x3C) + +#define PHY_ID1 (0x02) +#define PHY_ID2 (0x03) + +#define PHY_DEV_ID_OUI_VTSE (0x04001C) +#define PHY_DEV_ID_MODEL_VTSE_8502 (0x23) + +#define PHY_AUTONEG_ADV (0x04) +#define NWAY_AR_NEXT_PAGE_ (0x8000) +#define NWAY_AR_REMOTE_FAULT_ (0x2000) +#define NWAY_AR_ASM_DIR_ (0x0800) +#define NWAY_AR_PAUSE_ (0x0400) +#define NWAY_AR_100T4_CAPS_ (0x0200) +#define NWAY_AR_100TX_FD_CAPS_ (0x0100) +#define NWAY_AR_SELECTOR_FIELD_ (0x001F) +#define NWAY_AR_100TX_HD_CAPS_ (0x0080) +#define NWAY_AR_10T_FD_CAPS_ (0x0040) +#define NWAY_AR_10T_HD_CAPS_ (0x0020) +#define NWAY_AR_ALL_CAPS_ (NWAY_AR_10T_HD_CAPS_ | \ + NWAY_AR_10T_FD_CAPS_ | \ + NWAY_AR_100TX_HD_CAPS_ | \ + NWAY_AR_100TX_FD_CAPS_) +#define NWAY_AR_PAUSE_MASK (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_) + +#define PHY_LP_ABILITY (0x05) +#define NWAY_LPAR_NEXT_PAGE_ (0x8000) +#define NWAY_LPAR_ACKNOWLEDGE_ (0x4000) +#define NWAY_LPAR_REMOTE_FAULT_ (0x2000) +#define NWAY_LPAR_ASM_DIR_ (0x0800) +#define NWAY_LPAR_PAUSE_ (0x0400) +#define NWAY_LPAR_100T4_CAPS_ (0x0200) +#define NWAY_LPAR_100TX_FD_CAPS_ (0x0100) +#define NWAY_LPAR_100TX_HD_CAPS_ (0x0080) +#define NWAY_LPAR_10T_FD_CAPS_ (0x0040) +#define NWAY_LPAR_10T_HD_CAPS_ (0x0020) +#define NWAY_LPAR_SELECTOR_FIELD_ (0x001F) + +#define PHY_AUTONEG_EXP (0x06) +#define NWAY_ER_PAR_DETECT_FAULT_ (0x0010) +#define NWAY_ER_LP_NEXT_PAGE_CAPS_ (0x0008) +#define NWAY_ER_NEXT_PAGE_CAPS_ (0x0004) +#define 
NWAY_ER_PAGE_RXD_ (0x0002) +#define NWAY_ER_LP_NWAY_CAPS_ (0x0001) + +#define PHY_NEXT_PAGE_TX (0x07) +#define NPTX_NEXT_PAGE_ (0x8000) +#define NPTX_MSG_PAGE_ (0x2000) +#define NPTX_ACKNOWLDGE2_ (0x1000) +#define NPTX_TOGGLE_ (0x0800) +#define NPTX_MSG_CODE_FIELD_ (0x0001) + +#define PHY_LP_NEXT_PAGE (0x08) +#define LP_RNPR_NEXT_PAGE_ (0x8000) +#define LP_RNPR_ACKNOWLDGE_ (0x4000) +#define LP_RNPR_MSG_PAGE_ (0x2000) +#define LP_RNPR_ACKNOWLDGE2_ (0x1000) +#define LP_RNPR_TOGGLE_ (0x0800) +#define LP_RNPR_MSG_CODE_FIELD_ (0x0001) + +#define PHY_1000T_CTRL (0x09) +#define CR_1000T_TEST_MODE_4_ (0x8000) +#define CR_1000T_TEST_MODE_3_ (0x6000) +#define CR_1000T_TEST_MODE_2_ (0x4000) +#define CR_1000T_TEST_MODE_1_ (0x2000) +#define CR_1000T_MS_ENABLE_ (0x1000) +#define CR_1000T_MS_VALUE_ (0x0800) +#define CR_1000T_REPEATER_DTE_ (0x0400) +#define CR_1000T_FD_CAPS_ (0x0200) +#define CR_1000T_HD_CAPS_ (0x0100) +#define CR_1000T_ASYM_PAUSE_ (0x0080) +#define CR_1000T_TEST_MODE_NORMAL_ (0x0000) + +#define PHY_1000T_STATUS (0x0A) +#define SR_1000T_MS_CONFIG_FAULT_ (0x8000) +#define SR_1000T_MS_CONFIG_RES_ (0x4000) +#define SR_1000T_LOCAL_RX_STATUS_ (0x2000) +#define SR_1000T_REMOTE_RX_STATUS_ (0x1000) +#define SR_1000T_LP_FD_CAPS_ (0x0800) +#define SR_1000T_LP_HD_CAPS_ (0x0400) +#define SR_1000T_ASYM_PAUSE_DIR_ (0x0100) +#define SR_1000T_IDLE_ERROR_CNT_ (0x00FF) +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +#define PHY_EXT_STATUS (0x0F) +#define IEEE_ESR_1000X_FD_CAPS_ (0x8000) +#define IEEE_ESR_1000X_HD_CAPS_ (0x4000) +#define IEEE_ESR_1000T_FD_CAPS_ (0x2000) +#define IEEE_ESR_1000T_HD_CAPS_ (0x1000) +#define PHY_TX_POLARITY_MASK_ (0x0100) +#define PHY_TX_NORMAL_POLARITY_ (0x0000) +#define AUTO_POLARITY_DISABLE_ (0x0010) + +#define PHY_MMD_CTL (0x0D) +#define PHY_MMD_CTRL_OP_MASK_ (0xC000) +#define PHY_MMD_CTRL_OP_REG_ (0x0000) +#define PHY_MMD_CTRL_OP_DNI_ (0x4000) +#define PHY_MMD_CTRL_OP_DPIRW_ (0x8000) +#define PHY_MMD_CTRL_OP_DPIWO_ (0xC000) +#define PHY_MMD_CTRL_DEV_ADDR_MASK_ (0x001F) + +#define PHY_MMD_REG_DATA (0x0E) + +/* VTSE Vendor Specific registers */ +#define PHY_VTSE_BYPASS (0x12) +#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_ (0x0020) + +#define PHY_VTSE_INT_MASK (0x19) +#define PHY_VTSE_INT_MASK_MDINTPIN_EN_ (0x8000) +#define PHY_VTSE_INT_MASK_SPEED_CHANGE_ (0x4000) +#define PHY_VTSE_INT_MASK_LINK_CHANGE_ (0x2000) +#define PHY_VTSE_INT_MASK_FDX_CHANGE_ (0x1000) +#define PHY_VTSE_INT_MASK_AUTONEG_ERR_ (0x0800) +#define PHY_VTSE_INT_MASK_AUTONEG_DONE_ (0x0400) +#define PHY_VTSE_INT_MASK_POE_DETECT_ (0x0200) +#define PHY_VTSE_INT_MASK_SYMBOL_ERR_ (0x0100) +#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_ (0x0080) +#define PHY_VTSE_INT_MASK_WOL_EVENT_ (0x0040) +#define PHY_VTSE_INT_MASK_EXTENDED_INT_ (0x0020) +#define PHY_VTSE_INT_MASK_RESERVED_ (0x0010) +#define PHY_VTSE_INT_MASK_FALSE_CARRIER_ (0x0008) +#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_ (0x0004) +#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_ (0x0002) +#define PHY_VTSE_INT_MASK_RX__ER_ (0x0001) + +#define PHY_VTSE_INT_STS (0x1A) +#define PHY_VTSE_INT_STS_INT_ACTIVE_ (0x8000) +#define PHY_VTSE_INT_STS_SPEED_CHANGE_ (0x4000) +#define PHY_VTSE_INT_STS_LINK_CHANGE_ (0x2000) +#define PHY_VTSE_INT_STS_FDX_CHANGE_ (0x1000) +#define PHY_VTSE_INT_STS_AUTONEG_ERR_ (0x0800) +#define PHY_VTSE_INT_STS_AUTONEG_DONE_ (0x0400) +#define PHY_VTSE_INT_STS_POE_DETECT_ (0x0200) 
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_ (0x0100) +#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_ (0x0080) +#define PHY_VTSE_INT_STS_WOL_EVENT_ (0x0040) +#define PHY_VTSE_INT_STS_EXTENDED_INT_ (0x0020) +#define PHY_VTSE_INT_STS_RESERVED_ (0x0010) +#define PHY_VTSE_INT_STS_FALSE_CARRIER_ (0x0008) +#define PHY_VTSE_INT_STS_LINK_SPEED_DS_ (0x0004) +#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_ (0x0002) +#define PHY_VTSE_INT_STS_RX_ER_ (0x0001) + +/* VTSE PHY registers */ +#define PHY_EXT_GPIO_PAGE (0x1F) +#define PHY_EXT_GPIO_PAGE_SPACE_0 (0x0000) +#define PHY_EXT_GPIO_PAGE_SPACE_1 (0x0001) +#define PHY_EXT_GPIO_PAGE_SPACE_2 (0x0002) + +/* Extended Register Page 1 space */ +#define PHY_EXT_MODE_CTRL (0x13) +#define PHY_EXT_MODE_CTRL_MDIX_MASK_ (0x000C) +#define PHY_EXT_MODE_CTRL_AUTO_MDIX_ (0x0000) +#define PHY_EXT_MODE_CTRL_MDI_ (0x0008) +#define PHY_EXT_MODE_CTRL_MDI_X_ (0x000C) + +#define PHY_ANA_10BASE_T_HD 0x01 +#define PHY_ANA_10BASE_T_FD 0x02 +#define PHY_ANA_100BASE_TX_HD 0x04 +#define PHY_ANA_100BASE_TX_FD 0x08 +#define PHY_ANA_1000BASE_T_FD 0x10 +#define PHY_ANA_ALL_SUPPORTED_MEDIA (PHY_ANA_10BASE_T_HD | \ + PHY_ANA_10BASE_T_FD | \ + PHY_ANA_100BASE_TX_HD | \ + PHY_ANA_100BASE_TX_FD | \ + PHY_ANA_1000BASE_T_FD) +/* PHY MMD registers */ +#define PHY_MMD_DEV_3 3 + +#define PHY_EEE_PCS_STATUS (0x1) +#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_ ((u16)0x0800) +#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_ ((u16)0x0400) +#define PHY_EEE_PCS_STATUS_TX_LPI_IND_ ((u16)0x0200) +#define PHY_EEE_PCS_STATUS_RX_LPI_IND_ ((u16)0x0100) +#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_ ((u16)0x0004) + +#define PHY_EEE_CAPABILITIES (0x14) +#define PHY_EEE_CAPABILITIES_1000BT_EEE_ ((u16)0x0004) +#define PHY_EEE_CAPABILITIES_100BT_EEE_ ((u16)0x0002) + +#define PHY_MMD_DEV_7 7 + +#define PHY_EEE_ADVERTISEMENT (0x3C) +#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_ ((u16)0x0004) +#define PHY_EEE_ADVERTISEMENT_100BT_EEE_ ((u16)0x0002) + +#define PHY_EEE_LP_ADVERTISEMENT (0x3D) +#define PHY_EEE_1000BT_EEE_CAPABLE_ ((u16)0x0004) +#define PHY_EEE_100BT_EEE_CAPABLE_ ((u16)0x0002) +#endif /* _LAN78XX_H */ diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9d43460ce3c7..1f7a7cd97e50 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -785,6 +785,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. 
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ad8cbc6c9ee7..fe4ec324aebc 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -339,6 +339,7 @@ /* USB_USB_CTRL */ #define RX_AGG_DISABLE 0x0010 +#define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 @@ -622,6 +623,7 @@ enum rtl_version { RTL_VER_03, RTL_VER_04, RTL_VER_05, + RTL_VER_06, RTL_VER_MAX }; @@ -2610,7 +2612,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) u32 ocp_data; u16 data; - ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || + tp->version == RTL_VER_05) + ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; @@ -2711,7 +2716,7 @@ static void r8153_first_init(struct r8152 *tp) /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); - ocp_data &= ~RX_AGG_DISABLE; + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } @@ -3241,7 +3246,7 @@ static void r8152b_init(struct r8152 *tp) /* enable rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); - ocp_data &= ~RX_AGG_DISABLE; + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } @@ -3293,6 +3298,13 @@ static void r8153_init(struct r8152 *tp) else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + } else if (tp->version == RTL_VER_06) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); + if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) + ocp_data &= ~DYNAMIC_BURST; + else + ocp_data |= DYNAMIC_BURST; + ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); @@ -3988,6 +4000,10 @@ static void r8152b_get_version(struct r8152 *tp) tp->version = RTL_VER_05; tp->mii.supports_gmii = 1; break; + case 0x5c30: + tp->version = RTL_VER_06; + tp->mii.supports_gmii = 1; + break; default: netif_info(tp, probe, tp->netdev, "Unknown version 0x%04x\n", version); @@ -4033,6 +4049,7 @@ static int rtl_ops_init(struct r8152 *tp) case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: + case RTL_VER_06: ops->init = r8153_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 34c519eb1db5..e90f7a484e1c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -49,15 +49,12 @@ #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #endif +#include <net/dst_metadata.h> #define VXLAN_VERSION "0.1" #define PORT_HASH_BITS 8 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS) -#define VNI_HASH_BITS 10 -#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) -#define FDB_HASH_BITS 8 -#define FDB_HASH_SIZE (1<<FDB_HASH_BITS) #define FDB_AGE_DEFAULT 300 /* 5 min */ #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ @@ -74,9 +71,13 @@ module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static int vxlan_net_id; +static struct rtnl_link_ops vxlan_link_ops; static const u8 all_zeros_mac[ETH_ALEN]; +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + bool no_share, u32 flags); + /* per-network namespace private data for this module */ struct vxlan_net { struct list_head vxlan_list; @@ -84,21 +85,6 @@ struct vxlan_net { spinlock_t sock_lock; }; -union
vxlan_addr { - struct sockaddr_in sin; - struct sockaddr_in6 sin6; - struct sockaddr sa; -}; - -struct vxlan_rdst { - union vxlan_addr remote_ip; - __be16 remote_port; - u32 remote_vni; - u32 remote_ifindex; - struct list_head list; - struct rcu_head rcu; -}; - /* Forwarding table entry */ struct vxlan_fdb { struct hlist_node hlist; /* linked list of entries */ @@ -106,40 +92,21 @@ struct vxlan_fdb { unsigned long updated; /* jiffies */ unsigned long used; struct list_head remotes; + u8 eth_addr[ETH_ALEN]; u16 state; /* see ndm_state */ u8 flags; /* see ndm_flags */ - u8 eth_addr[ETH_ALEN]; -}; - -/* Pseudo network device */ -struct vxlan_dev { - struct hlist_node hlist; /* vni hash table */ - struct list_head next; /* vxlan's per namespace list */ - struct vxlan_sock *vn_sock; /* listening socket */ - struct net_device *dev; - struct net *net; /* netns for packet i/o */ - struct vxlan_rdst default_dst; /* default destination */ - union vxlan_addr saddr; /* source address */ - __be16 dst_port; - __u16 port_min; /* source port range */ - __u16 port_max; - __u8 tos; /* TOS override */ - __u8 ttl; - u32 flags; /* VXLAN_F_* in vxlan.h */ - - unsigned long age_interval; - struct timer_list age_timer; - spinlock_t hash_lock; - unsigned int addrcnt; - unsigned int addrmax; - - struct hlist_head fdb_head[FDB_HASH_SIZE]; }; /* salt for hash table */ static u32 vxlan_salt __read_mostly; static struct workqueue_struct *vxlan_wq; +static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) +{ + return vs->flags & VXLAN_F_COLLECT_METADATA || + ip_tunnel_collect_metadata(); +} + #if IS_ENABLED(CONFIG_IPV6) static inline bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) @@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) goto nla_put_failure; - if (rdst->remote_port && rdst->remote_port != vxlan->dst_port && + if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && nla_put_be16(skb, NDA_PORT, rdst->remote_port)) goto nla_put_failure; if (rdst->remote_vni != vxlan->default_dst.remote_vni && @@ -749,7 +716,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax) + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) return -ENOSPC; /* Disallow replace to add a multicast entry */ @@ -835,7 +803,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, return -EINVAL; *port = nla_get_be16(tb[NDA_PORT]); } else { - *port = vxlan->dst_port; + *port = vxlan->cfg.dst_port; } if (tb[NDA_VNI]) { @@ -1021,7 +989,7 @@ static bool vxlan_snoop(struct net_device *dev, vxlan_fdb_create(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, + vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); @@ -1062,7 +1030,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) return false; } -void vxlan_sock_release(struct vxlan_sock *vs) +static void vxlan_sock_release(struct vxlan_sock *vs) { struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); @@ -1078,7 +1046,6 @@ void vxlan_sock_release(struct vxlan_sock *vs) queue_work(vxlan_wq, &vs->del_work); } -EXPORT_SYMBOL_GPL(vxlan_sock_release); /* Update multicast group membership when first VNI on * multicast address is brought up @@ -1161,13 +1128,112 @@ static struct vxlanhdr 
*vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, return vh; } +static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, + struct vxlan_metadata *md, u32 vni, + struct metadata_dst *tun_dst) +{ + struct iphdr *oip = NULL; + struct ipv6hdr *oip6 = NULL; + struct vxlan_dev *vxlan; + struct pcpu_sw_netstats *stats; + union vxlan_addr saddr; + int err = 0; + union vxlan_addr *remote_ip; + + /* For flow based devices, map all packets to VNI 0 */ + if (vs->flags & VXLAN_F_FLOW_BASED) + vni = 0; + + /* Is this VNI defined? */ + vxlan = vxlan_vs_find_vni(vs, vni); + if (!vxlan) + goto drop; + + remote_ip = &vxlan->default_dst.remote_ip; + skb_reset_mac_header(skb); + skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); + skb->protocol = eth_type_trans(skb, vxlan->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + + /* Ignore packet loops (and multicast echo) */ + if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) + goto drop; + + /* Re-examine inner Ethernet packet */ + if (remote_ip->sa.sa_family == AF_INET) { + oip = ip_hdr(skb); + saddr.sin.sin_addr.s_addr = oip->saddr; + saddr.sa.sa_family = AF_INET; +#if IS_ENABLED(CONFIG_IPV6) + } else { + oip6 = ipv6_hdr(skb); + saddr.sin6.sin6_addr = oip6->saddr; + saddr.sa.sa_family = AF_INET6; +#endif + } + + if (tun_dst) { + skb_dst_set(skb, (struct dst_entry *)tun_dst); + tun_dst = NULL; + } + + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) + goto drop; + + skb_reset_network_header(skb); + /* In flow-based mode, GBP is carried in dst_metadata */ + if (!(vs->flags & VXLAN_F_FLOW_BASED)) + skb->mark = md->gbp; + + if (oip6) + err = IP6_ECN_decapsulate(oip6, skb); + if (oip) + err = IP_ECN_decapsulate(oip, skb); + + if (unlikely(err)) { + if (log_ecn_error) { + if (oip6) + net_info_ratelimited("non-ECT from %pI6\n", + &oip6->saddr); + if (oip) + net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", + &oip->saddr, oip->tos); + } + if (err > 1) { + ++vxlan->dev->stats.rx_frame_errors; + ++vxlan->dev->stats.rx_errors; + goto drop; + } + } + + stats = this_cpu_ptr(vxlan->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + netif_rx(skb); + + return; +drop: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); + + /* Consume bad packet */ + kfree_skb(skb); +} + /* Callback from net/ipv4/udp.c to receive packets */ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { + struct metadata_dst *tun_dst = NULL; + struct ip_tunnel_info *info; struct vxlan_sock *vs; struct vxlanhdr *vxh; u32 flags, vni; - struct vxlan_metadata md = {0}; + struct vxlan_metadata _md; + struct vxlan_metadata *md = &_md; /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1202,6 +1268,32 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vni &= VXLAN_VNI_MASK; } + if (vxlan_collect_metadata(vs)) { + const struct iphdr *iph = ip_hdr(skb); + + tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC); + if (!tun_dst) + goto drop; + + info = &tun_dst->u.tun_info; + info->key.ipv4_src = iph->saddr; + info->key.ipv4_dst = iph->daddr; + info->key.ipv4_tos = iph->tos; + info->key.ipv4_ttl = iph->ttl; + info->key.tp_src = udp_hdr(skb)->source; + info->key.tp_dst = udp_hdr(skb)->dest; + + info->mode = IP_TUNNEL_INFO_RX; + info->key.tun_flags = TUNNEL_KEY; + info->key.tun_id = cpu_to_be64(vni >> 8); + if 
(udp_hdr(skb)->check != 0) + info->key.tun_flags |= TUNNEL_CSUM; + + md = ip_tunnel_info_opts(info, sizeof(*md)); + } else { + memset(md, 0, sizeof(*md)); + } + /* For backwards compatibility, only allow reserved fields to be * used by VXLAN extensions if explicitly requested. */ @@ -1209,13 +1301,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) struct vxlanhdr_gbp *gbp; gbp = (struct vxlanhdr_gbp *)vxh; - md.gbp = ntohs(gbp->policy_id); + md->gbp = ntohs(gbp->policy_id); + + if (tun_dst) + info->key.tun_flags |= TUNNEL_VXLAN_OPT; if (gbp->dont_learn) - md.gbp |= VXLAN_GBP_DONT_LEARN; + md->gbp |= VXLAN_GBP_DONT_LEARN; if (gbp->policy_applied) - md.gbp |= VXLAN_GBP_POLICY_APPLIED; + md->gbp |= VXLAN_GBP_POLICY_APPLIED; flags &= ~VXLAN_GBP_USED_BITS; } @@ -1233,8 +1328,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto bad_flags; } - md.vni = vxh->vx_vni; - vs->rcv(vs, skb, &md); + vxlan_rcv(vs, skb, md, vni >> 8, tun_dst); return 0; drop: @@ -1247,93 +1341,13 @@ bad_flags: ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); error: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); + /* Return non vxlan pkt */ return 1; } -static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, - struct vxlan_metadata *md) -{ - struct iphdr *oip = NULL; - struct ipv6hdr *oip6 = NULL; - struct vxlan_dev *vxlan; - struct pcpu_sw_netstats *stats; - union vxlan_addr saddr; - __u32 vni; - int err = 0; - union vxlan_addr *remote_ip; - - vni = ntohl(md->vni) >> 8; - /* Is this VNI defined? */ - vxlan = vxlan_vs_find_vni(vs, vni); - if (!vxlan) - goto drop; - - remote_ip = &vxlan->default_dst.remote_ip; - skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); - skb->protocol = eth_type_trans(skb, vxlan->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - - /* Ignore packet loops (and multicast echo) */ - if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) - goto drop; - - /* Re-examine inner Ethernet packet */ - if (remote_ip->sa.sa_family == AF_INET) { - oip = ip_hdr(skb); - saddr.sin.sin_addr.s_addr = oip->saddr; - saddr.sa.sa_family = AF_INET; -#if IS_ENABLED(CONFIG_IPV6) - } else { - oip6 = ipv6_hdr(skb); - saddr.sin6.sin6_addr = oip6->saddr; - saddr.sa.sa_family = AF_INET6; -#endif - } - - if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) - goto drop; - - skb_reset_network_header(skb); - skb->mark = md->gbp; - - if (oip6) - err = IP6_ECN_decapsulate(oip6, skb); - if (oip) - err = IP_ECN_decapsulate(oip, skb); - - if (unlikely(err)) { - if (log_ecn_error) { - if (oip6) - net_info_ratelimited("non-ECT from %pI6\n", - &oip6->saddr); - if (oip) - net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", - &oip->saddr, oip->tos); - } - if (err > 1) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; - goto drop; - } - } - - stats = this_cpu_ptr(vxlan->dev->tstats); - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - - netif_rx(skb); - - return; -drop: - /* Consume bad packet */ - kfree_skb(skb); -} - static int arp_reduce(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -1672,7 +1686,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, - __be16 src_port, __be16 
dst_port, + __be16 src_port, __be16 dst_port, __be32 vni, struct vxlan_metadata *md, bool xnet, u32 vxflags) { struct vxlanhdr *vxh; @@ -1722,7 +1736,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = md->vni; + vxh->vx_vni = vni; if (type & SKB_GSO_TUNNEL_REMCSUM) { u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> @@ -1755,10 +1769,10 @@ err: } #endif -int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, - __be16 src_port, __be16 dst_port, - struct vxlan_metadata *md, bool xnet, u32 vxflags) +static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, + __be16 src_port, __be16 dst_port, __be32 vni, + struct vxlan_metadata *md, bool xnet, u32 vxflags) { struct vxlanhdr *vxh; int min_headroom; @@ -1801,7 +1815,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = md->vni; + vxh->vx_vni = vni; if (type & SKB_GSO_TUNNEL_REMCSUM) { u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> @@ -1828,7 +1842,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, ttl, df, src_port, dst_port, xnet, !(vxflags & VXLAN_F_UDP_CSUM)); } -EXPORT_SYMBOL_GPL(vxlan_xmit_skb); /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, @@ -1878,22 +1891,43 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { + struct ip_tunnel_info *info; struct vxlan_dev *vxlan = netdev_priv(dev); struct sock *sk = vxlan->vn_sock->sock->sk; struct rtable *rt = NULL; const struct iphdr *old_iph; struct flowi4 fl4; union vxlan_addr *dst; - struct vxlan_metadata md; + union vxlan_addr remote_ip; + struct vxlan_metadata _md; + struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; u32 vni; __be16 df = 0; __u8 tos, ttl; int err; + u32 flags = vxlan->flags; + + /* FIXME: Support IPv6 */ + info = skb_tunnel_info(skb, AF_INET); + + if (rdst) { + dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; + vni = rdst->remote_vni; + dst = &rdst->remote_ip; + } else { + if (!info) { + WARN_ONCE(1, "%s: Missing encapsulation instructions\n", + dev->name); + goto drop; + } - dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; - vni = rdst->remote_vni; - dst = &rdst->remote_ip; + dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; + vni = be64_to_cpu(info->key.tun_id); + remote_ip.sin.sin_family = AF_INET; + remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst; + dst = &remote_ip; + } if (vxlan_addr_any(dst)) { if (did_rsc) { @@ -1906,25 +1940,42 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, old_iph = ip_hdr(skb); - ttl = vxlan->ttl; + ttl = vxlan->cfg.ttl; if (!ttl && vxlan_addr_multicast(dst)) ttl = 1; - tos = vxlan->tos; + tos = vxlan->cfg.tos; if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); - src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min, - vxlan->port_max, true); + src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, + vxlan->cfg.port_max, true); if (dst->sa.sa_family == AF_INET) { + if (info) { + if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + df = htons(IP_DF); + if (info->key.tun_flags & TUNNEL_CSUM) + flags |= VXLAN_F_UDP_CSUM; + else + flags &= ~VXLAN_F_UDP_CSUM; + + ttl = info->key.ipv4_ttl; + tos = info->key.ipv4_tos; + + if (info->options_len) + md = ip_tunnel_info_opts(info, sizeof(*md)); + } else { + md->gbp = skb->mark; + } + memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst->remote_ifindex; + fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; fl4.flowi4_tos = RT_TOS(tos); fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = dst->sin.sin_addr.s_addr; - fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; + fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; rt = ip_route_output_key(vxlan->net, &fl4); if (IS_ERR(rt)) { @@ -1958,14 +2009,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - md.vni = htonl(vni << 8); - md.gbp = skb->mark; - err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr, dst->sin.sin_addr.s_addr, tos, ttl, df, - src_port, dst_port, &md, + src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), - vxlan->flags); + flags); if (err < 0) { /* skb is already freed. */ skb = NULL; @@ -1980,13 +2028,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, u32 flags; memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = rdst->remote_ifindex; + fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; fl6.daddr = dst->sin6.sin6_addr; - fl6.saddr = vxlan->saddr.sin6.sin6_addr; + fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; - if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) { + if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); dev->stats.tx_carrier_errors++; @@ -2018,11 +2066,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = ttl ? 
: ip6_dst_hoplimit(ndst); - md.vni = htonl(vni << 8); - md.gbp = skb->mark; + md->gbp = skb->mark; err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, - 0, ttl, src_port, dst_port, &md, + 0, ttl, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), vxlan->flags); #endif @@ -2051,11 +2098,15 @@ tx_free: static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + const struct ip_tunnel_info *info; struct ethhdr *eth; bool did_rsc = false; struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + /* FIXME: Support IPv6 */ + info = skb_tunnel_info(skb, AF_INET); + skb_reset_mac_header(skb); eth = eth_hdr(skb); @@ -2078,6 +2129,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) #endif } + if (vxlan->flags & VXLAN_F_FLOW_BASED && + info && info->mode == IP_TUNNEL_INFO_TX) { + vxlan_xmit_one(skb, dev, NULL, false); + return NETDEV_TX_OK; + } + f = vxlan_find_mac(vxlan, eth->h_dest); did_rsc = false; @@ -2143,7 +2200,7 @@ static void vxlan_cleanup(unsigned long arg) if (f->state & NUD_PERMANENT) continue; - timeout = f->used + vxlan->age_interval * HZ; + timeout = f->used + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", @@ -2207,8 +2264,8 @@ static int vxlan_open(struct net_device *dev) struct vxlan_sock *vs; int ret = 0; - vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL, - false, vxlan->flags); + vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port, + vxlan->cfg.no_share, vxlan->flags); if (IS_ERR(vs)) return PTR_ERR(vs); @@ -2222,7 +2279,7 @@ static int vxlan_open(struct net_device *dev) } } - if (vxlan->age_interval) + if (vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); return ret; @@ -2380,7 +2437,7 @@ static void vxlan_setup(struct net_device *dev) vxlan->age_timer.function = vxlan_cleanup; vxlan->age_timer.data = (unsigned long) vxlan; - vxlan->dst_port = htons(vxlan_port); + vxlan->cfg.dst_port = htons(vxlan_port); vxlan->dev = dev; @@ -2405,6 +2462,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, + [IFLA_VXLAN_FLOWBASED] = { .type = NLA_U8 }, + [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, @@ -2500,7 +2559,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, /* Create new listen socket if needed */ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data, u32 flags) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); @@ -2529,8 +2587,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, vs->sock = sock; atomic_set(&vs->refcnt, 1); - vs->rcv = rcv; - vs->data = data; vs->flags = (flags & VXLAN_F_RCV_FLAGS); /* Initialize the vxlan udp offloads structure */ @@ -2554,9 +2610,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, return vs; } -struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data, - bool no_share, u32 flags) +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + bool no_share, u32 flags) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); 
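struct vxlan_sock *vs;

/* [Editor's note: illustrative sketch, not part of the patch.] The
 * refactoring above drops the vs->rcv/vs->data indirection: every VXLAN
 * socket now feeds the common vxlan_rcv(), and vxlan_collect_metadata()
 * decides whether the outer IP/UDP/VXLAN headers are preserved as a
 * metadata_dst attached to the skb. A flow-based device maps every packet
 * to VNI 0 and carries the real VNI in the tunnel key instead. A consumer
 * on such a device could read the collected metadata back roughly like
 * this (the function name is hypothetical; the accessors are the ones the
 * patch itself uses):
 */
static void vxlan_metadata_peek_sketch(struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
	struct vxlan_metadata *md;

	if (!info || info->mode != IP_TUNNEL_INFO_RX)
		return;	/* no collected outer headers on this skb */

	/* tun_id carries the VNI, as filled in by vxlan_udp_encap_recv() */
	pr_debug("vni %llu from %pI4\n",
		 (unsigned long long)be64_to_cpu(info->key.tun_id),
		 &info->key.ipv4_src);

	/* GBP policy data travels as tunnel options when present */
	if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
		md = ip_tunnel_info_opts(info, sizeof(*md));
		pr_debug("gbp 0x%x\n", md->gbp);
	}
}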
@@ -2566,7 +2621,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags); - if (vs && vs->rcv == rcv) { + if (vs) { if (!atomic_add_unless(&vs->refcnt, 1, 0)) vs = ERR_PTR(-EBUSY); spin_unlock(&vn->sock_lock); @@ -2575,58 +2630,38 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_unlock(&vn->sock_lock); } - return vxlan_socket_create(net, port, rcv, data, flags); + return vxlan_socket_create(net, port, flags); } -EXPORT_SYMBOL_GPL(vxlan_sock_add); -static int vxlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, + struct vxlan_config *conf) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; - __u32 vni; int err; bool use_ipv6 = false; - - if (!data[IFLA_VXLAN_ID]) - return -EINVAL; + __be16 default_port = vxlan->cfg.dst_port; vxlan->net = src_net; - vni = nla_get_u32(data[IFLA_VXLAN_ID]); - dst->remote_vni = vni; - - /* Unless IPv6 is explicitly requested, assume IPv4 */ - dst->remote_ip.sa.sa_family = AF_INET; - if (data[IFLA_VXLAN_GROUP]) { - dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); - } else if (data[IFLA_VXLAN_GROUP6]) { - if (!IS_ENABLED(CONFIG_IPV6)) - return -EPFNOSUPPORT; + dst->remote_vni = conf->vni; - dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); - dst->remote_ip.sa.sa_family = AF_INET6; - use_ipv6 = true; - } + memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); - if (data[IFLA_VXLAN_LOCAL]) { - vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); - vxlan->saddr.sa.sa_family = AF_INET; - } else if (data[IFLA_VXLAN_LOCAL6]) { - if (!IS_ENABLED(CONFIG_IPV6)) - return -EPFNOSUPPORT; + /* Unless IPv6 is explicitly requested, assume IPv4 */ + if (!dst->remote_ip.sa.sa_family) + dst->remote_ip.sa.sa_family = AF_INET; - /* TODO: respect scope id */ - vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); - vxlan->saddr.sa.sa_family = AF_INET6; + if (dst->remote_ip.sa.sa_family == AF_INET6 || + vxlan->cfg.saddr.sa.sa_family == AF_INET6) use_ipv6 = true; - } - if (data[IFLA_VXLAN_LINK] && - (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { + if (conf->remote_ifindex) { struct net_device *lowerdev - = __dev_get_by_index(src_net, dst->remote_ifindex); + = __dev_get_by_index(src_net, conf->remote_ifindex); + + dst->remote_ifindex = conf->remote_ifindex; if (!lowerdev) { pr_info("ifindex %d does not exist\n", dst->remote_ifindex); @@ -2644,7 +2679,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, } #endif - if (!tb[IFLA_MTU]) + if (!conf->mtu) dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); dev->needed_headroom = lowerdev->hard_header_len + @@ -2652,101 +2687,192 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, } else if (use_ipv6) vxlan->flags |= VXLAN_F_IPV6; + memcpy(&vxlan->cfg, conf, sizeof(*conf)); + if (!vxlan->cfg.dst_port) + vxlan->cfg.dst_port = default_port; + vxlan->flags |= conf->flags; + + if (!vxlan->cfg.age_interval) + vxlan->cfg.age_interval = FDB_AGE_DEFAULT; + + if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ?
AF_INET6 : AF_INET, + vxlan->cfg.dst_port, vxlan->flags)) + return -EEXIST; + + dev->ethtool_ops = &vxlan_ethtool_ops; + + /* create an fdb entry for a valid default destination */ + if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { + err = vxlan_fdb_create(vxlan, all_zeros_mac, + &vxlan->default_dst.remote_ip, + NUD_REACHABLE|NUD_PERMANENT, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->cfg.dst_port, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_ifindex, + NTF_SELF); + if (err) + return err; + } + + err = register_netdevice(dev); + if (err) { + vxlan_fdb_delete_default(vxlan); + return err; + } + + list_add(&vxlan->next, &vn->vxlan_list); + + return 0; +} + +struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, struct vxlan_config *conf) +{ + struct nlattr *tb[IFLA_MAX+1]; + struct net_device *dev; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &vxlan_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = vxlan_dev_configure(net, dev, conf); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + + return dev; +} +EXPORT_SYMBOL_GPL(vxlan_dev_create); + +static int vxlan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct vxlan_config conf; + int err; + + if (!data[IFLA_VXLAN_ID]) + return -EINVAL; + + memset(&conf, 0, sizeof(conf)); + conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + + if (data[IFLA_VXLAN_GROUP]) { + conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); + } else if (data[IFLA_VXLAN_GROUP6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); + conf.remote_ip.sa.sa_family = AF_INET6; + } + + if (data[IFLA_VXLAN_LOCAL]) { + conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); + conf.saddr.sa.sa_family = AF_INET; + } else if (data[IFLA_VXLAN_LOCAL6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + /* TODO: respect scope id */ + conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); + conf.saddr.sa.sa_family = AF_INET6; + } + + if (data[IFLA_VXLAN_LINK]) + conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); + if (data[IFLA_VXLAN_TOS]) - vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); + conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]); if (data[IFLA_VXLAN_TTL]) - vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) - vxlan->flags |= VXLAN_F_LEARN; + conf.flags |= VXLAN_F_LEARN; if (data[IFLA_VXLAN_AGEING]) - vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); - else - vxlan->age_interval = FDB_AGE_DEFAULT; + conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) - vxlan->flags |= VXLAN_F_PROXY; + conf.flags |= VXLAN_F_PROXY; if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) - vxlan->flags |= VXLAN_F_RSC; + conf.flags |= VXLAN_F_RSC; if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) - vxlan->flags |= VXLAN_F_L2MISS; + conf.flags |= VXLAN_F_L2MISS; if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) - vxlan->flags |= VXLAN_F_L3MISS; + conf.flags |= VXLAN_F_L3MISS; if (data[IFLA_VXLAN_LIMIT]) - vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + + if 
(data[IFLA_VXLAN_FLOWBASED] && + nla_get_u8(data[IFLA_VXLAN_FLOWBASED])) + conf.flags |= VXLAN_F_FLOW_BASED; + + if (data[IFLA_VXLAN_COLLECT_METADATA] && + nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) + conf.flags |= VXLAN_F_COLLECT_METADATA; if (data[IFLA_VXLAN_PORT_RANGE]) { const struct ifla_vxlan_port_range *p = nla_data(data[IFLA_VXLAN_PORT_RANGE]); - vxlan->port_min = ntohs(p->low); - vxlan->port_max = ntohs(p->high); + conf.port_min = ntohs(p->low); + conf.port_max = ntohs(p->high); } if (data[IFLA_VXLAN_PORT]) - vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); + conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) - vxlan->flags |= VXLAN_F_UDP_CSUM; + conf.flags |= VXLAN_F_UDP_CSUM; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) - vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; + conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) - vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; + conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; if (data[IFLA_VXLAN_REMCSUM_TX] && nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) - vxlan->flags |= VXLAN_F_REMCSUM_TX; + conf.flags |= VXLAN_F_REMCSUM_TX; if (data[IFLA_VXLAN_REMCSUM_RX] && nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) - vxlan->flags |= VXLAN_F_REMCSUM_RX; + conf.flags |= VXLAN_F_REMCSUM_RX; if (data[IFLA_VXLAN_GBP]) - vxlan->flags |= VXLAN_F_GBP; + conf.flags |= VXLAN_F_GBP; if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) - vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL; - - if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, - vxlan->dst_port, vxlan->flags)) { - pr_info("duplicate VNI %u\n", vni); - return -EEXIST; - } + conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; - dev->ethtool_ops = &vxlan_ethtool_ops; + err = vxlan_dev_configure(src_net, dev, &conf); + switch (err) { + case -ENODEV: + pr_info("ifindex %d does not exist\n", conf.remote_ifindex); + break; - /* create an fdb entry for a valid default destination */ - if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, - &vxlan->default_dst.remote_ip, - NUD_REACHABLE|NUD_PERMANENT, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_ifindex, - NTF_SELF); - if (err) - return err; - } + case -EPERM: + pr_info("IPv6 is disabled via sysctl\n"); + break; - err = register_netdevice(dev); - if (err) { - vxlan_fdb_delete_default(vxlan); - return err; + case -EEXIST: + pr_info("duplicate VNI %u\n", conf.vni); + break; } - list_add(&vxlan->next, &vn->vxlan_list); - - return 0; + return err; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) @@ -2777,6 +2903,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_FLOWBASED */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ nla_total_size(sizeof(struct ifla_vxlan_port_range)) + @@ -2794,8 +2921,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) const struct vxlan_dev *vxlan = netdev_priv(dev); const struct vxlan_rdst *dst = &vxlan->default_dst; struct ifla_vxlan_port_range ports = { - .low = htons(vxlan->port_min), - .high = htons(vxlan->port_max), + 
.low = htons(vxlan->cfg.port_min), + .high = htons(vxlan->cfg.port_max), }; if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) @@ -2818,22 +2945,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) goto nla_put_failure; - if (!vxlan_addr_any(&vxlan->saddr)) { - if (vxlan->saddr.sa.sa_family == AF_INET) { + if (!vxlan_addr_any(&vxlan->cfg.saddr)) { + if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL, - vxlan->saddr.sin.sin_addr.s_addr)) + vxlan->cfg.saddr.sin.sin_addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else { if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6, - &vxlan->saddr.sin6.sin6_addr)) + &vxlan->cfg.saddr.sin6.sin6_addr)) goto nla_put_failure; #endif } } - if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || - nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || + if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || + nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->flags & VXLAN_F_LEARN)) || nla_put_u8(skb, IFLA_VXLAN_PROXY, @@ -2843,9 +2970,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) !!(vxlan->flags & VXLAN_F_L2MISS)) || nla_put_u8(skb, IFLA_VXLAN_L3MISS, !!(vxlan->flags & VXLAN_F_L3MISS)) || - nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || - nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || - nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) || + nla_put_u8(skb, IFLA_VXLAN_FLOWBASED, + !!(vxlan->flags & VXLAN_F_FLOW_BASED)) || + nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || + nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || + nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, !!(vxlan->flags & VXLAN_F_UDP_CSUM)) || nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 9729e6941635..c04fb00e7930 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -11,7 +11,8 @@ ath10k_core-y += mac.o \ wmi-tlv.o \ bmi.o \ hw.o \ - p2p.o + p2p.o \ + swap.o ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 31a990635490..df7c7616533b 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -178,7 +178,7 @@ struct bmi_target_info { }; /* in msec */ -#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ) +#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ) #define BMI_CE_NUM_TO_TARG 0 #define BMI_CE_NUM_TO_HOST 1 diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index e508c65b6ba8..cf28fbebaedc 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, { struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; + struct ath10k *ar = ce_state->ar; unsigned int sw_index = dest_ring->sw_index; struct ce_desc *base = dest_ring->base_addr_owner_space; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 0eddb204d85b..5c903e15dd65 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ 
b/drivers/net/wireless/ath/ath10k/ce.h @@ -21,7 +21,7 @@ #include "hif.h" /* Maximum number of Copy Engines supported */ -#define CE_COUNT_MAX 8 +#define CE_COUNT_MAX 12 #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096 /* Descriptor rings must be aligned to this boundary */ @@ -38,8 +38,13 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_GATHER (1 << 0) #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) -#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC -#define CE_DESC_FLAGS_META_DATA_LSB 2 + +/* The following desc flags are used in QCA99X0 */ +#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) +#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3) + +#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask +#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb struct ce_desc { __le32 addr; @@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) -#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB 8 -#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK 0x0000ff00 +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \ + ar->regs->ce_wrap_intr_sum_host_msi_lsb +#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \ + ar->regs->ce_wrap_intr_sum_host_msi_mask #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 59496a90ad5e..f79fa6c67ebc 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -49,6 +49,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, .has_shifted_cc_wraparound = true, + .otp_exe_param = 0, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, .fw = QCA988X_HW_2_0_FW_FILE, @@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, + .otp_exe_param = 0, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, .fw = QCA6174_HW_2_1_FW_FILE, @@ -77,6 +79,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .otp_exe_param = 0, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, .fw = QCA6174_HW_3_0_FW_FILE, @@ -91,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .otp_exe_param = 0, .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, @@ -101,8 +105,68 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, }, + { + .id = QCA99X0_HW_2_0_DEV_VERSION, + .name = "qca99x0 hw2.0", + .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .fw = { + .dir = QCA99X0_HW_2_0_FW_DIR, + .fw = QCA99X0_HW_2_0_FW_FILE, + .otp = QCA99X0_HW_2_0_OTP_FILE, + .board = QCA99X0_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, }; +static const char *const ath10k_core_fw_feature_str[] = { + [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx", + [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x", + [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx", +
[ATH10K_FW_FEATURE_NO_P2P] = "no-p2p", + [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2", + [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps", + [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan", + [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp", + [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad", + [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init", +}; + +static unsigned int ath10k_core_get_fw_feature_str(char *buf, + size_t buf_len, + enum ath10k_fw_features feat) +{ + if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) || + WARN_ON(!ath10k_core_fw_feature_str[feat])) { + return scnprintf(buf, buf_len, "bit%d", feat); + } + + return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]); +} + +void ath10k_core_get_fw_features_str(struct ath10k *ar, + char *buf, + size_t buf_len) +{ + unsigned int len = 0; + int i; + + for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) { + if (test_bit(i, ar->fw_features)) { + if (len > 0) + len += scnprintf(buf + len, buf_len - len, ","); + + len += ath10k_core_get_fw_feature_str(buf + len, + buf_len - len, + i); + } + } +} + static void ath10k_send_suspend_complete(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); @@ -355,6 +419,7 @@ out: static int ath10k_download_and_run_otp(struct ath10k *ar) { u32 result, address = ar->hw_params.patch_load_addr; + u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param; int ret; ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len); @@ -380,7 +445,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) return ret; } - ret = ath10k_bmi_execute(ar, address, 0, &result); + ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); if (ret) { ath10k_err(ar, "could not execute otp (%d)\n", ret); return ret; @@ -412,6 +477,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode) data = ar->firmware_data; data_len = ar->firmware_len; mode_name = "normal"; + ret = ath10k_swap_code_seg_configure(ar, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW); + if (ret) { + ath10k_err(ar, "failed to configure fw code swap: %d\n", + ret); + return ret; + } break; case ATH10K_FIRMWARE_MODE_UTF: data = ar->testmode.utf->data; @@ -451,6 +523,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) if (!IS_ERR(ar->cal_file)) release_firmware(ar->cal_file); + ath10k_swap_code_seg_release(ar); + ar->board = NULL; ar->board_data = NULL; ar->board_len = 0; @@ -464,6 +538,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar) ar->firmware_len = 0; ar->cal_file = NULL; + } static int ath10k_fetch_cal_file(struct ath10k *ar) @@ -737,6 +812,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n", ar->htt.op_version); break; + case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE: + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "found fw code swap image ie (%zd B)\n", + ie_len); + ar->swap.firmware_codeswap_data = data; + ar->swap.firmware_codeswap_len = ie_len; + break; default: ath10k_warn(ar, "Unknown FW IE: %u\n", le32_to_cpu(hdr->id)); @@ -1014,6 +1096,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: @@ -1023,6 +1106,7 @@ static int ath10k_core_init_firmware_features(struct 
ath10k *ar) ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_TLV: ar->max_num_peers = TARGET_TLV_NUM_PEERS; @@ -1033,6 +1117,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | WMI_STAT_PEER; + ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; + break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->max_num_peers = TARGET_10_4_NUM_PEERS; + ar->max_num_stations = TARGET_10_4_NUM_STATIONS; + ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; + ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; + ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; + ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; + ar->fw_stats_req_mask = WMI_STAT_PEER; + ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: @@ -1056,6 +1151,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_TLV: ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV; break; + case ATH10K_FW_WMI_OP_VERSION_10_4: case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: WARN_ON(1); @@ -1330,6 +1426,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar) goto err_free_firmware_files; } + ret = ath10k_swap_code_seg_init(ar); + if (ret) { + ath10k_err(ar, "failed to initialize code swap segment: %d\n", + ret); + goto err_free_firmware_files; + } + mutex_lock(&ar->conf_mutex); ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); @@ -1470,9 +1573,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, switch (hw_rev) { case ATH10K_HW_QCA988X: ar->regs = &qca988x_regs; + ar->hw_values = &qca988x_values; break; case ATH10K_HW_QCA6174: ar->regs = &qca6174_regs; + ar->hw_values = &qca6174_values; + break; + case ATH10K_HW_QCA99X0: + ar->regs = &qca99x0_regs; + ar->hw_values = &qca99x0_values; break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 78094f23c9dd..78e07051b897 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -36,6 +36,7 @@ #include "spectral.h" #include "thermal.h" #include "wow.h" +#include "swap.h" #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -327,8 +328,8 @@ struct ath10k_vif { u32 uapsd; } sta; struct { - /* 127 stations; wmi limit */ - u8 tim_bitmap[16]; + /* 512 stations */ + u8 tim_bitmap[64]; u8 tim_len; u32 ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; @@ -545,6 +546,7 @@ struct ath10k { u32 ht_cap_info; u32 vht_cap_info; u32 num_rf_chains; + u32 max_spatial_stream; /* protected by conf_mutex */ bool ani_enabled; @@ -560,6 +562,7 @@ struct ath10k { struct completion target_suspend; const struct ath10k_hw_regs *regs; + const struct ath10k_hw_values *hw_values; struct ath10k_bmi bmi; struct ath10k_wmi wmi; struct ath10k_htc htc; @@ -570,6 +573,7 @@ struct ath10k { const char *name; u32 patch_load_addr; int uart_pin; + u32 otp_exe_param; /* This is true if given HW chip has a quirky Cycle Counter * wraparound which resets to 0x7fffffff instead of 0. 
All @@ -578,6 +582,12 @@ struct ath10k { */ bool has_shifted_cc_wraparound; + /* Some chips expect the fragment descriptor to be in contiguous + * memory for any TX operation. Set the continuous_frag_desc flag + * for hardware that has this requirement. + */ + bool continuous_frag_desc; + struct ath10k_hw_params_fw { const char *dir; const char *fw; @@ -602,6 +612,12 @@ struct ath10k { const struct firmware *cal_file; + struct { + const void *firmware_codeswap_data; + size_t firmware_codeswap_len; + struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info; + } swap; + char spec_board_id[100]; bool spec_board_loaded; @@ -617,6 +633,7 @@ struct ath10k { bool is_roc; int vdev_id; int roc_freq; + bool roc_notify; } scan; struct { @@ -675,6 +692,8 @@ struct ath10k { int max_num_stations; int max_num_vdevs; int max_num_tdls_vdevs; + int num_active_peers; + int num_tids; struct work_struct offchan_tx_work; struct sk_buff_head offchan_tx_queue; @@ -749,6 +768,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, enum ath10k_hw_rev hw_rev, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); +void ath10k_core_get_fw_features_str(struct ath10k *ar, + char *buf, + size_t max_len); int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 8fa606a9c4dd..edf6047997a7 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info); void ath10k_print_driver_info(struct ath10k *ar) { - ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n", + char fw_features[128]; + + ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); + + ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n", ar->hw_params.name, ar->target_version, ar->chip_id, @@ -137,8 +141,10 @@ void ath10k_print_driver_info(struct ath10k *ar) ar->htt.target_version_major, ar->htt.target_version_minor, ar->wmi.op_version, + ar->htt.op_version, ath10k_cal_mode_str(ar->cal_mode), - ar->max_num_stations); + ar->max_num_stations, + fw_features); ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", config_enabled(CONFIG_ATH10K_DEBUG), config_enabled(CONFIG_ATH10K_DEBUGFS), diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 6da6ef26143a..4474c3e839db 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = { [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, }; +static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = { + [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF, + [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH, + [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP, + [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP, + [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA, + [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA, + [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND, + [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG, +
[HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF, + [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND, + [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND, + [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] = + HTT_T2H_MSG_TYPE_TX_INSPECT_IND, + [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] = + HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION, + [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE, + [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] = + HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND, + [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] = + HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND, + [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST, + [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS, + [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] = + HTT_T2H_MSG_TYPE_TX_FETCH_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] = + HTT_T2H_MSG_TYPE_TX_FETCH_CONF, + [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] = + HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, + [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] = + HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, +}; + int ath10k_htt_connect(struct ath10k_htt *htt) { struct ath10k_htc_svc_conn_req conn_req; @@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar) 2; /* ip4 dscp or ip6 priority */ switch (ar->htt.op_version) { + case ATH10K_FW_HTT_OP_VERSION_10_4: + ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types; + ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS; + break; case ATH10K_FW_HTT_OP_VERSION_10_1: ar->htt.t2h_msg_types = htt_10x_t2h_msg_types; ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS; @@ -208,5 +249,9 @@ int ath10k_htt_setup(struct ath10k_htt *htt) if (status) return status; + status = ath10k_htt_send_frag_desc_bank_cfg(htt); + if (status) + return status; + return ath10k_htt_send_rx_ring_cfg_ll(htt); } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 7e8a0d835663..8bdf1e7dd171 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -87,6 +87,11 @@ struct htt_data_tx_desc_frag { __le32 len; } __packed; +struct htt_msdu_ext_desc { + __le32 tso_flag[4]; + struct htt_data_tx_desc_frag frags[6]; +}; + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, @@ -349,6 +354,38 @@ enum htt_tlv_t2h_msg_type { HTT_TLV_T2H_NUM_MSGS }; +enum htt_10_4_t2h_msg_type { + HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0, + HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1, + HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2, + HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3, + HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4, + HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5, + HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6, + HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7, + HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8, + HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9, + HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa, + HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb, + HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, + HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, + HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe, + HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf, + HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10, + HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11, + HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12, + HTT_10_4_T2H_MSG_TYPE_TEST = 0x13, + HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, + HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, + 
HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17, + HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, + /* 0x19 to 0x2f are reserved */ + HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30, + /* keep this last */ + HTT_10_4_T2H_NUM_MSGS +}; + enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_VERSION_CONF, HTT_T2H_MSG_TYPE_RX_IND, @@ -375,6 +412,10 @@ enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_AGGR_CONF, HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, HTT_T2H_MSG_TYPE_TEST, + HTT_T2H_MSG_TYPE_EN_STATS, + HTT_T2H_MSG_TYPE_TX_FETCH_IND, + HTT_T2H_MSG_TYPE_TX_FETCH_CONF, + HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, /* keep this last */ HTT_T2H_NUM_MSGS }; @@ -1430,6 +1471,11 @@ struct ath10k_htt { /* rx_status template */ struct ieee80211_rx_status rx_status; + + struct { + dma_addr_t paddr; + struct htt_msdu_ext_desc *vaddr; + } frag_desc; }; #define RX_HTT_HDR_STATUS_LEN 64 @@ -1497,6 +1543,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt); int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie); +int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt); int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt); int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 89eb16b30fc4..d7d118328f31 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1201,7 +1201,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar, { struct htt_rx_desc *rxd; enum rx_msdu_decap_format decap; - struct ieee80211_hdr *hdr; /* First msdu's decapped header: * [802.11 header] <-- padded to 4 bytes long @@ -1215,7 +1214,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar, */ rxd = (void *)msdu->data - sizeof(*rxd); - hdr = (void *)rxd->rx_hdr_status; decap = MS(__le32_to_cpu(rxd->msdu_start.info1), RX_MSDU_START_INFO1_DECAP_FORMAT); @@ -2074,6 +2072,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; case HTT_T2H_MSG_TYPE_CHAN_CHANGE: break; + case HTT_T2H_MSG_TYPE_EN_STATS: + case HTT_T2H_MSG_TYPE_TX_FETCH_IND: + case HTT_T2H_MSG_TYPE_TX_FETCH_CONF: + case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND: default: ath10k_warn(ar, "htt event (%d) not handled\n", resp->hdr.msg_type); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index a60ef7d1d5fc..148d5b607c3c 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -84,6 +84,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; + int ret, size; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); @@ -94,11 +95,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, sizeof(struct ath10k_htt_txbuf), 4, 0); if (!htt->tx_pool) { - idr_destroy(&htt->pending_tx); - return -ENOMEM; + ret = -ENOMEM; + goto free_idr_pending_tx; + } + + if (!ar->hw_params.continuous_frag_desc) + goto skip_frag_desc_alloc; + + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_DMA); + if (!htt->frag_desc.vaddr) { + 
ath10k_warn(ar, "failed to alloc fragment desc memory\n"); + ret = -ENOMEM; + goto free_tx_pool; } +skip_frag_desc_alloc: return 0; + +free_tx_pool: + dma_pool_destroy(htt->tx_pool); +free_idr_pending_tx: + idr_destroy(&htt->pending_tx); + return ret; } static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) @@ -121,9 +142,18 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) void ath10k_htt_tx_free(struct ath10k_htt *htt) { + int size; + idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); idr_destroy(&htt->pending_tx); dma_pool_destroy(htt->tx_pool); + + if (htt->frag_desc.vaddr) { + size = htt->max_num_pending_tx * + sizeof(struct htt_msdu_ext_desc); + dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr, + htt->frag_desc.paddr); + } } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) @@ -201,6 +231,48 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) return 0; } +int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + struct sk_buff *skb; + struct htt_cmd *cmd; + int ret, size; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + if (!htt->frag_desc.paddr) { + ath10k_warn(ar, "invalid frag desc memory\n"); + return -EINVAL; + } + + size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg); + skb = ath10k_htc_alloc_skb(ar, size); + if (!skb) + return -ENOMEM; + + skb_put(skb, size); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; + cmd->frag_desc_bank_cfg.info = 0; + cmd->frag_desc_bank_cfg.num_banks = 1; + cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); + cmd->frag_desc_bank_cfg.bank_base_addrs[0] = + __cpu_to_le32(htt->frag_desc.paddr); + cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = + __cpu_to_le16(htt->max_num_pending_tx - 1); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", + ret); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 5997f00afe3b..fef7ccf6e185 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = { .ce7_base_address = 0x00059000, .soc_reset_control_si0_rst_mask = 0x00000001, .soc_reset_control_ce_rst_mask = 0x00040000, - .soc_chip_id_address = 0x00ec, - .scratch_3_address = 0x0030, + .soc_chip_id_address = 0x000000ec, + .scratch_3_address = 0x00000030, + .fw_indicator_address = 0x00009030, + .pcie_local_base_address = 0x00080000, + .ce_wrap_intr_sum_host_msi_lsb = 0x00000008, + .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00, + .pcie_intr_fw_mask = 0x00000400, + .pcie_intr_ce_mask_all = 0x0007f800, + .pcie_intr_clr_address = 0x00000014, }; const struct ath10k_hw_regs qca6174_regs = { @@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = { .ce7_base_address = 0x00036000, .soc_reset_control_si0_rst_mask = 0x00000000, .soc_reset_control_ce_rst_mask = 0x00000001, - .soc_chip_id_address = 0x000f0, - .scratch_3_address = 0x0028, + .soc_chip_id_address = 0x000000f0, + .scratch_3_address = 0x00000028, + .fw_indicator_address = 0x0003a028, + .pcie_local_base_address = 0x00080000, + .ce_wrap_intr_sum_host_msi_lsb = 0x00000008, + 
.ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+	.pcie_intr_fw_mask = 0x00000400,
+	.pcie_intr_ce_mask_all = 0x0007f800,
+	.pcie_intr_clr_address = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+	.rtc_state_cold_reset_mask = 0x00000400,
+	.rtc_soc_base_address = 0x00080000,
+	.rtc_wmac_base_address = 0x00000000,
+	.soc_core_base_address = 0x00082000,
+	.ce_wrapper_base_address = 0x0004d000,
+	.ce0_base_address = 0x0004a000,
+	.ce1_base_address = 0x0004a400,
+	.ce2_base_address = 0x0004a800,
+	.ce3_base_address = 0x0004ac00,
+	.ce4_base_address = 0x0004b000,
+	.ce5_base_address = 0x0004b400,
+	.ce6_base_address = 0x0004b800,
+	.ce7_base_address = 0x0004bc00,
+	/* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+	 * addresses of CE0 and CE1 no other copy engine is directly
+	 * referred to in the code. It is not really necessary to assign
+	 * addresses for the newly supported CEs in this address table.
+	 *	Copy Engine	Address
+	 *	CE8		0x0004c000
+	 *	CE9		0x0004c400
+	 *	CE10		0x0004c800
+	 *	CE11		0x0004cc00
+	 */
+	.soc_reset_control_si0_rst_mask = 0x00000001,
+	.soc_reset_control_ce_rst_mask = 0x00000100,
+	.soc_chip_id_address = 0x000000ec,
+	.scratch_3_address = 0x00040050,
+	.fw_indicator_address = 0x00040050,
+	.pcie_local_base_address = 0x00000000,
+	.ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+	.ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+	.pcie_intr_fw_mask = 0x00100000,
+	.pcie_intr_ce_mask_all = 0x000fff00,
+	.pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+	.rtc_state_val_on = 3,
+	.ce_count = 8,
+	.msi_assign_ce_max = 7,
+	.num_target_ce_config_wlan = 7,
+	.ce_desc_meta_data_mask = 0xFFFC,
+	.ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+	.rtc_state_val_on = 3,
+	.ce_count = 8,
+	.msi_assign_ce_max = 7,
+	.num_target_ce_config_wlan = 7,
+	.ce_desc_meta_data_mask = 0xFFFC,
+	.ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+	.rtc_state_val_on = 5,
+	.ce_count = 12,
+	.msi_assign_ce_max = 12,
+	.num_target_ce_config_wlan = 10,
+	.ce_desc_meta_data_mask = 0xFFF0,
+	.ce_desc_meta_data_lsb = 4,
 };
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 85cca29375fe..917228517546 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -72,6 +72,18 @@ enum qca6174_chip_id_rev {
 #define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV	0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION	0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV	0x1
+#define QCA99X0_HW_2_0_FW_DIR		ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE		"firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE		"otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE	"board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
 #define ATH10K_FW_API2_FILE		"firmware-2.bin"
 #define ATH10K_FW_API3_FILE		"firmware-3.bin"
@@ -112,6 +124,9 @@ enum ath10k_fw_ie_type {
	 * FW API 5 and above.
	 */
 	ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+	/* Code swap image for firmware binary */
+	ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version {
 	ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
 	ATH10K_FW_WMI_OP_VERSION_TLV = 4,
 	ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+	ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
 
 	/* keep last */
 	ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version {
 	ATH10K_FW_HTT_OP_VERSION_TLV = 3,
 
+	ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
 	/* keep last */
 	ATH10K_FW_HTT_OP_VERSION_MAX,
 };
@@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version {
 enum ath10k_hw_rev {
 	ATH10K_HW_QCA988X,
 	ATH10K_HW_QCA6174,
+	ATH10K_HW_QCA99X0,
 };
 
 struct ath10k_hw_regs {
@@ -164,16 +183,38 @@ struct ath10k_hw_regs {
 	u32 soc_reset_control_ce_rst_mask;
 	u32 soc_chip_id_address;
 	u32 scratch_3_address;
+	u32 fw_indicator_address;
+	u32 pcie_local_base_address;
+	u32 ce_wrap_intr_sum_host_msi_lsb;
+	u32 ce_wrap_intr_sum_host_msi_mask;
+	u32 pcie_intr_fw_mask;
+	u32 pcie_intr_ce_mask_all;
+	u32 pcie_intr_clr_address;
 };
 
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+	u32 rtc_state_val_on;
+	u8 ce_count;
+	u8 msi_assign_ce_max;
+	u8 num_target_ce_config_wlan;
+	u16 ce_desc_meta_data_mask;
+	u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
 				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
 
 /* Known peculiarities:
  *  - current FW doesn't support raw rx mode (last tested v599)
@@ -310,8 +351,73 @@ enum ath10k_hw_rate_cck {
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS		22
 
+/* Diagnostic Window */
+#define CE_DIAG_PIPE	7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS			16
+#define TARGET_10_4_NUM_STATIONS		32
+#define TARGET_10_4_NUM_PEERS			((TARGET_10_4_NUM_STATIONS) + \
+						 (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS		0
+
+/* TODO: increase qcache max client limit to 512 after
+ * testing with 512 clients.
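
As a quick check of how the 10.4 sizing defines above compose: the peer count is stations plus vdevs, and the TID budget (TARGET_10_4_TGT_NUM_TIDS, just after this hunk) doubles it. A compilable sketch restating the same arithmetic:

#include <stdio.h>

#define NUM_VDEVS	16
#define NUM_STATIONS	32
#define NUM_PEERS	(NUM_STATIONS + NUM_VDEVS)	/* 48 */
#define NUM_TIDS	(NUM_PEERS * 2)			/* 96 */

int main(void)
{
	printf("10.4 peers=%d tids=%d\n", NUM_PEERS, NUM_TIDS);
	return 0;
}
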
+ */ +#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256 +#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 +#define TARGET_10_4_NUM_OFFLOAD_PEERS 0 +#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 +#define TARGET_10_4_NUM_PEER_KEYS 2 +#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) +#define TARGET_10_4_AST_SKID_LIMIT 32 +#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \ + BIT(2) | BIT(3)) +#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \ + BIT(2) | BIT(3)) + +/* 100 ms for video, best-effort, and background */ +#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100 + +/* 40 ms for voice */ +#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40 + +#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI +#define TARGET_10_4_SCAN_MAX_REQS 4 +#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8 + +/* Note: mcast to ucast is disabled by default */ +#define TARGET_10_4_NUM_MCAST_GROUPS 0 +#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0 +#define TARGET_10_4_MCAST2UCAST_MODE 0 + +#define TARGET_10_4_TX_DBG_LOG_SIZE 1024 +#define TARGET_10_4_NUM_WDS_ENTRIES 32 +#define TARGET_10_4_DMA_BURST_SIZE 1 +#define TARGET_10_4_MAC_AGGR_DELIM 0 +#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 +#define TARGET_10_4_VOW_CONFIG 0 +#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3 +#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) +#define TARGET_10_4_11AC_TX_MAX_FRAGS 2 +#define TARGET_10_4_MAX_PEER_EXT_STATS 16 +#define TARGET_10_4_SMART_ANT_CAP 0 +#define TARGET_10_4_BK_MIN_FREE 0 +#define TARGET_10_4_BE_MIN_FREE 0 +#define TARGET_10_4_VI_MIN_FREE 0 +#define TARGET_10_4_VO_MIN_FREE 0 +#define TARGET_10_4_RX_BATCH_MODE 1 +#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0 +#define TARGET_10_4_ATF_CONFIG 0 +#define TARGET_10_4_IPHDR_PAD_CONFIG 1 +#define TARGET_10_4_QWRAP_CONFIG 0 + /* Number of Copy Engines supported */ -#define CE_COUNT 8 +#define CE_COUNT ar->hw_values->ce_count /* * Total number of PCIe MSI interrupts requested for all interrupt sources. @@ -335,10 +441,10 @@ enum ath10k_hw_rate_cck { /* MSIs for Copy Engines */ #define MSI_ASSIGN_CE_INITIAL 1 -#define MSI_ASSIGN_CE_MAX 7 +#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max /* as of IP3.7.1 */ -#define RTC_STATE_V_ON 3 +#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on #define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask #define RTC_STATE_V_LSB 0 @@ -374,7 +480,7 @@ enum ath10k_hw_rate_cck { #define CE7_BASE_ADDRESS ar->regs->ce7_base_address #define DBI_BASE_ADDRESS 0x00060000 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 -#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 +#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address #define SOC_RESET_CONTROL_ADDRESS 0x00000000 #define SOC_RESET_CONTROL_OFFSET 0x00000000 @@ -448,7 +554,7 @@ enum ath10k_hw_rate_cck { #define CORE_CTRL_ADDRESS 0x0000 #define PCIE_INTR_ENABLE_ADDRESS 0x0008 #define PCIE_INTR_CAUSE_ADDRESS 0x000c -#define PCIE_INTR_CLR_ADDRESS 0x0014 +#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address #define SCRATCH_3_ADDRESS ar->regs->scratch_3_address #define CPU_INTR_ADDRESS 0x0010 @@ -456,16 +562,18 @@ enum ath10k_hw_rate_cck { #define CCNT_TO_MSEC(x) ((x) / 88000) /* Firmware indications to the Host via SCRATCH_3 register. 
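
Macros like CE_COUNT, MSI_ASSIGN_CE_MAX and RTC_STATE_V_ON above turn what used to be compile-time constants into per-revision lookups through a pointer installed once when the device is identified. A minimal sketch of that pattern, with made-up types standing in for ath10k_hw_values:

#include <stdio.h>

enum hw_rev { HW_QCA988X, HW_QCA6174, HW_QCA99X0 };

struct hw_values { int rtc_state_val_on; int ce_count; };

static const struct hw_values values_tbl[] = {
	[HW_QCA988X] = { .rtc_state_val_on = 3, .ce_count = 8 },
	[HW_QCA6174] = { .rtc_state_val_on = 3, .ce_count = 8 },
	[HW_QCA99X0] = { .rtc_state_val_on = 5, .ce_count = 12 },
};

int main(void)
{
	/* probe time: pick the table once; every "macro" use derefs it */
	const struct hw_values *hw_values = &values_tbl[HW_QCA99X0];

	printf("RTC_STATE_V_ON=%d CE_COUNT=%d\n",
	       hw_values->rtc_state_val_on, hw_values->ce_count);
	return 0;
}
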
*/ -#define FW_INDICATOR_ADDRESS (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS) +#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address #define FW_IND_EVENT_PENDING 1 #define FW_IND_INITIALIZED 2 /* HOST_REG interrupt from firmware */ -#define PCIE_INTR_FIRMWARE_MASK 0x00000400 -#define PCIE_INTR_CE_MASK_ALL 0x0007f800 +#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask +#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all #define DRAM_BASE_ADDRESS 0x00400000 +#define PCIE_BAR_REG_ADDRESS 0x40030 + #define MISSING 0 #define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 218b6af63447..c9a7d5b5dffc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1668,7 +1668,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) return 0; } -static int ath10k_mac_ps_vif_count(struct ath10k *ar) +static int ath10k_mac_num_vifs_started(struct ath10k *ar) { struct ath10k_vif *arvif; int num = 0; @@ -1676,7 +1676,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) - if (arvif->ps) + if (arvif->is_started) num++; return num; @@ -1700,7 +1700,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) enable_ps = arvif->ps; - if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 && + if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, ar->fw_features)) { ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", @@ -3034,38 +3034,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, lockdep_assert_held(&ar->htt.tx_lock); - switch (pause_id) { - case WMI_TLV_TX_PAUSE_ID_MCC: - case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: - case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: - case WMI_TLV_TX_PAUSE_ID_AP_PS: - case WMI_TLV_TX_PAUSE_ID_IBSS_PS: - switch (action) { - case WMI_TLV_TX_PAUSE_ACTION_STOP: - ath10k_mac_vif_tx_lock(arvif, pause_id); - break; - case WMI_TLV_TX_PAUSE_ACTION_WAKE: - ath10k_mac_vif_tx_unlock(arvif, pause_id); - break; - default: - ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n", - action, arvif->vdev_id); - break; - } + switch (action) { + case WMI_TLV_TX_PAUSE_ACTION_STOP: + ath10k_mac_vif_tx_lock(arvif, pause_id); + break; + case WMI_TLV_TX_PAUSE_ACTION_WAKE: + ath10k_mac_vif_tx_unlock(arvif, pause_id); break; - case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: - case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: - case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: - case WMI_TLV_TX_PAUSE_ID_HOST: default: - /* FIXME: Some pause_ids aren't vdev specific. Instead they - * target peer_id and tid. Implementing these could improve - * traffic scheduling fairness across multiple connected - * stations in AP/IBSS modes. 
- */ - ath10k_dbg(ar, ATH10K_DBG_MAC, - "mac ignoring unsupported tx pause vdev %i id %d\n", - arvif->vdev_id, pause_id); + ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n", + action, arvif->vdev_id); break; } } @@ -3082,12 +3060,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct ath10k_mac_tx_pause *arg = data; + if (arvif->vdev_id != arg->vdev_id) + return; + ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); } -void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id, - enum wmi_tlv_tx_pause_id pause_id, - enum wmi_tlv_tx_pause_action action) +void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, + enum wmi_tlv_tx_pause_id pause_id, + enum wmi_tlv_tx_pause_action action) { struct ath10k_mac_tx_pause arg = { .vdev_id = vdev_id, @@ -3449,14 +3430,13 @@ void __ath10k_scan_finish(struct ath10k *ar) case ATH10K_SCAN_IDLE: break; case ATH10K_SCAN_RUNNING: - if (ar->scan.is_roc) - ieee80211_remain_on_channel_expired(ar->hw); - /* fall through */ case ATH10K_SCAN_ABORTING: if (!ar->scan.is_roc) ieee80211_scan_completed(ar->hw, (ar->scan.state == ATH10K_SCAN_ABORTING)); + else if (ar->scan.roc_notify) + ieee80211_remain_on_channel_expired(ar->hw); /* fall through */ case ATH10K_SCAN_STARTING: ar->scan.state = ATH10K_SCAN_IDLE; @@ -4641,9 +4621,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.vdev_id = arvif->vdev_id; arg.scan_id = ATH10K_SCAN_ID; - if (!req->no_cck) - arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; - if (req->ie_len) { arg.ie_len = req->ie_len; memcpy(arg.ie, req->ie, arg.ie_len); @@ -5462,6 +5439,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, ar->scan.is_roc = true; ar->scan.vdev_id = arvif->vdev_id; ar->scan.roc_freq = chan->center_freq; + ar->scan.roc_notify = true; ret = 0; break; case ATH10K_SCAN_STARTING: @@ -5525,7 +5503,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + ar->scan.roc_notify = false; + spin_unlock_bh(&ar->data_lock); + ath10k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); @@ -5566,7 +5550,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct ath10k *ar = hw->priv; bool skip; - int ret; + long time_left; /* mac80211 doesn't care if we really xmit queued frames or not * we'll collect those frames either way if we stop/delete vdevs */ @@ -5578,7 +5562,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (ar->state == ATH10K_STATE_WEDGED) goto skip; - ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ + time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({ bool empty; spin_lock_bh(&ar->htt.tx_lock); @@ -5592,9 +5576,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, (empty || skip); }), ATH10K_FLUSH_TIMEOUT_HZ); - if (ret <= 0 || skip) - ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n", - skip, ar->state, ret); + if (time_left == 0 || skip) + ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n", + skip, ar->state, time_left); skip: mutex_unlock(&ar->conf_mutex); @@ -6219,6 +6203,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = true; + ret = ath10k_mac_vif_setup_ps(arvif); + if (ret) { + ath10k_warn(ar, "failed to update vdev %i ps: %d\n", + 
arvif->vdev_id, ret); + goto err_stop; + } + if (vif->type == NL80211_IFTYPE_MONITOR) { ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); if (ret) { @@ -6236,6 +6227,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, err_stop: ath10k_vdev_stop(arvif); arvif->is_started = false; + ath10k_mac_vif_setup_ps(arvif); err: mutex_unlock(&ar->conf_mutex); @@ -6565,8 +6557,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { { .max = 2, - .types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), }, @@ -6576,6 +6571,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { }, }; +static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { + { + .max = 2, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 2, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { { .max = 1, @@ -6594,7 +6609,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { { .limits = ath10k_tlv_if_limit, .num_different_channels = 1, - .max_interfaces = 3, + .max_interfaces = 4, .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), }, { @@ -6608,11 +6623,17 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { { .limits = ath10k_tlv_if_limit, - .num_different_channels = 2, - .max_interfaces = 3, + .num_different_channels = 1, + .max_interfaces = 4, .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), }, { + .limits = ath10k_tlv_qcs_if_limit, + .num_different_channels = 2, + .max_interfaces = 4, + .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), + }, + { .limits = ath10k_tlv_if_limit_ibss, .num_different_channels = 1, .max_interfaces = 2, @@ -6620,6 +6641,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { }, }; +static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 16, + .types = BIT(NL80211_IFTYPE_AP) + }, +}; + +static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { + { + .limits = ath10k_10_4_if_limits, + .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, +#ifdef CONFIG_ATH10K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +#endif + }, +}; + static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) { struct ieee80211_sta_vht_cap vht_cap = {0}; @@ -6902,6 +6950,8 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } + wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing @@ -6941,6 +6991,11 @@ int ath10k_mac_register(struct ath10k *ar) ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath10k_10x_if_comb); break; + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb; + 
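
A rough sketch of how interface-combination tables like the ones above are interpreted: each limit row caps a set of interface types, and a requested mix is valid only if every row's cap holds and the total stays within max_interfaces. This is a simplification, not the actual cfg80211 validation code:

#include <stdio.h>

#define TYPE_STATION	(1u << 0)
#define TYPE_AP		(1u << 1)

struct iface_limit { int max; unsigned int types; };

static const struct iface_limit limits_10_4[] = {
	{ .max = 1,  .types = TYPE_STATION },
	{ .max = 16, .types = TYPE_AP },
};

static int combo_ok(int n_sta, int n_ap, int max_interfaces)
{
	int counts[2] = { n_sta, n_ap };	/* indexed like limits_10_4 */

	if (n_sta + n_ap > max_interfaces)
		return 0;
	for (unsigned int i = 0; i < 2; i++)
		if (counts[i] > limits_10_4[i].max)
			return 0;
	return 1;
}

int main(void)
{
	printf("1 sta + 15 ap: %s\n", combo_ok(1, 15, 16) ? "ok" : "rejected");
	printf("2 sta: %s\n", combo_ok(2, 0, 16) ? "ok" : "rejected");
	return 0;
}
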
ar->hw->wiphy->n_iface_combinations = + ARRAY_SIZE(ath10k_10_4_if_comb); + break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: WARN_ON(1); diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index b291f063705c..e3cefe4c7cfd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif, void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb); void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id); -void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id, - enum wmi_tlv_tx_pause_id pause_id, - enum wmi_tlv_tx_pause_action action); +void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, + enum wmi_tlv_tx_pause_id pause_id, + enum wmi_tlv_tx_pause_action action); u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, u8 hw_rate); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index ea656e011a96..5778e5277823 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -59,6 +59,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define QCA988X_2_0_DEVICE_ID (0x003c) #define QCA6174_2_1_DEVICE_ID (0x003e) +#define QCA99X0_2_0_DEVICE_ID (0x0040) static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ @@ -81,7 +82,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); -static int ath10k_pci_warm_reset(struct ath10k *ar); +static int ath10k_pci_safe_chip_reset(struct ath10k *ar); static int ath10k_pci_wait_for_target_init(struct ath10k *ar); static int ath10k_pci_init_irq(struct ath10k *ar); static int ath10k_pci_deinit_irq(struct ath10k *ar); @@ -90,6 +91,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar); static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, struct ath10k_ce_pipe *rx_pipe, struct bmi_xfer *xfer); +static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); static const struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ @@ -155,6 +157,38 @@ static const struct ce_attr host_ce_config_wlan[] = { .src_sz_max = DIAG_TRANSFER_LIMIT, .dest_nentries = 2, }, + + /* CE8: target->host pktlog */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + }, + + /* CE9 target autonomous qcache memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE10: target autonomous hif memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE11: target autonomous hif memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, }; /* Target firmware's Copy Engine configuration. 
 */
@@ -232,6 +266,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
 	},
 
 	/* CE7 used only by Host */
+	{
+		.pipenum = __cpu_to_le32(7),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE8 target->host pktlog */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE9 target autonomous qcache memcpy */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* It is not necessary to send target wlan configuration for CE10 & CE11
+	 * as these CEs are not actively used in target.
+	 */
 };
 
 /*
@@ -479,6 +545,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
+	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(value), ar_pci->mem_len);
+		return;
+	}
+
 	ret = ath10k_pci_wake(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +568,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
 	u32 val;
 	int ret;
 
+	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(val), ar_pci->mem_len);
+		return 0;
+	}
+
 	ret = ath10k_pci_wake(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +756,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
 	ath10k_pci_rx_post(ar);
 }
 
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					 CORE_CTRL_ADDRESS) &
+		       0x7ff) << 21;
+		break;
+	case ATH10K_HW_QCA99X0:
+		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+		break;
+	}
+
+	val |= 0x100000 | (addr & 0xfffff);
+	return val;
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
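
To make the conversion helper above concrete, here is a standalone sketch of its QCA988X/6174 branch: keep the low 20 bits of the target CPU address, set bit 20, and take the window-select bits from the register value (the register value passed in below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

static uint32_t cpu_to_ce_addr(uint32_t core_ctrl, uint32_t addr)
{
	uint32_t val = (core_ctrl & 0x7ff) << 21;	/* window select */

	val |= 0x100000 | (addr & 0xfffff);		/* bit 20 + low 20 bits */
	return val;
}

int main(void)
{
	printf("0x%08x\n", cpu_to_ce_addr(0x00000002, 0x0040abcd));
	return 0;
}
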
* Caller must guarantee proper alignment, when applicable, and single user @@ -740,8 +838,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * convert it from Target CPU virtual address space * to CE address space */ - address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, - address); + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 0); @@ -899,7 +996,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * to * CE address space */ - address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); + address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); remaining_bytes = orig_nbytes; ce_data = ce_data_base; @@ -1331,20 +1428,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) { u32 val; - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); - val &= ~CORE_CTRL_PCIE_REG_31_MASK; - - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); + switch (ar->hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA6174: + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS); + val &= ~CORE_CTRL_PCIE_REG_31_MASK; + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS, val); + break; + case ATH10K_HW_QCA99X0: + /* TODO: Find appropriate register configuration for QCA99X0 + * to mask irq/MSI. + */ + break; + } } static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) { u32 val; - val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); - val |= CORE_CTRL_PCIE_REG_31_MASK; - - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); + switch (ar->hw_rev) { + case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA6174: + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS); + val |= CORE_CTRL_PCIE_REG_31_MASK; + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + CORE_CTRL_ADDRESS, val); + break; + case ATH10K_HW_QCA99X0: + /* TODO: Find appropriate register configuration for QCA99X0 + * to unmask irq/MSI. + */ + break; + } } static void ath10k_pci_irq_disable(struct ath10k *ar) @@ -1506,7 +1625,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) * masked. To prevent the device from asserting the interrupt reset it * before proceeding with cleanup. 
*/ - ath10k_pci_warm_reset(ar); + ath10k_pci_safe_chip_reset(ar); ath10k_pci_irq_disable(ar); ath10k_pci_irq_sync(ar); @@ -1687,6 +1806,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) switch (ar_pci->pdev->device) { case QCA988X_2_0_DEVICE_ID: + case QCA99X0_2_0_DEVICE_ID: return 1; case QCA6174_2_1_DEVICE_ID: switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { @@ -1757,7 +1877,8 @@ static int ath10k_pci_init_config(struct ath10k *ar) ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, target_ce_config_wlan, - sizeof(target_ce_config_wlan)); + sizeof(struct ce_pipe_config) * + NUM_TARGET_CE_CONFIG_WLAN); if (ret != 0) { ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); @@ -1871,7 +1992,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar) } /* Last CE is Diagnostic Window */ - if (i == CE_COUNT - 1) { + if (i == CE_DIAG_PIPE) { ar_pci->ce_diag = pipe->ce_hdl; continue; } @@ -2016,6 +2137,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) return 0; } +static int ath10k_pci_safe_chip_reset(struct ath10k *ar) +{ + if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { + return ath10k_pci_warm_reset(ar); + } else if (QCA_REV_99X0(ar)) { + ath10k_pci_irq_disable(ar); + return ath10k_pci_qca99x0_chip_reset(ar); + } else { + return -ENOTSUPP; + } +} + static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) { int i, ret; @@ -2122,12 +2255,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) return 0; } +static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); + + ret = ath10k_pci_cold_reset(ar); + if (ret) { + ath10k_warn(ar, "failed to cold reset: %d\n", ret); + return ret; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", + ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); + + return 0; +} + static int ath10k_pci_chip_reset(struct ath10k *ar) { if (QCA_REV_988X(ar)) return ath10k_pci_qca988x_chip_reset(ar); else if (QCA_REV_6174(ar)) return ath10k_pci_qca6174_chip_reset(ar); + else if (QCA_REV_99X0(ar)) + return ath10k_pci_qca99x0_chip_reset(ar); else return -ENOTSUPP; } @@ -2679,6 +2838,7 @@ static int ath10k_pci_claim(struct ath10k *ar) pci_set_master(pdev); /* Arrange for access to Target SoC registers. */ + ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); if (!ar_pci->mem) { ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); @@ -2745,6 +2905,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; break; + case QCA99X0_2_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA99X0; + break; default: WARN_ON(1); return -ENOTSUPP; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index d7696ddc03c4..8d364fb8f743 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -162,6 +162,7 @@ struct ath10k_pci { struct device *dev; struct ath10k *ar; void __iomem *mem; + size_t mem_len; /* * Number of MSI interrupts granted, 0 --> using legacy PCI line @@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) #define CDC_WAR_MAGIC_STR 0xceef0000 #define CDC_WAR_DATA_CE 4 -/* - * TODO: Should be a function call specific to each Target-type. - * This convoluted macro converts from Target CPU Virtual Address Space to CE - * Address Space. 
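
The "safe" reset introduced above is a per-revision dispatch: a warm reset where the hardware supports it, and for QCA99X0 (which has no warm reset path here) disabling interrupts followed by a cold reset. A compact sketch of that shape, with stubbed reset routines standing in for the driver's:

#include <stdio.h>

enum hw_rev { HW_QCA988X, HW_QCA6174, HW_QCA99X0 };

static int warm_reset(void)   { printf("warm reset\n"); return 0; }
static int cold_reset(void)   { printf("cold reset\n"); return 0; }
static void irq_disable(void) { printf("irq disable\n"); }

static int safe_chip_reset(enum hw_rev rev)
{
	switch (rev) {
	case HW_QCA988X:
	case HW_QCA6174:
		return warm_reset();
	case HW_QCA99X0:
		irq_disable();
		return cold_reset();
	}
	return -1;	/* -ENOTSUPP in the driver */
}

int main(void)
{
	return safe_chip_reset(HW_QCA99X0);
}
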
As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
-	(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
-	CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
-	0x100000 | ((addr) & 0xfffff))
-
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 000000000000..3ca3fae408a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the fw binary with an even smaller IRAM size by
+ * using host memory to store some of the code segments.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+				     struct ath10k_swap_code_seg_info *seg_info,
+				     const void *data, size_t data_len)
+{
+	u8 *virt_addr = seg_info->virt_address[0];
+	u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+	const u8 *fw_data = data;
+	union ath10k_swap_code_seg_item *swap_item;
+	u32 length = 0;
+	u32 payload_len;
+	u32 total_payload_len = 0;
+	u32 size_left = data_len;
+
+	/* Parse the swap bin and copy the content to host allocated memory.
+	 * The format is a sequence of address, length and value records.
+	 * The last 4 bytes are the target write address. Currently the
+	 * address field is not used.
+ */ + seg_info->target_addr = -1; + while (size_left >= sizeof(*swap_item)) { + swap_item = (union ath10k_swap_code_seg_item *)fw_data; + payload_len = __le32_to_cpu(swap_item->tlv.length); + if ((payload_len > size_left) || + (payload_len == 0 && + size_left != sizeof(struct ath10k_swap_code_seg_tail))) { + ath10k_err(ar, "refusing to parse invalid tlv length %d\n", + payload_len); + return -EINVAL; + } + + if (payload_len == 0) { + if (memcmp(swap_item->tail.magic_signature, swap_magic, + ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) { + ath10k_err(ar, "refusing an invalid swap file\n"); + return -EINVAL; + } + seg_info->target_addr = + __le32_to_cpu(swap_item->tail.bmi_write_addr); + break; + } + + memcpy(virt_addr, swap_item->tlv.data, payload_len); + virt_addr += payload_len; + length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv); + size_left -= length; + fw_data += length; + total_payload_len += payload_len; + } + + if (seg_info->target_addr == -1) { + ath10k_err(ar, "failed to parse invalid swap file\n"); + return -EINVAL; + } + seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len); + + return 0; +} + +static void +ath10k_swap_code_seg_free(struct ath10k *ar, + struct ath10k_swap_code_seg_info *seg_info) +{ + u32 seg_size; + + if (!seg_info) + return; + + if (!seg_info->virt_address[0]) + return; + + seg_size = __le32_to_cpu(seg_info->seg_hw_info.size); + dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0], + seg_info->paddr[0]); +} + +static struct ath10k_swap_code_seg_info * +ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len) +{ + struct ath10k_swap_code_seg_info *seg_info; + void *virt_addr; + dma_addr_t paddr; + + swap_bin_len = roundup(swap_bin_len, 2); + if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) { + ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n", + swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX); + return NULL; + } + + seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL); + if (!seg_info) + return NULL; + + virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr, + GFP_KERNEL); + if (!virt_addr) { + ath10k_err(ar, "failed to allocate dma coherent memory\n"); + return NULL; + } + + seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr); + seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len); + seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len); + seg_info->seg_hw_info.num_segs = + __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED); + seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len)); + seg_info->virt_address[0] = virt_addr; + seg_info->paddr[0] = paddr; + + return seg_info; +} + +int ath10k_swap_code_seg_configure(struct ath10k *ar, + enum ath10k_swap_code_seg_bin_type type) +{ + int ret; + struct ath10k_swap_code_seg_info *seg_info = NULL; + + switch (type) { + case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW: + if (!ar->swap.firmware_swap_code_seg_info) + return 0; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); + seg_info = ar->swap.firmware_swap_code_seg_info; + break; + default: + case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP: + case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF: + ath10k_warn(ar, "ignoring unknown code swap binary type %d\n", + type); + return 0; + } + + ret = ath10k_bmi_write_memory(ar, seg_info->target_addr, + &seg_info->seg_hw_info, + sizeof(seg_info->seg_hw_info)); + if (ret) { + ath10k_err(ar, "failed to write Code swap segment information (%d)\n", + ret); + return ret; + } + + return 0; +} + +void 
ath10k_swap_code_seg_release(struct ath10k *ar) +{ + ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info); + ar->swap.firmware_codeswap_data = NULL; + ar->swap.firmware_codeswap_len = 0; + ar->swap.firmware_swap_code_seg_info = NULL; +} + +int ath10k_swap_code_seg_init(struct ath10k *ar) +{ + int ret; + struct ath10k_swap_code_seg_info *seg_info; + + if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data) + return 0; + + seg_info = ath10k_swap_code_seg_alloc(ar, + ar->swap.firmware_codeswap_len); + if (!seg_info) { + ath10k_err(ar, "failed to allocate fw code swap segment\n"); + return -ENOMEM; + } + + ret = ath10k_swap_code_seg_fill(ar, seg_info, + ar->swap.firmware_codeswap_data, + ar->swap.firmware_codeswap_len); + + if (ret) { + ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n", + ret); + ath10k_swap_code_seg_free(ar, seg_info); + return ret; + } + + ar->swap.firmware_swap_code_seg_info = seg_info; + + return 0; +} diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h new file mode 100644 index 000000000000..5c89952dd20f --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
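
For reference, a self-contained sketch of the swap-file walk performed by ath10k_swap_code_seg_fill() above: records of { address, length, data[length] } are consumed until an item whose length field reads zero, which is in fact the tail (12 zero magic bytes overlapping the length field, followed by the BMI write address). This sketch uses native-endian fields where the driver uses __le32, and the demo buffer contents are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAGIC_SZ 12

struct seg_tlv  { uint32_t address; uint32_t length; /* data follows */ };
struct seg_tail { uint8_t magic[MAGIC_SZ]; uint32_t bmi_write_addr; };

/* Returns bytes of code payload seen, or -1 on malformed input. */
static long walk_swap_bin(const uint8_t *p, size_t left, uint32_t *bmi_addr)
{
	static const uint8_t zero_magic[MAGIC_SZ];
	long total = 0;

	while (left >= sizeof(struct seg_tlv)) {
		struct seg_tlv tlv;

		memcpy(&tlv, p, sizeof(tlv));
		if (tlv.length == 0) {
			struct seg_tail tail;

			if (left != sizeof(tail))
				return -1;
			memcpy(&tail, p, sizeof(tail));
			if (memcmp(tail.magic, zero_magic, MAGIC_SZ))
				return -1;
			*bmi_addr = tail.bmi_write_addr;
			return total;
		}
		if (tlv.length > left - sizeof(tlv))
			return -1;
		/* the driver memcpy()s tlv.length bytes into the DMA buffer */
		total += tlv.length;
		p += sizeof(tlv) + tlv.length;
		left -= sizeof(tlv) + tlv.length;
	}
	return -1;
}

int main(void)
{
	/* one 4-byte record followed by the 16-byte tail (invented bytes) */
	uint8_t buf[8 + 4 + 16] = { 0 };
	uint32_t code_len = 4, bmi = 0x1234, addr = 0;

	memcpy(buf + 4, &code_len, 4);			/* tlv.length = 4 */
	memcpy(buf + 8 + 4 + MAGIC_SZ, &bmi, 4);	/* tail.bmi_write_addr */
	printf("copied=%ld bmi=0x%x\n",
	       walk_swap_bin(buf, sizeof(buf), &addr), addr);
	return 0;
}
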
+ */ + +#ifndef _SWAP_H_ +#define _SWAP_H_ + +#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024) +#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12 +#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16 +/* Currently only one swap segment is supported */ +#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1 + +struct ath10k_swap_code_seg_tlv { + __le32 address; + __le32 length; + u8 data[0]; +} __packed; + +struct ath10k_swap_code_seg_tail { + u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ]; + __le32 bmi_write_addr; +} __packed; + +union ath10k_swap_code_seg_item { + struct ath10k_swap_code_seg_tlv tlv; + struct ath10k_swap_code_seg_tail tail; +} __packed; + +enum ath10k_swap_code_seg_bin_type { + ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW, + ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF, +}; + +struct ath10k_swap_code_seg_hw_info { + /* Swap binary image size */ + __le32 swap_size; + __le32 num_segs; + + /* Swap data size */ + __le32 size; + __le32 size_log2; + __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX]; + __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX]; +} __packed; + +struct ath10k_swap_code_seg_info { + struct ath10k_swap_code_seg_hw_info seg_hw_info; + void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; + u32 target_addr; + dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED]; +}; + +int ath10k_swap_code_seg_configure(struct ath10k *ar, + enum ath10k_swap_code_seg_bin_type type); +void ath10k_swap_code_seg_release(struct ath10k *ar); +int ath10k_swap_code_seg_init(struct ath10k *ar); + +#endif diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index a417aae52623..768bef629099 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -450,4 +450,7 @@ Fw Mode/SubMode Mask #define QCA6174_BOARD_DATA_SZ 8192 #define QCA6174_BOARD_EXT_DATA_SZ 0 +#define QCA99X0_BOARD_DATA_SZ 12288 +#define QCA99X0_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 826500bb2b1b..6cf289158840 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -147,9 +147,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id) static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, const u8 *addr, bool expect_mapped) { - int ret; + long time_left; - ret = wait_event_timeout(ar->peer_mapping_wq, ({ + time_left = wait_event_timeout(ar->peer_mapping_wq, ({ bool mapped; spin_lock_bh(&ar->data_lock); @@ -160,7 +160,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id, test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)); }), 3*HZ); - if (ret <= 0) + if (time_left == 0) return -ETIMEDOUT; return 0; diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 8fdba3865c96..4189d4a90ce0 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar, "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n", pause_id, action, vdev_map, peer_id, tid_map); - for (vdev_id = 0; vdev_map; vdev_id++) { - if (!(vdev_map & BIT(vdev_id))) - continue; - - vdev_map &= ~BIT(vdev_id); - ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action); + switch (pause_id) { + case WMI_TLV_TX_PAUSE_ID_MCC: + case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: + case 
WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: + case WMI_TLV_TX_PAUSE_ID_AP_PS: + case WMI_TLV_TX_PAUSE_ID_IBSS_PS: + for (vdev_id = 0; vdev_map; vdev_id++) { + if (!(vdev_map & BIT(vdev_id))) + continue; + + vdev_map &= ~BIT(vdev_id); + ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id, + action); + } + break; + case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: + case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: + case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: + case WMI_TLV_TX_PAUSE_ID_HOST: + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac ignoring unsupported tx pause id %d\n", + pause_id); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac ignoring unknown tx pause vdev %d\n", + pause_id); + break; } kfree(tb); @@ -709,6 +731,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_swba_parse *swba = data; + struct wmi_tim_info_arg *tim_info_arg; + const struct wmi_tim_info *tim_info_ev = ptr; if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO) return -EPROTO; @@ -716,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len, if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info)) return -ENOBUFS; - swba->arg->tim_info[swba->n_tim++] = ptr; + if (__le32_to_cpu(tim_info_ev->tim_len) > + sizeof(tim_info_ev->tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + tim_info_arg = &swba->arg->tim_info[swba->n_tim]; + tim_info_arg->tim_len = tim_info_ev->tim_len; + tim_info_arg->tim_mcast = tim_info_ev->tim_mcast; + tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap; + tim_info_arg->tim_changed = tim_info_ev->tim_changed; + tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending; + + swba->n_tim++; + return 0; } @@ -3151,6 +3189,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID, .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID, .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + 
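
The vdev_map loop above visits each set bit and clears it as it goes, so iteration stops as soon as the map empties. The same walk in isolation (plain C, with an illustrative map value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vdev_map = 0x15;	/* vdevs 0, 2 and 4, for example */
	uint32_t vdev_id;

	for (vdev_id = 0; vdev_map; vdev_id++) {
		if (!(vdev_map & (1u << vdev_id)))
			continue;

		vdev_map &= ~(1u << vdev_id);
		printf("tx pause for vdev %u\n", vdev_id);
	}
	return 0;
}
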
.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { @@ -3204,6 +3274,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = { .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR, .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE, .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { @@ -3262,6 +3374,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_TLV_VDEV_PARAM_UNSUPPORTED, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + 
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, }; static const struct wmi_ops wmi_tlv_ops = { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 6c046c244705..0791a4336e80 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -148,6 +148,48 @@ static struct wmi_cmd_map wmi_cmd_map = { .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.X WMI cmd track */ @@ -271,6 +313,48 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + 
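
These per-ABI command maps all follow the same contract: every abstract command id resolves either to a firmware opcode or to WMI_CMD_UNSUPPORTED, and the send path rejects the sentinel instead of emitting a bogus opcode. A trimmed-down sketch of the pattern (field names and opcodes invented):

#include <stdio.h>

#define WMI_CMD_UNSUPPORTED 0

struct wmi_cmd_map {
	unsigned int init_cmdid;
	unsigned int nan_cmdid;
};

static const struct wmi_cmd_map map_main = {
	.init_cmdid = 0x0001,			/* invented opcode */
	.nan_cmdid  = WMI_CMD_UNSUPPORTED,	/* not wired up for this ABI */
};

static int wmi_send(unsigned int cmdid)
{
	if (cmdid == WMI_CMD_UNSUPPORTED)
		return -1;	/* the driver returns an error here */
	printf("send opcode 0x%04x\n", cmdid);
	return 0;
}

int main(void)
{
	wmi_send(map_main.init_cmdid);
	if (wmi_send(map_main.nan_cmdid) < 0)
		printf("nan command unsupported on this firmware branch\n");
	return 0;
}
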
.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.2.4 WMI cmd track */ @@ -393,6 +477,231 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = 
WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED, + .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED, + .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, +}; + +/* 10.4 WMI cmd track */ +static struct wmi_cmd_map wmi_10_4_cmd_map = { + .init_cmdid = WMI_10_4_INIT_CMDID, + .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID, + .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID, + .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID, + .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID, + 
.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID, + .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID, + .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID, + .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID, + .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID, + .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_4_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID, + .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID, + .force_fw_hang_cmdid = 
WMI_10_4_FORCE_FW_HANG_CMDID, + .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID, + .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, + .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED, + .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED, + .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED, + .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, + .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, + .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID, + .wlan_peer_caching_add_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID, + .wlan_peer_caching_evict_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID, + .wlan_peer_caching_restore_peer_cmdid = + WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID, + .wlan_peer_caching_print_all_peers_info_cmdid = + WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID, + .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID, + .peer_add_proxy_sta_entry_cmdid = + WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID, + .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID, + .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID, + .nan_cmdid = WMI_10_4_NAN_CMDID, + .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID, + .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID, + .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID, + .pdev_smart_ant_set_rx_antenna_cmdid = + WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + .peer_smart_ant_set_tx_antenna_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + .peer_smart_ant_set_train_info_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + .peer_smart_ant_set_node_config_ops_cmdid = + WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + .pdev_set_antenna_switch_table_cmdid = + WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID, + .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID, + .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID, + .pdev_ratepwr_chainmsk_table_cmdid = + WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID, + .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID, + .fwtest_cmdid = WMI_10_4_FWTEST_CMDID, + .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID, + .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID, + .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID, + .pdev_get_ani_ofdm_config_cmdid = + WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID, + .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID, + .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID, + .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID, + .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID, + .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID, + .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID, + .vdev_filter_neighbor_rx_packets_cmdid = + WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, + .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID, + .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID, + .pdev_bss_chan_info_request_cmdid = + WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, }; /* MAIN WMI VDEV param map */ @@ -452,6 +761,22 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = { .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_VDEV_PARAM_UNSUPPORTED, + 
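/* The vdev parameter maps follow the same pattern as the command maps:
 * callers go through ar->wmi.vdev_param instead of hard-coding a
 * per-version id. A hedged usage sketch (vdev_id and value are
 * placeholders; ath10k_wmi_vdev_set_param() is the existing wrapper):
 *
 *	ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
 *					ar->wmi.vdev_param->bw_nss_ratemask,
 *					value);
 *
 * On pre-10.4 firmware the map resolves this to
 * WMI_VDEV_PARAM_UNSUPPORTED, which the set_param path is expected to
 * reject rather than send to firmware that lacks the knob.
 */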
.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, }; /* 10.X WMI VDEV param map */ @@ -511,6 +836,22 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { @@ -569,6 +910,97 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED, .ap_detect_out_of_sync_sleeping_sta_time_secs = WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, + .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, + .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, + .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, + .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, + .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, + .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, + .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, + .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, + .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, +}; + +static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { + .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD, + .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL, + .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL, + .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE, + .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE, + .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME, + .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE, + .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME, + .wmi_vdev_stats_update_period = 
WMI_10_4_VDEV_STATS_UPDATE_PERIOD, + .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME, + .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL, + .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD, + .wmi_vdev_oc_scheduler_air_time_limit = + WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + .wds = WMI_10_4_VDEV_PARAM_WDS, + .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW, + .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX, + .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT, + .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT, + .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM, + .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH, + .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET, + .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION, + .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT, + .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE, + .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE, + .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE, + .sgi = WMI_10_4_VDEV_PARAM_SGI, + .ldpc = WMI_10_4_VDEV_PARAM_LDPC, + .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC, + .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC, + .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD, + .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID, + .nss = WMI_10_4_VDEV_PARAM_NSS, + .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE, + .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE, + .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE, + .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE, + .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + .ap_keepalive_min_idle_inactive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_idle_inactive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + .ap_keepalive_max_unresponsive_time_secs = + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS, + .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET, + .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS, + .txbf = WMI_10_4_VDEV_PARAM_TXBF, + .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE, + .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY, + .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE, + .ap_detect_out_of_sync_sleeping_sta_time_secs = + WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES, + .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR, + .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET, + .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE, + .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK, + .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK, + .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + .early_rx_bmiss_sample_cycle = + WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP, + .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP, + .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA, + .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC, + .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, + .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, }; static struct wmi_pdev_param_map wmi_pdev_param_map = { @@ -621,6 +1053,48 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = { .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED, .burst_enable = 
WMI_PDEV_PARAM_UNSUPPORTED, .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { @@ -674,6 +1148,48 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid 
= WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { @@ -727,6 +1243,48 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR, .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, + .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, + .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, + .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, + .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, + .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, + .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, + .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, + .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, + .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, + .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, + .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, + .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, + .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, + .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, + .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, + .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED, + .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, + .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, }; /* firmware 10.2 specific mappings */ @@ -849,6 +1407,139 @@ static struct 
wmi_cmd_map wmi_10_2_cmd_map = { .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED, + .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED, + .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED, + .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED, + .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .oem_req_cmdid = WMI_CMD_UNSUPPORTED, + .nan_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED, + .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED, + .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED, + .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED, + .fwtest_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, +}; + +static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { + .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK, + .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK, + .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G, + .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G, + .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE, + .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE, + .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE, + .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE, + .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW, + .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH, + .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH, + .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING, + .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE, + .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE, + .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK, + .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI, + .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO, + .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE, + .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE, + .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE, + .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + .pcielp_txbuf_watermark = 
WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + .pdev_stats_update_period = + WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + .vdev_stats_update_period = + WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + .peer_stats_update_period = + WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + .bcnflt_stats_update_period = + WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS, + .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE, + .dcs = WMI_10_4_PDEV_PARAM_DCS, + .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE, + .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD, + .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD, + .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL, + .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL, + .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN, + .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA, + .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG, + .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP, + .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET, + .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR, + .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE, + .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD, + .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST, + .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE, + .smart_antenna_default_antenna = + WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE, + .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID, + .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN, + .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER, + .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID, + .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE, + .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE, + .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + .remove_mcast2ucast_buffer = + WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + .peer_sta_ps_statechg_enable = + WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE, + .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS, + .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID, + .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID, + .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS, + .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY, + .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION, + .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD, + .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE, + .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH, + .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION, + .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN, + .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT, + .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL, + .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G, + .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G, + .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + .enable_per_tid_ampdu = 
WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD, + .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE, + .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET, + .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, + .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, + .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, }; void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch, @@ -1232,6 +1923,8 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, return "completed [preempted]"; case WMI_SCAN_REASON_TIMEDOUT: return "completed [timedout]"; + case WMI_SCAN_REASON_INTERNAL_FAILURE: + return "completed [internal err]"; case WMI_SCAN_REASON_MAX: break; } @@ -1246,6 +1939,10 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, return "preempted"; case WMI_SCAN_EVENT_START_FAILED: return "start failed"; + case WMI_SCAN_EVENT_RESTARTED: + return "restarted"; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: + return "foreign channel exit"; default: return "unknown"; } @@ -1321,6 +2018,8 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) break; case WMI_SCAN_EVENT_DEQUEUED: case WMI_SCAN_EVENT_PREEMPTED: + case WMI_SCAN_EVENT_RESTARTED: + case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT: default: break; } @@ -1433,6 +2132,40 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_mgmt_rx_ev_arg *arg) +{ + struct wmi_10_4_mgmt_rx_event *ev; + struct wmi_10_4_mgmt_rx_hdr *ev_hdr; + size_t pull_len; + u32 msdu_len; + + ev = (struct wmi_10_4_mgmt_rx_event *)skb->data; + ev_hdr = &ev->hdr; + pull_len = sizeof(*ev); + + if (skb->len < pull_len) + return -EPROTO; + + skb_pull(skb, pull_len); + arg->channel = ev_hdr->channel; + arg->buf_len = ev_hdr->buf_len; + arg->status = ev_hdr->status; + arg->snr = ev_hdr->snr; + arg->phy_mode = ev_hdr->phy_mode; + arg->rate = ev_hdr->rate; + + msdu_len = __le32_to_cpu(arg->buf_len); + if (skb->len < msdu_len) + return -EPROTO; + + /* Make sure bytes added for padding are removed. 
 */ + skb_trim(skb, msdu_len); + + return 0; +} + int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_ev_arg arg = {}; @@ -1593,6 +2326,29 @@ static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_ch_info_ev_arg *arg) +{ + struct wmi_10_4_chan_info_event *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->err_code = ev->err_code; + arg->freq = ev->freq; + arg->cmd_flags = ev->cmd_flags; + arg->noise_floor = ev->noise_floor; + arg->rx_clear_count = ev->rx_clear_count; + arg->cycle_count = ev->cycle_count; + arg->chan_tx_pwr_range = ev->chan_tx_pwr_range; + arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; + arg->rx_frame_count = ev->rx_frame_count; + + return 0; +} + void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) { struct wmi_ch_info_ev_arg arg = {}; @@ -2149,33 +2905,42 @@ exit: static void ath10k_wmi_update_tim(struct ath10k *ar, struct ath10k_vif *arvif, struct sk_buff *bcn, - const struct wmi_tim_info *tim_info) + const struct wmi_tim_info_arg *tim_info) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data; struct ieee80211_tim_ie *tim; u8 *ies, *ie; u8 ie_len, pvm_len; __le32 t; - u32 v; + u32 v, tim_len; + + /* When FW reports 0 in tim_len, ensure at least the first byte + * in tim_bitmap is considered for pvm calculation. + */ + tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1; /* if next SWBA has no tim_changed the tim_bitmap is garbage. * we must copy the bitmap upon change and reuse it later */ if (__le32_to_cpu(tim_info->tim_changed)) { int i; - BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) != - sizeof(tim_info->tim_bitmap)); + if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) { + ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu", + tim_len, sizeof(arvif->u.ap.tim_bitmap)); + tim_len = sizeof(arvif->u.ap.tim_bitmap); + } - for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) { + for (i = 0; i < tim_len; i++) { t = tim_info->tim_bitmap[i / 4]; v = __le32_to_cpu(t); arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF; } - /* FW reports either length 0 or 16 - * so we calculate this on our own */ + /* FW reports either length 0 or length based on max supported + * stations, 
so we calculate this on our own + */ arvif->u.ap.tim_len = 0; - for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) + for (i = 0; i < tim_len; i++) if (arvif->u.ap.tim_bitmap[i]) arvif->u.ap.tim_len = i; @@ -2199,7 +2964,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */ if (pvm_len < arvif->u.ap.tim_len) { - int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len; + int expand_size = tim_len - pvm_len; int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len); void *next_ie = ie + 2 + ie_len; @@ -2214,7 +2979,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, } } - if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { + if (pvm_len > tim_len) { ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); return; } @@ -2278,7 +3043,21 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) break; - arg->tim_info[i] = &ev->bcn_info[i].tim_info; + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info; i++; } @@ -2286,12 +3065,69 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + struct wmi_10_4_host_swba_event *ev = (void *)skb->data; + u32 map, tim_len; + size_t i; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; + + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; + + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. + */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; + + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len); + if (tim_len) { + /* Exclude 4 byte guard length */ + tim_len -= 4; + arg->tim_info[i].tim_len = __cpu_to_le32(tim_len); + } else { + arg->tim_info[i].tim_len = 0; + } + + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + + /* 10.4 firmware doesn't have p2p support. notice of absence + * info can be ignored for now. 
+ */ + + i++; + } + + return 0; +} + void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) { struct wmi_swba_ev_arg arg = {}; u32 map; int i = -1; - const struct wmi_tim_info *tim_info; + const struct wmi_tim_info_arg *tim_info; const struct wmi_p2p_noa_info *noa_info; struct ath10k_vif *arvif; struct sk_buff *bcn; @@ -2320,7 +3156,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) break; } - tim_info = arg.tim_info[i]; + tim_info = &arg.tim_info[i]; noa_info = arg.noa_info[i]; ath10k_dbg(ar, ATH10K_DBG_MGMT, @@ -2335,6 +3171,10 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(tim_info->tim_bitmap[1]), __le32_to_cpu(tim_info->tim_bitmap[0])); + /* TODO: Only the first 4 words from tim_bitmap are dumped. + * Extend debug code to dump full tim_bitmap. + */ + arvif = ath10k_get_arvif(ar, vdev_id); if (arvif == NULL) { ath10k_warn(ar, "no vif for vdev_id %d found\n", @@ -3075,10 +3915,10 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) if (ar->fw_api == 1 && ar->fw_version_build > 636) set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); - if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { + if (ar->num_rf_chains > ar->max_spatial_stream) { ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", - ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); - ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; + ar->num_rf_chains, ar->max_spatial_stream); + ar->num_rf_chains = ar->max_spatial_stream; } ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1; @@ -3101,20 +3941,39 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) return; } + if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { + ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + + TARGET_10_4_NUM_VDEVS; + ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + + TARGET_10_4_NUM_VDEVS; + ar->num_tids = ar->num_active_peers * 2; + ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; + } + + /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE + * and WMI_SERVICE_IRAM_TIDS, etc. 
+ */ + for (i = 0; i < num_mem_reqs; ++i) { req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id); num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units); unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size); num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info); - if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) + if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) { + if (ar->num_active_peers) + num_units = ar->num_active_peers + 1; + else + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) { /* number of units to allocate is number of * peers, 1 extra for self peer on target */ /* this needs to be tied, host and target * can get out of sync */ - num_units = TARGET_10X_NUM_PEERS + 1; - else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) - num_units = TARGET_10X_NUM_VDEVS + 1; + num_units = ar->max_num_peers + 1; + } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) { + num_units = ar->max_num_vdevs + 1; + } ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", @@ -3576,6 +4435,73 @@ out: dev_kfree_skb(skb); } +static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10_4_event_id id; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) + goto out; + + trace_ath10k_wmi_event(ar, id, skb->data, skb->len); + + switch (id) { + case WMI_10_4_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! */ + return; + case WMI_10_4_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10_4_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_10_4_SERVICE_READY_EVENTID: + ath10k_wmi_event_service_ready(ar, skb); + break; + case WMI_10_4_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_10_4_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10_4_READY_EVENTID: + ath10k_wmi_event_ready(ar, skb); + break; + case WMI_10_4_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10_4_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10_4_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_10_4_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_10_4_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_10_4_WOW_WAKEUP_HOST_EVENTID: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received event id %d not implemented\n", id); + break; + default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); + break; + } + +out: + dev_kfree_skb(skb); +} + static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) { int ret; @@ -3950,6 +4876,88 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) return buf; } +static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10_4 *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10_4 config = {}; + u32 len; + + config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs); + config.num_peers = __cpu_to_le32(ar->max_num_peers); + config.num_active_peers = __cpu_to_le32(ar->num_active_peers); + config.num_tids = __cpu_to_le32(ar->num_tids); + + config.num_offload_peers = 
__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS); + config.num_offload_reorder_buffs = + __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); + config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK); + + config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI); + + config.rx_decap_mode = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE); + config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS); + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV); + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV); + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES); + config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM); + + config.rx_skip_defrag_timeout_dup_detection_check = + __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK); + + config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); + config.gtk_offload_max_vdev = + __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); + config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); + config.max_peer_ext_stats = + __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); + config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP); + + config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE); + config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE); + config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE); + config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE); + + config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE); + config.tt_support = + __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG); + config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG); + config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG); + config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG); + + len = sizeof(*cmd) + + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + + buf = ath10k_wmi_alloc_skb(ar, len); + if (!buf) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_init_cmd_10_4 *)buf->data; + memcpy(&cmd->resource_config, &config, sizeof(config)); + ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n"); + return buf; +} + int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg) { if (arg->ie_len && !arg->ie) @@ -4172,7 +5180,6 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar, | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHANNEL | WMI_SCAN_EVENT_DEQUEUED; - arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; arg->scan_ctrl_flags |= 
WMI_SCAN_CHAN_STAT_EVENT; arg->n_bssids = 1; arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; @@ -5412,9 +6419,64 @@ static const struct wmi_ops wmi_10_2_4_ops = { /* .gen_adaptive_qcs not implemented */ }; +static const struct wmi_ops wmi_10_4_ops = { + .rx = ath10k_wmi_10_4_op_rx, + .map_svc = wmi_10_4_svc_map, + + .pull_scan = ath10k_wmi_op_pull_scan_ev, + .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev, + .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev, + .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, + .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, + .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev, + .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, + .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + + .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, + .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, + .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, + .gen_init = ath10k_wmi_10_4_op_gen_init, + .gen_start_scan = ath10k_wmi_op_gen_start_scan, + .gen_stop_scan = ath10k_wmi_op_gen_stop_scan, + .gen_vdev_create = ath10k_wmi_op_gen_vdev_create, + .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete, + .gen_vdev_start = ath10k_wmi_op_gen_vdev_start, + .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop, + .gen_vdev_up = ath10k_wmi_op_gen_vdev_up, + .gen_vdev_down = ath10k_wmi_op_gen_vdev_down, + .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param, + .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key, + .gen_peer_create = ath10k_wmi_op_gen_peer_create, + .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, + .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, + .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, + .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, + .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps, + .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list, + .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma, + .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm, + .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang, + .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx, + .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg, + .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable, + .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable, + .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode, + + /* shared with 10.2 */ + .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc, +}; + int ath10k_wmi_attach(struct ath10k *ar) { switch (ar->wmi.op_version) { + case ATH10K_FW_WMI_OP_VERSION_10_4: + ar->wmi.ops = &wmi_10_4_ops; + ar->wmi.cmd = &wmi_10_4_cmd_map; + ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; + ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; + break; case ATH10K_FW_WMI_OP_VERSION_10_2_4: ar->wmi.cmd = &wmi_10_2_4_cmd_map; ar->wmi.ops = &wmi_10_2_4_ops; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index cf44a3d080a3..0d4efc9c5796 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -150,6 +150,12 @@ enum wmi_service { WMI_SERVICE_SAP_AUTH_OFFLOAD, WMI_SERVICE_ATF, WMI_SERVICE_COEX_GPIO, + WMI_SERVICE_ENHANCED_PROXY_STA, + WMI_SERVICE_TT, + WMI_SERVICE_PEER_CACHING, + WMI_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_BSS_CHANNEL_INFO_64, /* keep last */ WMI_SERVICE_MAX, @@ -218,6 +224,51 @@ enum wmi_main_service { WMI_MAIN_SERVICE_TX_ENCAP, }; +enum wmi_10_4_service { + WMI_10_4_SERVICE_BEACON_OFFLOAD = 0, + WMI_10_4_SERVICE_SCAN_OFFLOAD, + 
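/* Each value in enum wmi_10_4_service is the bit index the 10.4 firmware
 * uses in its service-ready bitmap, so the declaration order here is ABI:
 * inserting an entry mid-enum would shift every later service bit. For
 * example, WMI_10_4_SERVICE_ROAM_OFFLOAD below is 2, so roam offload is
 * advertised iff (roughly):
 *
 *	__le32_to_cpu(in[2 / 32]) & BIT(2 % 32)
 */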
WMI_10_4_SERVICE_ROAM_OFFLOAD, + WMI_10_4_SERVICE_BCN_MISS_OFFLOAD, + WMI_10_4_SERVICE_STA_PWRSAVE, + WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_10_4_SERVICE_AP_UAPSD, + WMI_10_4_SERVICE_AP_DFS, + WMI_10_4_SERVICE_11AC, + WMI_10_4_SERVICE_BLOCKACK, + WMI_10_4_SERVICE_PHYERR, + WMI_10_4_SERVICE_BCN_FILTER, + WMI_10_4_SERVICE_RTT, + WMI_10_4_SERVICE_RATECTRL, + WMI_10_4_SERVICE_WOW, + WMI_10_4_SERVICE_RATECTRL_CACHE, + WMI_10_4_SERVICE_IRAM_TIDS, + WMI_10_4_SERVICE_BURST, + WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_10_4_SERVICE_GTK_OFFLOAD, + WMI_10_4_SERVICE_SCAN_SCH, + WMI_10_4_SERVICE_CSA_OFFLOAD, + WMI_10_4_SERVICE_CHATTER, + WMI_10_4_SERVICE_COEX_FREQAVOID, + WMI_10_4_SERVICE_PACKET_POWER_SAVE, + WMI_10_4_SERVICE_FORCE_FW_HANG, + WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_10_4_SERVICE_GPIO, + WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_10_4_SERVICE_STA_KEEP_ALIVE, + WMI_10_4_SERVICE_TX_ENCAP, + WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_10_4_SERVICE_EARLY_RX, + WMI_10_4_SERVICE_ENHANCED_PROXY_STA, + WMI_10_4_SERVICE_TT, + WMI_10_4_SERVICE_ATF, + WMI_10_4_SERVICE_PEER_CACHING, + WMI_10_4_SERVICE_COEX_GPIO, + WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, + WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, +}; + static inline char *wmi_service_name(int service_id) { #define SVCSTR(x) case x: return #x @@ -299,6 +350,12 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD); SVCSTR(WMI_SERVICE_ATF); SVCSTR(WMI_SERVICE_COEX_GPIO); + SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA); + SVCSTR(WMI_SERVICE_TT); + SVCSTR(WMI_SERVICE_PEER_CACHING); + SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF); + SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); + SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); default: return NULL; } @@ -437,6 +494,95 @@ static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TX_ENCAP, len); } +static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, + size_t len) +{ + SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE, len); + SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, len); + SVCMAP(WMI_10_4_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD, len); + SVCMAP(WMI_10_4_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS, len); + SVCMAP(WMI_10_4_SERVICE_11AC, + WMI_SERVICE_11AC, len); + SVCMAP(WMI_10_4_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK, len); + SVCMAP(WMI_10_4_SERVICE_PHYERR, + WMI_SERVICE_PHYERR, len); + SVCMAP(WMI_10_4_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER, len); + SVCMAP(WMI_10_4_SERVICE_RTT, + WMI_SERVICE_RTT, len); + SVCMAP(WMI_10_4_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL, len); + SVCMAP(WMI_10_4_SERVICE_WOW, + WMI_SERVICE_WOW, len); + SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE, len); + SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS, len); + SVCMAP(WMI_10_4_SERVICE_BURST, + WMI_SERVICE_BURST, len); + SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_GTK_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_SCAN_SCH, + WMI_SERVICE_SCAN_SCH, len); + 
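/* Each SVCMAP() statement translates one 10.4 firmware service bit into
 * the generic enum wmi_service id used by the rest of the driver.
 * Paraphrasing the SVCMAP definition earlier in this header (the len
 * argument bounds the read so a shorter bitmap from older firmware
 * cannot be over-read); the exact macro body may differ slightly:
 *
 *	if (x < len * 32 &&
 *	    (__le32_to_cpu(in[x / 32]) & BIT(x % 32)))
 *		__set_bit(y, out);
 */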
SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CSA_OFFLOAD, len); + SVCMAP(WMI_10_4_SERVICE_CHATTER, + WMI_SERVICE_CHATTER, len); + SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_COEX_FREQAVOID, len); + SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_PACKET_POWER_SAVE, len); + SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG, len); + SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_GPIO, + WMI_SERVICE_GPIO, len); + SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len); + SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len); + SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_STA_KEEP_ALIVE, len); + SVCMAP(WMI_10_4_SERVICE_TX_ENCAP, + WMI_SERVICE_TX_ENCAP, len); + SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, + WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len); + SVCMAP(WMI_10_4_SERVICE_EARLY_RX, + WMI_SERVICE_EARLY_RX, len); + SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA, + WMI_SERVICE_ENHANCED_PROXY_STA, len); + SVCMAP(WMI_10_4_SERVICE_TT, + WMI_SERVICE_TT, len); + SVCMAP(WMI_10_4_SERVICE_ATF, + WMI_SERVICE_ATF, len); + SVCMAP(WMI_10_4_SERVICE_PEER_CACHING, + WMI_SERVICE_PEER_CACHING, len); + SVCMAP(WMI_10_4_SERVICE_COEX_GPIO, + WMI_SERVICE_COEX_GPIO, len); + SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF, + WMI_SERVICE_AUX_SPECTRAL_INTF, len); + SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, + WMI_SERVICE_AUX_CHAN_LOAD_INTF, len); + SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, + WMI_SERVICE_BSS_CHANNEL_INFO_64, len); +} + #undef SVCMAP /* 2 word representation of MAC addr */ @@ -565,6 +711,48 @@ struct wmi_cmd_map { u32 tdls_set_state_cmdid; u32 tdls_peer_update_cmdid; u32 adaptive_qcs_cmdid; + u32 scan_update_request_cmdid; + u32 vdev_standby_response_cmdid; + u32 vdev_resume_response_cmdid; + u32 wlan_peer_caching_add_peer_cmdid; + u32 wlan_peer_caching_evict_peer_cmdid; + u32 wlan_peer_caching_restore_peer_cmdid; + u32 wlan_peer_caching_print_all_peers_info_cmdid; + u32 peer_update_wds_entry_cmdid; + u32 peer_add_proxy_sta_entry_cmdid; + u32 rtt_keepalive_cmdid; + u32 oem_req_cmdid; + u32 nan_cmdid; + u32 vdev_ratemask_cmdid; + u32 qboost_cfg_cmdid; + u32 pdev_smart_ant_enable_cmdid; + u32 pdev_smart_ant_set_rx_antenna_cmdid; + u32 peer_smart_ant_set_tx_antenna_cmdid; + u32 peer_smart_ant_set_train_info_cmdid; + u32 peer_smart_ant_set_node_config_ops_cmdid; + u32 pdev_set_antenna_switch_table_cmdid; + u32 pdev_set_ctl_table_cmdid; + u32 pdev_set_mimogain_table_cmdid; + u32 pdev_ratepwr_table_cmdid; + u32 pdev_ratepwr_chainmsk_table_cmdid; + u32 pdev_fips_cmdid; + u32 tt_set_conf_cmdid; + u32 fwtest_cmdid; + u32 vdev_atf_request_cmdid; + u32 peer_atf_request_cmdid; + u32 pdev_get_ani_cck_config_cmdid; + u32 pdev_get_ani_ofdm_config_cmdid; + u32 pdev_reserve_ast_entry_cmdid; + u32 pdev_get_nfcal_power_cmdid; + u32 pdev_get_tpc_cmdid; + u32 pdev_get_ast_info_cmdid; + u32 vdev_set_dscp_tid_map_cmdid; + u32 pdev_get_info_cmdid; + u32 vdev_get_info_cmdid; + u32 vdev_filter_neighbor_rx_packets_cmdid; + u32 mu_cal_start_cmdid; + u32 set_cca_params_cmdid; + u32 pdev_bss_chan_info_request_cmdid; }; /* @@ -1220,6 +1408,216 @@ enum wmi_10_2_event_id { WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, }; +enum wmi_10_4_cmd_id { + WMI_10_4_START_CMDID = 0x9000, + WMI_10_4_END_CMDID = 0x9FFF, + WMI_10_4_INIT_CMDID, + WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID, + WMI_10_4_STOP_SCAN_CMDID, + 
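/* A worked note on the declaration order above: WMI_10_4_INIT_CMDID has
 * no explicit value, so plain C enum auto-increment places it one past
 * WMI_10_4_END_CMDID, while WMI_10_4_START_SCAN_CMDID rewinds to
 * WMI_10_4_START_CMDID and the entries below count up from there:
 *
 *	WMI_10_4_INIT_CMDID           == 0x9FFF + 1 == 0xA000
 *	WMI_10_4_START_SCAN_CMDID     == 0x9000
 *	WMI_10_4_STOP_SCAN_CMDID      == 0x9001
 *	WMI_10_4_SCAN_CHAN_LIST_CMDID == 0x9002, and so on.
 */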
WMI_10_4_SCAN_CHAN_LIST_CMDID, + WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, + WMI_10_4_ECHO_CMDID, + WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, + WMI_10_4_PDEV_SET_CHANNEL_CMDID, + WMI_10_4_PDEV_SET_PARAM_CMDID, + WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_4_PDEV_SET_QUIET_MODE_CMDID, + WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID, + WMI_10_4_VDEV_CREATE_CMDID, + WMI_10_4_VDEV_DELETE_CMDID, + WMI_10_4_VDEV_START_REQUEST_CMDID, + WMI_10_4_VDEV_RESTART_REQUEST_CMDID, + WMI_10_4_VDEV_UP_CMDID, + WMI_10_4_VDEV_STOP_CMDID, + WMI_10_4_VDEV_DOWN_CMDID, + WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10_4_VDEV_RESUME_RESPONSE_CMDID, + WMI_10_4_VDEV_SET_PARAM_CMDID, + WMI_10_4_VDEV_INSTALL_KEY_CMDID, + WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID, + WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID, + WMI_10_4_PEER_CREATE_CMDID, + WMI_10_4_PEER_DELETE_CMDID, + WMI_10_4_PEER_FLUSH_TIDS_CMDID, + WMI_10_4_PEER_SET_PARAM_CMDID, + WMI_10_4_PEER_ASSOC_CMDID, + WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID, + WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID, + WMI_10_4_PEER_MCAST_GROUP_CMDID, + WMI_10_4_BCN_TX_CMDID, + WMI_10_4_PDEV_SEND_BCN_CMDID, + WMI_10_4_BCN_PRB_TMPL_CMDID, + WMI_10_4_BCN_FILTER_RX_CMDID, + WMI_10_4_PRB_REQ_FILTER_RX_CMDID, + WMI_10_4_MGMT_TX_CMDID, + WMI_10_4_PRB_TMPL_CMDID, + WMI_10_4_ADDBA_CLEAR_RESP_CMDID, + WMI_10_4_ADDBA_SEND_CMDID, + WMI_10_4_ADDBA_STATUS_CMDID, + WMI_10_4_DELBA_SEND_CMDID, + WMI_10_4_ADDBA_SET_RESP_CMDID, + WMI_10_4_SEND_SINGLEAMSDU_CMDID, + WMI_10_4_STA_POWERSAVE_MODE_CMDID, + WMI_10_4_STA_POWERSAVE_PARAM_CMDID, + WMI_10_4_STA_MIMO_PS_MODE_CMDID, + WMI_10_4_DBGLOG_CFG_CMDID, + WMI_10_4_PDEV_DFS_ENABLE_CMDID, + WMI_10_4_PDEV_DFS_DISABLE_CMDID, + WMI_10_4_PDEV_QVIT_CMDID, + WMI_10_4_ROAM_SCAN_MODE, + WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10_4_ROAM_SCAN_PERIOD, + WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10_4_ROAM_AP_PROFILE, + WMI_10_4_OFL_SCAN_ADD_AP_PROFILE, + WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10_4_OFL_SCAN_PERIOD, + WMI_10_4_P2P_DEV_SET_DEVICE_INFO, + WMI_10_4_P2P_DEV_SET_DISCOVERABILITY, + WMI_10_4_P2P_GO_SET_BEACON_IE, + WMI_10_4_P2P_GO_SET_PROBE_RESP_IE, + WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID, + WMI_10_4_AP_PS_PEER_PARAM_CMDID, + WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID, + WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_10_4_PDEV_SUSPEND_CMDID, + WMI_10_4_PDEV_RESUME_CMDID, + WMI_10_4_ADD_BCN_FILTER_CMDID, + WMI_10_4_RMV_BCN_FILTER_CMDID, + WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10_4_WOW_ENABLE_CMDID, + WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_10_4_RTT_MEASREQ_CMDID, + WMI_10_4_RTT_TSF_CMDID, + WMI_10_4_RTT_KEEPALIVE_CMDID, + WMI_10_4_OEM_REQ_CMDID, + WMI_10_4_NAN_CMDID, + WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10_4_REQUEST_STATS_CMDID, 
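	/* These per-version IDs are not used directly by the core code; at
	 * attach time they are wired into a struct wmi_cmd_map (see the new
	 * u32 members added to that struct above) so the rest of the driver
	 * stays firmware-agnostic. A minimal sketch of such a map, assuming
	 * the usual ath10k naming conventions:
	 *
	 *	static struct wmi_cmd_map wmi_10_4_cmd_map = {
	 *		.init_cmdid = WMI_10_4_INIT_CMDID,
	 *		.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
	 *		.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
	 *		.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
	 *		...
	 *	};
	 */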
+ WMI_10_4_GPIO_CONFIG_CMDID, + WMI_10_4_GPIO_OUTPUT_CMDID, + WMI_10_4_VDEV_RATEMASK_CMDID, + WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID, + WMI_10_4_GTK_OFFLOAD_CMDID, + WMI_10_4_QBOOST_CFG_CMDID, + WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID, + WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + WMI_10_4_VDEV_SET_KEEPALIVE_CMDID, + WMI_10_4_VDEV_GET_KEEPALIVE_CMDID, + WMI_10_4_FORCE_FW_HANG_CMDID, + WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + WMI_10_4_PDEV_SET_CTL_TABLE_CMDID, + WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_10_4_PDEV_RATEPWR_TABLE_CMDID, + WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + WMI_10_4_PDEV_FIPS_CMDID, + WMI_10_4_TT_SET_CONF_CMDID, + WMI_10_4_FWTEST_CMDID, + WMI_10_4_VDEV_ATF_REQUEST_CMDID, + WMI_10_4_PEER_ATF_REQUEST_CMDID, + WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID, + WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID, + WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID, + WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID, + WMI_10_4_PDEV_GET_TPC_CMDID, + WMI_10_4_PDEV_GET_AST_INFO_CMDID, + WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, + WMI_10_4_PDEV_GET_INFO_CMDID, + WMI_10_4_VDEV_GET_INFO_CMDID, + WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID, + WMI_10_4_MU_CAL_START_CMDID, + WMI_10_4_SET_CCA_PARAMS_CMDID, + WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, +}; + +enum wmi_10_4_event_id { + WMI_10_4_SERVICE_READY_EVENTID = 0x8000, + WMI_10_4_READY_EVENTID, + WMI_10_4_DEBUG_MESG_EVENTID, + WMI_10_4_START_EVENTID = 0x9000, + WMI_10_4_END_EVENTID = 0x9FFF, + WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID, + WMI_10_4_ECHO_EVENTID, + WMI_10_4_UPDATE_STATS_EVENTID, + WMI_10_4_INST_RSSI_STATS_EVENTID, + WMI_10_4_VDEV_START_RESP_EVENTID, + WMI_10_4_VDEV_STANDBY_REQ_EVENTID, + WMI_10_4_VDEV_RESUME_REQ_EVENTID, + WMI_10_4_VDEV_STOPPED_EVENTID, + WMI_10_4_PEER_STA_KICKOUT_EVENTID, + WMI_10_4_HOST_SWBA_EVENTID, + WMI_10_4_TBTTOFFSET_UPDATE_EVENTID, + WMI_10_4_MGMT_RX_EVENTID, + WMI_10_4_CHAN_INFO_EVENTID, + WMI_10_4_PHYERR_EVENTID, + WMI_10_4_ROAM_EVENTID, + WMI_10_4_PROFILE_MATCH, + WMI_10_4_DEBUG_PRINT_EVENTID, + WMI_10_4_PDEV_QVIT_EVENTID, + WMI_10_4_WLAN_PROFILE_DATA_EVENTID, + WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_RTT_ERROR_REPORT_EVENTID, + WMI_10_4_RTT_KEEPALIVE_EVENTID, + WMI_10_4_OEM_CAPABILITY_EVENTID, + WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID, + WMI_10_4_OEM_ERROR_REPORT_EVENTID, + WMI_10_4_NAN_EVENTID, + WMI_10_4_WOW_WAKEUP_HOST_EVENTID, + WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID, + WMI_10_4_GTK_REKEY_FAIL_EVENTID, + WMI_10_4_DCS_INTERFERENCE_EVENTID, + WMI_10_4_PDEV_TPC_CONFIG_EVENTID, + WMI_10_4_CSA_HANDLING_EVENTID, + WMI_10_4_GPIO_INPUT_EVENTID, + WMI_10_4_PEER_RATECODE_LIST_EVENTID, + WMI_10_4_GENERIC_BUFFER_EVENTID, + WMI_10_4_MCAST_BUF_RELEASE_EVENTID, + WMI_10_4_MCAST_LIST_AGEOUT_EVENTID, + WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID, + WMI_10_4_WDS_PEER_EVENTID, + WMI_10_4_PEER_STA_PS_STATECHG_EVENTID, + WMI_10_4_PDEV_FIPS_EVENTID, + WMI_10_4_TT_STATS_EVENTID, + WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID, + WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID, + WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID, + WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID, + WMI_10_4_PDEV_NFCAL_POWER_EVENTID, + WMI_10_4_PDEV_TPC_EVENTID, + WMI_10_4_PDEV_GET_AST_INFO_EVENTID, + WMI_10_4_PDEV_TEMPERATURE_EVENTID, + 
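	/* As with the command IDs, most event IDs above are implicit
	 * increments: only the first three events occupy a separate block
	 * (WMI_10_4_SERVICE_READY_EVENTID == 0x8000, READY == 0x8001,
	 * DEBUG_MESG == 0x8002), while the regular events count up from
	 * WMI_10_4_START_EVENTID, e.g. WMI_10_4_SCAN_EVENTID == 0x9000 and
	 * WMI_10_4_ECHO_EVENTID == 0x9001.
	 */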
WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, + WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, + WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, +}; + enum wmi_phy_mode { MODE_11A = 0, /* 11a Mode */ MODE_11G = 1, /* 11b/g Mode */ @@ -1349,7 +1747,8 @@ enum wmi_channel_change_cause { /* Indicate reason for channel switch */ #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) -#define WMI_MAX_SPATIAL_STREAM 3 +#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */ +#define WMI_10_4_MAX_SPATIAL_STREAM 4 /* HT Capabilities*/ #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ @@ -1979,8 +2378,224 @@ struct wmi_resource_config_10_2 { __le32 feature_mask; } __packed; -#define NUM_UNITS_IS_NUM_VDEVS 0x1 -#define NUM_UNITS_IS_NUM_PEERS 0x2 +#define NUM_UNITS_IS_NUM_VDEVS BIT(0) +#define NUM_UNITS_IS_NUM_PEERS BIT(1) +#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2) + +struct wmi_resource_config_10_4 { + /* Number of virtual devices (VAPs) to support */ + __le32 num_vdevs; + + /* Number of peer nodes to support */ + __le32 num_peers; + + /* Number of active peer nodes to support */ + __le32 num_active_peers; + + /* In offload mode, target supports features like WOW, chatter and other + * protocol offloads. In order to support them some functionalities like + * reorder buffering, PN checking need to be done in target. + * This determines maximum number of peers supported by target in + * offload mode. + */ + __le32 num_offload_peers; + + /* Number of reorder buffers available for doing target based reorder + * Rx reorder buffering + */ + __le32 num_offload_reorder_buffs; + + /* Number of keys per peer */ + __le32 num_peer_keys; + + /* Total number of TX/RX data TIDs */ + __le32 num_tids; + + /* Max skid for resolving hash collisions. + * The address search table is sparse, so that if two MAC addresses + * result in the same hash value, the second of these conflicting + * entries can slide to the next index in the address search table, + * and use it, if it is unoccupied. This ast_skid_limit parameter + * specifies the upper bound on how many subsequent indices to search + * over to find an unoccupied space. + */ + __le32 ast_skid_limit; + + /* The nominal chain mask for transmit. + * The chain mask may be modified dynamically, e.g. to operate AP tx + * with a reduced number of chains if no clients are associated. + * This configuration parameter specifies the nominal chain-mask that + * should be used when not operating with a reduced set of tx chains. + */ + __le32 tx_chain_mask; + + /* The nominal chain mask for receive. + * The chain mask may be modified dynamically, e.g. for a client to use + * a reduced number of chains for receive if the traffic to the client + * is low enough that it doesn't require downlink MIMO or antenna + * diversity. This configuration parameter specifies the nominal + * chain-mask that should be used when not operating with a reduced + * set of rx chains. + */ + __le32 rx_chain_mask; + + /* What rx reorder timeout (ms) to use for the AC. + * Each WMM access class (voice, video, best-effort, background) will + * have its own timeout value to dictate how long to wait for missing + * rx MPDUs to arrive before flushing subsequent MPDUs that have already + * been received. This parameter specifies the timeout in milliseconds + * for each class. + */ + __le32 rx_timeout_pri[4]; + + /* What mode the rx should decap packets to. + * MAC can decap to RAW (no decap), native wifi or Ethernet types. 
+ * This setting also determines the default TX behavior, however TX + * behavior can be modified on a per VAP basis during VAP init + */ + __le32 rx_decap_mode; + + __le32 scan_max_pending_req; + + __le32 bmiss_offload_max_vdev; + + __le32 roam_offload_max_vdev; + + __le32 roam_offload_max_ap_profiles; + + /* How many groups to use for mcast->ucast conversion. + * The target's WAL maintains a table to hold information regarding + * which peers belong to a given multicast group, so that if + * multicast->unicast conversion is enabled, the target can convert + * multicast tx frames to a series of unicast tx frames, to each peer + * within the multicast group. This num_mcast_groups configuration + * parameter tells the target how many multicast groups to provide + * storage for within its multicast group membership table. + */ + __le32 num_mcast_groups; + + /* Size to alloc for the mcast membership table. + * This num_mcast_table_elems configuration parameter tells the target + * how many peer elements it needs to provide storage for in its + * multicast group membership table. These multicast group membership + * table elements are shared by the multicast groups stored within + * the table. + */ + __le32 num_mcast_table_elems; + + /* Whether/how to do multicast->unicast conversion. + * This configuration parameter specifies whether the target should + * perform multicast --> unicast conversion on transmit, and if so, + * what to do if it finds no entries in its multicast group membership + * table for the multicast IP address in the tx frame. + * Configuration value: + * 0 -> Do not perform multicast to unicast conversion. + * 1 -> Convert multicast frames to unicast, if the IP multicast address + * from the tx frame is found in the multicast group membership + * table. If the IP multicast address is not found, drop the frame + * 2 -> Convert multicast frames to unicast, if the IP multicast address + * from the tx frame is found in the multicast group membership + * table. If the IP multicast address is not found, transmit the + * frame as multicast. + */ + __le32 mcast2ucast_mode; + + /* How much memory to allocate for a tx PPDU dbg log. + * This parameter controls how much memory the target will allocate to + * store a log of tx PPDU meta-information (how large the PPDU was, + * when it was sent, whether it was successful, etc.) + */ + __le32 tx_dbg_log_size; + + /* How many AST entries to be allocated for WDS */ + __le32 num_wds_entries; + + /* MAC DMA burst size. 0 -default, 1 -256B */ + __le32 dma_burst_size; + + /* Fixed delimiters to be inserted after every MPDU to account for + * interface latency to avoid underrun. + */ + __le32 mac_aggr_delim; + + /* Determine whether target is responsible for detecting duplicate + * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering + * is always performed on the target. + * + * 0: target responsible for frag timeout and dup checking + * 1: host responsible for frag timeout and dup checking + */ + __le32 rx_skip_defrag_timeout_dup_detection_check; + + /* Configuration for VoW : No of Video nodes to be supported and max + * no of descriptors for each video link (node). + */ + __le32 vow_config; + + /* Maximum vdev that could use gtk offload */ + __le32 gtk_offload_max_vdev; + + /* Number of msdu descriptors target should use */ + __le32 num_msdu_desc; + + /* Max number of tx fragments per MSDU. + * This parameter controls the max number of tx fragments per MSDU. 
+ * This will be passed by the target as part of the WMI_SERVICE_READY event + * and is overridden by the OS shim as required. + */ + __le32 max_frag_entries; + + /* Max number of extended peer stats. + * This parameter controls the max number of peers for which extended + * statistics are supported by target + */ + __le32 max_peer_ext_stats; + + /* Smart antenna capabilities information. + * 1 - Smart antenna is enabled + * 0 - Smart antenna is disabled + * In future this can contain smart antenna specific capabilities. + */ + __le32 smart_ant_cap; + + /* User can configure the buffers allocated for each AC (BE, BK, VI, VO) + * during init. + */ + __le32 bk_minfree; + __le32 be_minfree; + __le32 vi_minfree; + __le32 vo_minfree; + + /* Rx batch mode capability. + * 1 - Rx batch mode enabled + * 0 - Rx batch mode disabled + */ + __le32 rx_batchmode; + + /* Thermal throttling capability. + * 1 - Capable of thermal throttling + * 0 - Not capable of thermal throttling + */ + __le32 tt_support; + + /* ATF configuration. + * 1 - Enable ATF + * 0 - Disable ATF + */ + __le32 atf_config; + + /* Configure padding to manage IP header un-alignment + * 1 - Enable padding + * 0 - Disable padding + */ + __le32 iphdr_pad_config; + + /* qwrap configuration + * 1 - This is qwrap configuration + * 0 - This is not qwrap + */ + __le32 qwrap_config; +} __packed; /* structure describing host memory chunk. */ struct host_memory_chunk { @@ -2014,6 +2629,11 @@ struct wmi_init_cmd_10_2 { struct wmi_host_mem_chunks mem_chunks; } __packed; +struct wmi_init_cmd_10_4 { + struct wmi_resource_config_10_4 resource_config; + struct wmi_host_mem_chunks mem_chunks; +} __packed; + struct wmi_chan_list_entry { __le16 freq; u8 phy_mode; /* valid for 10.2 only */ @@ -2260,15 +2880,17 @@ enum wmi_bss_filter { }; enum wmi_scan_event_type { - WMI_SCAN_EVENT_STARTED = 0x1, - WMI_SCAN_EVENT_COMPLETED = 0x2, - WMI_SCAN_EVENT_BSS_CHANNEL = 0x4, - WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8, - WMI_SCAN_EVENT_DEQUEUED = 0x10, - WMI_SCAN_EVENT_PREEMPTED = 0x20, /* possibly by high-prio scan */ - WMI_SCAN_EVENT_START_FAILED = 0x40, - WMI_SCAN_EVENT_RESTARTED = 0x80, - WMI_SCAN_EVENT_MAX = 0x8000 + WMI_SCAN_EVENT_STARTED = BIT(0), + WMI_SCAN_EVENT_COMPLETED = BIT(1), + WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2), + WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3), + WMI_SCAN_EVENT_DEQUEUED = BIT(4), + /* possibly by high-prio scan */ + WMI_SCAN_EVENT_PREEMPTED = BIT(5), + WMI_SCAN_EVENT_START_FAILED = BIT(6), + WMI_SCAN_EVENT_RESTARTED = BIT(7), + WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8), + WMI_SCAN_EVENT_MAX = BIT(15), }; enum wmi_scan_completion_reason { @@ -2276,6 +2898,7 @@ enum wmi_scan_completion_reason { WMI_SCAN_REASON_CANCELLED, WMI_SCAN_REASON_PREEMPTED, WMI_SCAN_REASON_TIMEDOUT, + WMI_SCAN_REASON_INTERNAL_FAILURE, WMI_SCAN_REASON_MAX, }; @@ -2329,6 +2952,21 @@ struct wmi_mgmt_rx_event_v2 { u8 buf[0]; } __packed; +struct wmi_10_4_mgmt_rx_hdr { + __le32 channel; + __le32 snr; + u8 rssi_ctl[4]; + __le32 rate; + __le32 phy_mode; + __le32 buf_len; + __le32 status; +} __packed; + +struct wmi_10_4_mgmt_rx_event { + struct wmi_10_4_mgmt_rx_hdr hdr; + u8 buf[0]; +} __packed; + #define WMI_RX_STATUS_OK 0x00 #define WMI_RX_STATUS_ERR_CRC 0x01 #define WMI_RX_STATUS_ERR_DECRYPT 0x08 @@ -2613,6 +3251,48 @@ struct wmi_pdev_param_map { u32 burst_dur; u32 burst_enable; u32 cal_period; + u32 aggr_burst; + u32 rx_decap_mode; + u32 smart_antenna_default_antenna; + u32 igmpmld_override; + u32 igmpmld_tid; + u32 antenna_gain; + u32 rx_filter; + u32 set_mcast_to_ucast_tid; + u32 
proxy_sta_mode; + u32 set_mcast2ucast_mode; + u32 set_mcast2ucast_buffer; + u32 remove_mcast2ucast_buffer; + u32 peer_sta_ps_statechg_enable; + u32 igmpmld_ac_override; + u32 block_interbss; + u32 set_disable_reset_cmdid; + u32 set_msdu_ttl_cmdid; + u32 set_ppdu_duration_cmdid; + u32 txbf_sound_period_cmdid; + u32 set_promisc_mode_cmdid; + u32 set_burst_mode_cmdid; + u32 en_stats; + u32 mu_group_policy; + u32 noise_detection; + u32 noise_threshold; + u32 dpd_enable; + u32 set_mcast_bcast_echo; + u32 atf_strict_sch; + u32 atf_sched_duration; + u32 ant_plzn; + u32 mgmt_retry_limit; + u32 sensitivity_level; + u32 signed_txpower_2g; + u32 signed_txpower_5g; + u32 enable_per_tid_amsdu; + u32 enable_per_tid_ampdu; + u32 cca_threshold; + u32 rts_fixed_rate; + u32 pdev_reset; + u32 wapi_mbssid_offset; + u32 arp_srcaddr; + u32 arp_dstaddr; }; #define WMI_PDEV_PARAM_UNSUPPORTED 0 @@ -2828,6 +3508,100 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_CAL_PERIOD }; +enum wmi_10_4_pdev_param { + WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1, + WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK, + WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G, + WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G, + WMI_10_4_PDEV_PARAM_TXPOWER_SCALE, + WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE, + WMI_10_4_PDEV_PARAM_BEACON_TX_MODE, + WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE, + WMI_10_4_PDEV_PARAM_PROTECTION_MODE, + WMI_10_4_PDEV_PARAM_DYNAMIC_BW, + WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH, + WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH, + WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH, + WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING, + WMI_10_4_PDEV_PARAM_LTR_ENABLE, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO, + WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, + WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE, + WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE, + WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, + WMI_10_4_PDEV_PARAM_L1SS_ENABLE, + WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, + WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, + WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, + WMI_10_4_PDEV_PARAM_PMF_QOS, + WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE, + WMI_10_4_PDEV_PARAM_DCS, + WMI_10_4_PDEV_PARAM_ANI_ENABLE, + WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD, + WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD, + WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL, + WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL, + WMI_10_4_PDEV_PARAM_DYNTXCHAIN, + WMI_10_4_PDEV_PARAM_PROXY_STA, + WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG, + WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP, + WMI_10_4_PDEV_PARAM_AGGR_BURST, + WMI_10_4_PDEV_PARAM_RX_DECAP_MODE, + WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET, + WMI_10_4_PDEV_PARAM_BURST_DUR, + WMI_10_4_PDEV_PARAM_BURST_ENABLE, + WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE, + WMI_10_4_PDEV_PARAM_IGMPMLD_TID, + WMI_10_4_PDEV_PARAM_ANTENNA_GAIN, + WMI_10_4_PDEV_PARAM_RX_FILTER, + WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID, + WMI_10_4_PDEV_PARAM_PROXY_STA_MODE, + WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE, + WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, + WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE, + WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE, + WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS, + 
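	/* The wmi_pdev_param_map members added above translate these
	 * firmware-specific values into the abstract parameter IDs used by
	 * the core code; map entries a given firmware lacks are left at
	 * WMI_PDEV_PARAM_UNSUPPORTED (0) so requests for them can be
	 * rejected up front. A sketch of that check, assuming the usual
	 * ath10k accessor:
	 *
	 *	u32 id = ar->wmi.pdev_param->burst_enable;
	 *
	 *	if (id == WMI_PDEV_PARAM_UNSUPPORTED)
	 *		return -EOPNOTSUPP;
	 */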
WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID, + WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID, + WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID, + WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID, + WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID, + WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID, + WMI_10_4_PDEV_PARAM_EN_STATS, + WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_NOISE_DETECTION, + WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD, + WMI_10_4_PDEV_PARAM_DPD_ENABLE, + WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO, + WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH, + WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION, + WMI_10_4_PDEV_PARAM_ANT_PLZN, + WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT, + WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL, + WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G, + WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G, + WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU, + WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU, + WMI_10_4_PDEV_PARAM_CCA_THRESHOLD, + WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE, + WMI_10_4_PDEV_PARAM_CAL_PERIOD, + WMI_10_4_PDEV_PARAM_PDEV_RESET, + WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, + WMI_10_4_PDEV_PARAM_ARP_SRCADDR, + WMI_10_4_PDEV_PARAM_ARP_DSTADDR, +}; + struct wmi_pdev_set_param_cmd { __le32 param_id; __le32 param_value; @@ -3506,6 +4280,22 @@ struct wmi_vdev_param_map { u32 drop_unencry; u32 tx_encap_type; u32 ap_detect_out_of_sync_sleeping_sta_time_secs; + u32 rc_num_retries; + u32 cabq_maxdur; + u32 mfptest_set; + u32 rts_fixed_rate; + u32 vht_sgimask; + u32 vht80_ratemask; + u32 early_rx_adjust_enable; + u32 early_rx_tgt_bmiss_num; + u32 early_rx_bmiss_sample_cycle; + u32 early_rx_slop_step; + u32 early_rx_init_slop; + u32 early_rx_adjust_pause; + u32 proxy_sta; + u32 meru_vc; + u32 rx_decap_type; + u32 bw_nss_ratemask; }; #define WMI_VDEV_PARAM_UNSUPPORTED 0 @@ -3764,6 +4554,75 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_VHT80_RATEMASK, }; +enum wmi_10_4_vdev_param { + WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1, + WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD, + WMI_10_4_VDEV_PARAM_BEACON_INTERVAL, + WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL, + WMI_10_4_VDEV_PARAM_MULTICAST_RATE, + WMI_10_4_VDEV_PARAM_MGMT_TX_RATE, + WMI_10_4_VDEV_PARAM_SLOT_TIME, + WMI_10_4_VDEV_PARAM_PREAMBLE, + WMI_10_4_VDEV_PARAM_SWBA_TIME, + WMI_10_4_VDEV_STATS_UPDATE_PERIOD, + WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME, + WMI_10_4_VDEV_HOST_SWBA_INTERVAL, + WMI_10_4_VDEV_PARAM_DTIM_PERIOD, + WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, + WMI_10_4_VDEV_PARAM_WDS, + WMI_10_4_VDEV_PARAM_ATIM_WINDOW, + WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX, + WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT, + WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT, + WMI_10_4_VDEV_PARAM_FEATURE_WMM, + WMI_10_4_VDEV_PARAM_CHWIDTH, + WMI_10_4_VDEV_PARAM_CHEXTOFFSET, + WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION, + WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT, + WMI_10_4_VDEV_PARAM_MGMT_RATE, + WMI_10_4_VDEV_PARAM_PROTECTION_MODE, + WMI_10_4_VDEV_PARAM_FIXED_RATE, + WMI_10_4_VDEV_PARAM_SGI, + WMI_10_4_VDEV_PARAM_LDPC, + WMI_10_4_VDEV_PARAM_TX_STBC, + WMI_10_4_VDEV_PARAM_RX_STBC, + WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD, + WMI_10_4_VDEV_PARAM_DEF_KEYID, + WMI_10_4_VDEV_PARAM_NSS, + WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE, + WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE, + WMI_10_4_VDEV_PARAM_MCAST_INDICATE, + WMI_10_4_VDEV_PARAM_DHCP_INDICATE, + WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, + WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS, + WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET, + 
WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS, + WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES, + WMI_10_4_VDEV_PARAM_TXBF, + WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE, + WMI_10_4_VDEV_PARAM_DROP_UNENCRY, + WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + WMI_10_4_VDEV_PARAM_CABQ_MAXDUR, + WMI_10_4_VDEV_PARAM_MFPTEST_SET, + WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE, + WMI_10_4_VDEV_PARAM_VHT_SGIMASK, + WMI_10_4_VDEV_PARAM_VHT80_RATEMASK, + WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE, + WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM, + WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE, + WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP, + WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP, + WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE, + WMI_10_4_VDEV_PARAM_PROXY_STA, + WMI_10_4_VDEV_PARAM_MERU_VC, + WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE, + WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, +}; + #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1) #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) @@ -4305,6 +5164,14 @@ struct wmi_tim_info { __le32 tim_num_ps_pending; } __packed; +struct wmi_tim_info_arg { + __le32 tim_len; + __le32 tim_mcast; + const __le32 *tim_bitmap; + __le32 tim_changed; + __le32 tim_num_ps_pending; +} __packed; + /* Maximum number of NOA Descriptors supported */ #define WMI_P2P_MAX_NOA_DESCRIPTORS 4 #define WMI_P2P_OPPPS_ENABLE_BIT BIT(0) @@ -4336,6 +5203,47 @@ struct wmi_host_swba_event { struct wmi_bcn_info bcn_info[0]; } __packed; +/* 16 words = 512 client + 1 word = for guard */ +#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17 + +struct wmi_10_4_tim_info { + __le32 tim_len; + __le32 tim_mcast; + __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE]; + __le32 tim_changed; + __le32 tim_num_ps_pending; +} __packed; + +#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1 + +struct wmi_10_4_p2p_noa_info { + /* Bit 0 - Flag to indicate an update in NOA schedule + * Bits 7-1 - Reserved + */ + u8 changed; + /* NOA index */ + u8 index; + /* Bit 0 - Opp PS state of the AP + * Bits 1-7 - Ctwindow in TUs + */ + u8 ctwindow_oppps; + /* Number of NOA descriptors */ + u8 num_descriptors; + + struct wmi_p2p_noa_descriptor + noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS]; +} __packed; + +struct wmi_10_4_bcn_info { + struct wmi_10_4_tim_info tim_info; + struct wmi_10_4_p2p_noa_info p2p_noa_info; +} __packed; + +struct wmi_10_4_host_swba_event { + __le32 vdev_map; + struct wmi_10_4_bcn_info bcn_info[0]; +} __packed; + #define WMI_MAX_AP_VDEV 16 struct wmi_tbtt_offset_event { @@ -4660,6 +5568,18 @@ struct wmi_chan_info_event { __le32 cycle_count; } __packed; +struct wmi_10_4_chan_info_event { + __le32 err_code; + __le32 freq; + __le32 cmd_flags; + __le32 noise_floor; + __le32 rx_clear_count; + __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; +} __packed; + struct wmi_peer_sta_kickout_event { struct wmi_mac_addr peer_macaddr; } __packed; @@ -4840,6 +5760,9 @@ struct wmi_ch_info_ev_arg { __le32 noise_floor; __le32 rx_clear_count; __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; }; struct wmi_vdev_start_ev_arg { @@ -4855,7 +5778,7 @@ struct wmi_peer_kick_ev_arg { struct wmi_swba_ev_arg { __le32 vdev_map; - const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV]; + struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV]; const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV]; }; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 
fc595b92ac56..c5f8bc4b5595 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -455,7 +455,7 @@ #define AR_PHY_MODE (AR_SM_BASE + 0x8) #define AR_PHY_ACTIVE (AR_SM_BASE + 0xc) #define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20)) -#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24) +#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24)) #define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28) #define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c) #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30) @@ -495,7 +495,7 @@ #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0 -#define AR_PHY_TEST (AR_SM_BASE + 0x160) +#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160)) #define AR_PHY_TEST_BBB_OBS_SEL 0x780000 #define AR_PHY_TEST_BBB_OBS_SEL_S 19 @@ -521,24 +521,29 @@ #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29 -#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) +#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168)) -#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c) +#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c)) #define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170)) #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3 -#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174) -#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178) -#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c) -#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180) -#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190) -#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194) +#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174)) +#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178)) +#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c)) +#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180)) +#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190)) +#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194)) #define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 
0x198 : 0x1a4)) #define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8) #define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) +#define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c) +#define AR_PHY_HEAVYCLIP_2 (AR_SM_BASE + 0x1a0) +#define AR_PHY_HEAVYCLIP_3 (AR_SM_BASE + 0x1a4) +#define AR_PHY_HEAVYCLIP_4 (AR_SM_BASE + 0x1a8) +#define AR_PHY_HEAVYCLIP_5 (AR_SM_BASE + 0x1ac) #define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) #define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2)) diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index dbf8f4959642..da32c8faad94 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -765,6 +765,8 @@ static int read_file_reset(struct seq_file *file, void *data) [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon", [RESET_TYPE_MCI] = "MCI Reset", [RESET_TYPE_CALIBRATION] = "Calibration error", + [RESET_TX_DMA_ERROR] = "Tx DMA stop error", + [RESET_RX_DMA_ERROR] = "Rx DMA stop error", }; int i; diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index a8e9319958e6..cd68c5f0e751 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -50,6 +50,8 @@ enum ath_reset_type { RESET_TYPE_BEACON_STUCK, RESET_TYPE_MCI, RESET_TYPE_CALIBRATION, + RESET_TX_DMA_ERROR, + RESET_RX_DMA_ERROR, __RESET_TYPE_MAX }; diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index e98a9eaba7ff..1ece42c2443d 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -30,6 +30,157 @@ struct ath_radar_data { u8 pulse_length_pri; }; +/**** begin: CHIRP ************************************************************/ + +/* min and max gradients for defined FCC chirping pulses, given by + * - 20MHz chirp width over a pulse width of 50us + * - 5MHz chirp width over a pulse width of 100us + */ +static const int BIN_DELTA_MIN = 1; +static const int BIN_DELTA_MAX = 10; + +/* we need at least 3 deltas / 4 samples for a reliable chirp detection */ +#define NUM_DIFFS 3 +static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1); + +/* Threshold for difference of delta peaks */ +static const int MAX_DIFF = 2; + +/* width range to be checked for chirping */ +static const int MIN_CHIRP_PULSE_WIDTH = 20; +static const int MAX_CHIRP_PULSE_WIDTH = 110; + +struct ath9k_dfs_fft_20 { + u8 bin[28]; + u8 lower_bins[3]; +} __packed; +struct ath9k_dfs_fft_40 { + u8 bin[64]; + u8 lower_bins[3]; + u8 upper_bins[3]; +} __packed; + +static inline int fft_max_index(u8 *bins) +{ + return (bins[2] & 0xfc) >> 2; +} +static inline int fft_max_magnitude(u8 *bins) +{ + return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10; +} +static inline u8 fft_bitmap_weight(u8 *bins) +{ + return bins[0] & 0x3f; +} + +static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft, + bool is_ctl, bool is_ext) +{ + const int DFS_UPPER_BIN_OFFSET = 64; + /* if detected radar on both channels, select the significant one */ + if (is_ctl && is_ext) { + /* first check whether channels have 'strong' bins */ + is_ctl = fft_bitmap_weight(fft->lower_bins) != 0; + is_ext = fft_bitmap_weight(fft->upper_bins) != 0; + + /* if still unclear, take higher magnitude */ + if (is_ctl && is_ext) { + int mag_lower = fft_max_magnitude(fft->lower_bins); + int mag_upper = fft_max_magnitude(fft->upper_bins); + if (mag_upper > mag_lower) + is_ctl = false; + else + is_ext = false; + } + } + if (is_ctl) + return 
fft_max_index(fft->lower_bins); + return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET; +} +static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, + int datalen, bool is_ctl, bool is_ext) +{ + int i; + int max_bin[FFT_NUM_SAMPLES]; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + int prev_delta; + + if (IS_CHAN_HT40(ah->curchan)) { + struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data; + int num_fft_packets = datalen / sizeof(*fft); + if (num_fft_packets == 0) + return false; + + ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n", + datalen, num_fft_packets); + if (num_fft_packets < (FFT_NUM_SAMPLES)) { + ath_dbg(common, DFS, "not enough packets for chirp\n"); + return false; + } + /* HW sometimes adds 2 garbage bytes in front of FFT samples */ + if ((datalen % sizeof(*fft)) == 2) { + fft = (struct ath9k_dfs_fft_40 *) (data + 2); + ath_dbg(common, DFS, "fixing datalen by 2\n"); + } + if (IS_CHAN_HT40MINUS(ah->curchan)) { + int temp = is_ctl; + is_ctl = is_ext; + is_ext = temp; + } + for (i = 0; i < FFT_NUM_SAMPLES; i++) + max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl, + is_ext); + } else { + struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data; + int num_fft_packets = datalen / sizeof(*fft); + if (num_fft_packets == 0) + return false; + ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n", + datalen, num_fft_packets); + if (num_fft_packets < (FFT_NUM_SAMPLES)) { + ath_dbg(common, DFS, "not enough packets for chirp\n"); + return false; + } + /* in ht20, this is a 6-bit signed number => shift it to 0 */ + for (i = 0; i < FFT_NUM_SAMPLES; i++) + max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20; + } + ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n", + max_bin[0], max_bin[1], max_bin[2], max_bin[3]); + + /* Check for chirp attributes within specs + * a) delta of adjacent max_bins is within range + * b) delta of adjacent deltas are within tolerance + */ + prev_delta = 0; + for (i = 0; i < NUM_DIFFS; i++) { + int ddelta = -1; + int delta = max_bin[i + 1] - max_bin[i]; + + /* ensure gradient is within valid range */ + if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) { + ath_dbg(common, DFS, "CHIRP: invalid delta %d " + "in sample %d\n", delta, i); + return false; + } + if (i == 0) + goto done; + ddelta = delta - prev_delta; + if (abs(ddelta) > MAX_DIFF) { + ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n", + ddelta); + return false; + } +done: + ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n", + i, delta, ddelta); + prev_delta = delta; + } + return true; +} +/**** end: CHIRP **************************************************************/ + /* convert pulse duration to usecs, considering clock mode */ static u32 dur_to_usecs(struct ath_hw *ah, u32 dur) { @@ -113,12 +264,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc, return false; } - /* - * TODO: check chirping pulses - * checks for chirping are dependent on the DFS regulatory domain - * used, which is yet TBD - */ - /* convert duration to usecs */ pe->width = dur_to_usecs(sc->sc_ah, dur); pe->rssi = rssi; @@ -190,6 +335,16 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, if (!ath9k_postprocess_radar_event(sc, &ard, &pe)) return; + if (pe.width > MIN_CHIRP_PULSE_WIDTH && + pe.width < MAX_CHIRP_PULSE_WIDTH) { + bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND); + bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND); + int clen = datalen - 3; + pe.chirp = 
ath9k_check_chirping(sc, data, clen, is_ctl, is_ext); + } else { + pe.chirp = false; + } + ath_dbg(common, DFS, "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, " "width=%d, rssi=%d, delta_ts=%llu\n", @@ -198,7 +353,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, sc->dfs_prev_pulse_ts = pe.ts; if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND) ath9k_dfs_process_radar_pulse(sc, &pe); - if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) { + if (IS_CHAN_HT40(ah->curchan) && + ard.pulse_bw_info & EXT_CH_RADAR_FOUND) { pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20; ath9k_dfs_process_radar_pulse(sc, &pe); } diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 6c75fb1ab77d..d3189daf9996 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -491,10 +491,9 @@ bool ath_stoprecv(struct ath_softc *sc) if (!(ah->ah_flags & AH_UNPLUGGED) && unlikely(!stopped)) { - ath_err(ath9k_hw_common(sc->sc_ah), - "Could not stop RX, we could be " - "confusing the DMA engine when we start RX up\n"); - ATH_DBG_WARN_ON_ONCE(!stopped); + ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, + "Failed to stop Rx DMA\n"); + RESET_STAT_INC(sc, RESET_RX_DMA_ERROR); } return stopped && !reset; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3ad79bb4f2c2..b766a7fc60aa 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1883,8 +1883,11 @@ bool ath_drain_all_txq(struct ath_softc *sc) npend |= BIT(i); } - if (npend) - ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); + if (npend) { + RESET_STAT_INC(sc, RESET_TX_DMA_ERROR); + ath_dbg(common, RESET, + "Failed to stop TX DMA, queues=0x%03x!\n", npend); + } for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { if (!ATH_TXQ_SETUP(sc, i)) @@ -2470,8 +2473,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bf = list_first_entry(&bf_q, struct ath_buf, list); hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; - if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) { - hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA; + if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) { + hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, sizeof(*hdr), DMA_TO_DEVICE); } diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c index 1b5ad1965607..cc5c592fc4c0 100644 --- a/drivers/net/wireless/ath/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/dfs_pri_detector.c @@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde, tmp_false_count++; } } - if (ps.count < min_count) + if (ps.count <= min_count) /* did not reach minimum count, drop sequence */ continue; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index c79cfe02ec80..e4be2d9bbac4 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -736,6 +736,92 @@ static int wil_fix_bcon(struct wil6210_priv *wil, return rc; } +/* internal functions for device reset and starting AP */ +static int _wil_cfg80211_set_ies(struct wiphy *wiphy, + size_t probe_ies_len, const u8 *probe_ies, + size_t assoc_ies_len, const u8 *assoc_ies) + +{ + int rc; + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + /* FW do not form regular beacon, so bcon IE's are not set + * For the DMG bcon, when it will be 
supported, bcon IE's will + * be reused; add something like: + * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, + * bcon->beacon_ies); + */ + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies); + if (rc) { + wil_err(wil, "set_ie(PROBE_RESP) failed\n"); + return rc; + } + + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies); + if (rc) { + wil_err(wil, "set_ie(ASSOC_RESP) failed\n"); + return rc; + } + + return 0; +} + +static int _wil_cfg80211_start_ap(struct wiphy *wiphy, + struct net_device *ndev, + const u8 *ssid, size_t ssid_len, u32 privacy, + int bi, u8 chan, + size_t probe_ies_len, const u8 *probe_ies, + size_t assoc_ies_len, const u8 *assoc_ies, + u8 hidden_ssid) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + int rc; + struct wireless_dev *wdev = ndev->ieee80211_ptr; + u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + + wil_set_recovery_state(wil, fw_recovery_idle); + + mutex_lock(&wil->mutex); + + __wil_down(wil); + rc = __wil_up(wil); + if (rc) + goto out; + + rc = wmi_set_ssid(wil, ssid_len, ssid); + if (rc) + goto out; + + rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies, + assoc_ies_len, assoc_ies); + if (rc) + goto out; + + wil->privacy = privacy; + wil->channel = chan; + wil->hidden_ssid = hidden_ssid; + + netif_carrier_on(ndev); + + rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid); + if (rc) + goto err_pcp_start; + + rc = wil_bcast_init(wil); + if (rc) + goto err_bcast; + + goto out; /* success */ + +err_bcast: + wmi_pcp_stop(wil); +err_pcp_start: + netif_carrier_off(ndev); +out: + mutex_unlock(&wil->mutex); + return rc; +} + static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_beacon_data *bcon) @@ -746,6 +832,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, const u8 *pr_ies = NULL; size_t pr_ies_len = 0; int rc; + u32 privacy = 0; wil_dbg_misc(wil, "%s()\n", __func__); wil_print_bcon_data(bcon); @@ -760,40 +847,41 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, wil_print_bcon_data(bcon); } - /* FW do not form regular beacon, so bcon IE's are not set - * For the DMG bcon, when it will be supported, bcon IE's will - * be reused; add something like: - * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, - * bcon->beacon_ies); - */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); - if (rc) { - wil_err(wil, "set_ie(PROBE_RESP) failed\n"); - return rc; - } + if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len)) + privacy = 1; - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, - bcon->assocresp_ies_len, - bcon->assocresp_ies); - if (rc) { - wil_err(wil, "set_ie(ASSOC_RESP) failed\n"); - return rc; + /* in case privacy has changed, need to restart the AP */ + if (wil->privacy != privacy) { + struct wireless_dev *wdev = ndev->ieee80211_ptr; + + wil_dbg_misc(wil, "privacy changed %d=>%d. 
Restarting AP\n", + wil->privacy, privacy); + + rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid, + wdev->ssid_len, privacy, + wdev->beacon_interval, + wil->channel, pr_ies_len, pr_ies, + bcon->assocresp_ies_len, + bcon->assocresp_ies, + wil->hidden_ssid); + } else { + rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies, + bcon->assocresp_ies_len, + bcon->assocresp_ies); } - return 0; + return rc; } static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ap_settings *info) { - int rc = 0; + int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_channel *channel = info->chandef.chan; struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; - u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); const u8 *pr_ies = NULL; @@ -807,6 +895,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, return -EINVAL; } + switch (info->hidden_ssid) { + case NL80211_HIDDEN_SSID_NOT_IN_USE: + hidden_ssid = WMI_HIDDEN_SSID_DISABLED; + break; + + case NL80211_HIDDEN_SSID_ZERO_LEN: + hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY; + break; + + case NL80211_HIDDEN_SSID_ZERO_CONTENTS: + hidden_ssid = WMI_HIDDEN_SSID_CLEAR; + break; + + default: + wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid); + return -EOPNOTSUPP; + } wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value, channel->center_freq, info->privacy ? "secure" : "open"); wil_dbg_misc(wil, "Privacy: %d auth_type %d\n", @@ -830,70 +935,14 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); } - wil_set_recovery_state(wil, fw_recovery_idle); - - mutex_lock(&wil->mutex); - - __wil_down(wil); - rc = __wil_up(wil); - if (rc) - goto out; - - rc = wmi_set_ssid(wil, info->ssid_len, info->ssid); - if (rc) - goto out; - - /* IE's */ - /* bcon 'head IE's are not relevant for 60g band */ - /* - * FW do not form regular beacon, so bcon IE's are not set - * For the DMG bcon, when it will be supported, bcon IE's will - * be reused; add something like: - * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, - * bcon->beacon_ies); - */ - wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); - wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, - bcon->assocresp_ies); - - wil->privacy = info->privacy; - - switch (info->hidden_ssid) { - case NL80211_HIDDEN_SSID_NOT_IN_USE: - hidden_ssid = WMI_HIDDEN_SSID_DISABLED; - break; - - case NL80211_HIDDEN_SSID_ZERO_LEN: - hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY; - break; - - case NL80211_HIDDEN_SSID_ZERO_CONTENTS: - hidden_ssid = WMI_HIDDEN_SSID_CLEAR; - break; - - default: - rc = -EOPNOTSUPP; - goto out; - } - - netif_carrier_on(ndev); - - rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, - channel->hw_value, hidden_ssid); - if (rc) - goto err_pcp_start; + rc = _wil_cfg80211_start_ap(wiphy, ndev, + info->ssid, info->ssid_len, info->privacy, + info->beacon_interval, channel->hw_value, + pr_ies_len, pr_ies, + bcon->assocresp_ies_len, + bcon->assocresp_ies, + hidden_ssid); - rc = wil_bcast_init(wil); - if (rc) - goto err_bcast; - - goto out; /* success */ -err_bcast: - wmi_pcp_stop(wil); -err_pcp_start: - netif_carrier_off(ndev); -out: - mutex_unlock(&wil->mutex); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h 
b/drivers/net/wireless/ath/wil6210/wil6210.h index 275355d46a36..c63e4a35eaa0 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -559,6 +559,8 @@ struct wil6210_priv { /* profile */ u32 monitor_flags; u32 privacy; /* secure connection? */ + u8 hidden_ssid; /* relevant in AP mode */ + u16 channel; /* relevant in AP mode */ int sinfo_gen; u32 ap_isolate; /* no intra-BSS communication */ /* interrupt moderation */ diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c index 916123a3d74e..a335f94c72ff 100644 --- a/drivers/net/wireless/b43/lo.c +++ b/drivers/net/wireless/b43/lo.c @@ -929,8 +929,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev, b43_lo_write(dev, &cal->ctl); } -/* Periodic LO maintanance work */ -void b43_lo_g_maintanance_work(struct b43_wldev *dev) +/* Periodic LO maintenance work */ +void b43_lo_g_maintenance_work(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h index 3b27e20eff80..7b4df3883bc2 100644 --- a/drivers/net/wireless/b43/lo.h +++ b/drivers/net/wireless/b43/lo.h @@ -80,7 +80,7 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev, void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all); -void b43_lo_g_maintanance_work(struct b43_wldev *dev); +void b43_lo_g_maintenance_work(struct b43_wldev *dev); void b43_lo_g_cleanup(struct b43_wldev *dev); void b43_lo_g_init(struct b43_wldev *dev); diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 727ce6edb4b3..462310e6e88f 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -3004,7 +3004,7 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev) phy->rev == 1) { //TODO: implement rev1 workaround } - b43_lo_g_maintanance_work(dev); + b43_lo_g_maintenance_work(dev); b43_mac_enable(dev); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index d86d1f1f1c91..ffe526070d6f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -5785,6 +5785,7 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy) static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; struct ieee80211_supported_band *band; __le32 bandlist[3]; u32 n_bands; @@ -5798,6 +5799,19 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) if (err) return err; + for (i = 0; i < wiphy->iface_combinations->max_interfaces && + i < ARRAY_SIZE(drvr->addresses); i++) { + u8 *addr = drvr->addresses[i].addr; + + memcpy(addr, drvr->mac, ETH_ALEN); + if (i) { + addr[0] |= BIT(1); + addr[ETH_ALEN - 1] ^= i; + } + } + wiphy->addresses = drvr->addresses; + wiphy->n_addresses = i; + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->cipher_suites = __wl_cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h index fd74a9c6e9ac..746304121cdb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h @@ -21,6 +21,7 @@ #ifndef BRCMFMAC_CORE_H #define BRCMFMAC_CORE_H +#include <net/cfg80211.h> #include "fweh.h" #define TOE_TX_CSUM_OL 0x00000001 @@ -118,6 +119,8 @@ struct brcmf_pub { /* Multicast data packets sent to dongle */ unsigned 
long tx_multicast; + struct mac_address addresses[BRCMF_MAX_IFS]; + struct brcmf_if *iflist[BRCMF_MAX_IFS]; struct mutex proto_block; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index ab775a5d5b33..d2c5747e3ac9 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1472,9 +1472,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl, wl->timers = t; #ifdef DEBUG - t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC); - if (t->name) - strcpy(t->name, name); + t->name = kstrdup(name, GFP_ATOMIC); #endif return t; diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index 7603546d2de3..29185aeccba8 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -467,7 +467,6 @@ static struct spi_driver spi_driver = { .remove = cw1200_spi_disconnect, .driver = { .name = "cw1200_wlan_spi", - .bus = &spi_bus_type, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &cw1200_pm_ops, diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 08eb229e7816..36818c7f30b9 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1410,7 +1410,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv) static int ipw2100_hw_phy_off(struct ipw2100_priv *priv) { -#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000) +#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50)) struct host_command cmd = { .host_command = CARD_DISABLE_PHY_OFF, diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index 7f4cb692cc57..af1b3e6839fa 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -3259,7 +3259,7 @@ il3945_show_measurement(struct device *d, struct device_attribute *attr, while (size && PAGE_SIZE - len) { hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, - PAGE_SIZE - len, 1); + PAGE_SIZE - len, true); len = strlen(buf); if (PAGE_SIZE - len) buf[len++] = '\n'; diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c index 344010153196..908b9f4fef6f 100644 --- a/drivers/net/wireless/iwlegacy/debug.c +++ b/drivers/net/wireless/iwlegacy/debug.c @@ -515,12 +515,8 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count, scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n", eeprom_ver); for (ofs = 0; ofs < eeprom_len; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos, - buf_size - pos, 0); - pos += strlen(buf + pos); - if (buf_size - pos > 0) - buf[pos++] = '\n'; + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n", + ofs, ptr + ofs); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index b15e4c7acbec..ff63cb5632eb 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -19,6 +19,7 @@ #include "cfg80211.h" #include "main.h" +#include "11n.h" static char *reg_alpha2; module_param(reg_alpha2, charp, 0); @@ -34,12 +35,38 @@ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { }, }; -static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = { +static const struct 
ieee80211_iface_combination +mwifiex_iface_comb_ap_sta = { .limits = mwifiex_ap_sta_limits, .num_different_channels = 1, .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits), .max_interfaces = MWIFIEX_MAX_BSS_NUM, .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40), +}; + +static const struct ieee80211_iface_combination +mwifiex_iface_comb_ap_sta_vht = { + .limits = mwifiex_ap_sta_limits, + .num_different_channels = 1, + .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits), + .max_interfaces = MWIFIEX_MAX_BSS_NUM, + .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), +}; + +static const struct +ieee80211_iface_combination mwifiex_iface_comb_ap_sta_drcs = { + .limits = mwifiex_ap_sta_limits, + .num_different_channels = 2, + .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits), + .max_interfaces = MWIFIEX_MAX_BSS_NUM, + .beacon_int_infra_match = true, }; /* @@ -441,7 +468,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev, * - Country codes * - Sub bands (first channel, number of channels, maximum Tx power) */ -static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) +int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) { u8 no_of_triplet = 0; struct ieee80211_country_ie_triplet *t; @@ -804,10 +831,13 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, priv->bss_type = MWIFIEX_BSS_TYPE_STA; break; case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_P2P_GO: priv->bss_role = MWIFIEX_BSS_ROLE_STA; priv->bss_type = MWIFIEX_BSS_TYPE_P2P; break; + case NL80211_IFTYPE_P2P_GO: + priv->bss_role = MWIFIEX_BSS_ROLE_UAP; + priv->bss_type = MWIFIEX_BSS_TYPE_P2P; + break; case NL80211_IFTYPE_AP: priv->bss_type = MWIFIEX_BSS_TYPE_UAP; priv->bss_role = MWIFIEX_BSS_ROLE_UAP; @@ -1115,8 +1145,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_GO: switch (type) { case NL80211_IFTYPE_STATION: - if (mwifiex_cfg80211_init_p2p_client(priv)) + if (mwifiex_cfg80211_deinit_p2p(priv)) return -EFAULT; + priv->adapter->curr_iface_comb.p2p_intf--; + priv->adapter->curr_iface_comb.sta_intf++; dev->ieee80211_ptr->iftype = type; break; case NL80211_IFTYPE_ADHOC: @@ -2788,6 +2820,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); struct mwifiex_adapter *adapter = priv->adapter; + struct sk_buff *skb, *tmp; #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_remove(priv); @@ -2795,6 +2828,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) mwifiex_stop_net_dev_queue(priv->netdev, adapter); + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) + mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); @@ -2954,7 +2990,6 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, MWIFIEX_MEF_MAX_BYTESEQ)) { mwifiex_dbg(priv->adapter, ERROR, "Pattern not supported\n"); - kfree(mef_entry); return -EOPNOTSUPP; } @@ -3036,9 +3071,12 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv, mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]); - if (wowlan->n_patterns || wowlan->magic_pkt) + if (wowlan->n_patterns || wowlan->magic_pkt) { ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg, &mef_entry[1], wowlan); + if (ret) + goto err; + 
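	/* Cleanup note: mwifiex_set_wowlan_mef_entry() used to free the
	 * caller's mef_entry itself before returning -EOPNOTSUPP; the error
	 * is now propagated and handled here through the err label so the
	 * allocation is freed exactly once on every exit path:
	 *
	 *	ret = step();
	 *	if (ret)
	 *		goto err;
	 *	...
	 * err:
	 *	kfree(mef_entry);
	 *	return ret;
	 */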
} if (!mef_cfg.criteria) mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST | @@ -3048,6 +3086,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv, ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG, HostCmd_ACT_GEN_SET, 0, &mef_cfg, true); + +err: kfree(mef_entry); return ret; } @@ -3360,6 +3400,72 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, } static int +mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef) +{ + struct mwifiex_sta_node *sta_ptr; + unsigned long flags; + u16 chan; + u8 second_chan_offset, band; + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_ptr = mwifiex_get_sta_entry(priv, addr); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (!sta_ptr) { + wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n", + __func__, addr); + return -ENOENT; + } + + if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] & + WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) { + wiphy_err(wiphy, "%pM does not support tdls cs\n", addr); + return -ENOENT; + } + + if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING || + sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) { + wiphy_err(wiphy, "channel switch is running, abort request\n"); + return -EALREADY; + } + + chan = chandef->chan->hw_value; + second_chan_offset = mwifiex_get_sec_chan_offset(chan); + band = chandef->chan->band; + mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band); + + return 0; +} + +static void +mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr) +{ + struct mwifiex_sta_node *sta_ptr; + unsigned long flags; + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_ptr = mwifiex_get_sta_entry(priv, addr); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (!sta_ptr) { + wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n", + __func__, addr); + } else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING || + sta_ptr->tdls_status == TDLS_IN_BASE_CHAN || + sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) { + wiphy_err(wiphy, "tdls chan switch not initialized by %pM\n", + addr); + } else + mwifiex_stop_tdls_cs(priv, addr); +} + +static int mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { @@ -3575,6 +3681,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .set_coalesce = mwifiex_cfg80211_set_coalesce, .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt, .tdls_oper = mwifiex_cfg80211_tdls_oper, + .tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch, + .tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch, .add_station = mwifiex_cfg80211_add_station, .change_station = mwifiex_cfg80211_change_station, .get_channel = mwifiex_cfg80211_get_channel, @@ -3672,7 +3780,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) else wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; - wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta; + if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) + wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs; + else if (adapter->is_hw_11ac_capable) + wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht; + else + wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta; wiphy->n_iface_combinations = 1; /* Initialize cipher suits */ @@ -3709,6 +3822,9 @@ int
mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_FEATURE_INACTIVITY_TIMER | NL80211_FEATURE_NEED_OBSS_SCAN; + if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) + wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + if (adapter->fw_api_ver == MWIFIEX_FW_V15) wiphy->features |= NL80211_FEATURE_SK_TX_STATUS; diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index 51e344789ba2..098e1f14dc9a 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -141,6 +141,9 @@ enum mwifiex_tdls_status { TDLS_SETUP_COMPLETE, TDLS_SETUP_FAILURE, TDLS_LINK_TEARDOWN, + TDLS_CHAN_SWITCHING, + TDLS_IN_BASE_CHAN, + TDLS_IN_OFF_CHAN, }; enum mwifiex_tdls_error_code { diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index cd09051710e6..cff38ad129aa 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -169,14 +169,17 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123) #define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145) #define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146) +#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148) #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) +#define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) #define TLV_TYPE_SCAN_CHANNEL_GAP (PROPRIETARY_TLV_BASE_ID + 197) #define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199) #define TLV_TYPE_CHANNEL_STATS (PROPRIETARY_TLV_BASE_ID + 198) #define TLV_BTCOEX_WL_AGGR_WINSIZE (PROPRIETARY_TLV_BASE_ID + 202) #define TLV_BTCOEX_WL_SCANTIME (PROPRIETARY_TLV_BASE_ID + 203) +#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -200,6 +203,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11)) #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14)) +#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15)) #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16)) #define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ @@ -359,6 +363,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_MGMT_FRAME_REG 0x010c #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d #define HostCmd_CMD_11AC_CFG 0x0112 +#define HostCmd_CMD_TDLS_CONFIG 0x0100 +#define HostCmd_CMD_MC_POLICY 0x0121 #define HostCmd_CMD_TDLS_OPER 0x0122 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223 @@ -509,8 +515,10 @@ enum P2P_MODES { #define EVENT_TDLS_GENERIC_EVENT 0x00000052 #define EVENT_RADAR_DETECTED 0x00000053 #define EVENT_CHANNEL_REPORT_RDY 0x00000054 +#define EVENT_TX_DATA_PAUSE 0x00000055 #define EVENT_EXT_SCAN_REPORT 0x00000058 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f +#define EVENT_MULTI_CHAN_INFO 0x0000006a #define EVENT_TX_STATUS_REPORT 0x00000074 #define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076 @@ -545,7 +553,27 @@ enum P2P_MODES { #define ACT_TDLS_DELETE 0x00 #define ACT_TDLS_CREATE 0x01 #define ACT_TDLS_CONFIG 0x02 -#define TDLS_EVENT_LINK_TEAR_DOWN 3 + +#define TDLS_EVENT_LINK_TEAR_DOWN 3 +#define TDLS_EVENT_CHAN_SWITCH_RESULT 7 +#define TDLS_EVENT_START_CHAN_SWITCH 8 +#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9 + +#define TDLS_BASE_CHANNEL 0 +#define TDLS_OFF_CHANNEL 1 + +#define ACT_TDLS_CS_ENABLE_CONFIG 0x00 +#define ACT_TDLS_CS_INIT 0x06 +#define ACT_TDLS_CS_STOP 0x07 +#define 
ACT_TDLS_CS_PARAMS 0x08 + +#define MWIFIEX_DEF_CS_UNIT_TIME 2 +#define MWIFIEX_DEF_CS_THR_OTHERLINK 10 +#define MWIFIEX_DEF_THR_DIRECTLINK 0 +#define MWIFIEX_DEF_CS_TIME 10 +#define MWIFIEX_DEF_CS_TIMEOUT 16 +#define MWIFIEX_DEF_CS_REG_CLASS 12 +#define MWIFIEX_DEF_CS_PERIODICITY 1 #define MWIFIEX_FW_V15 15 @@ -1131,6 +1159,13 @@ struct host_cmd_ds_tx_rate_query { u8 ht_info; } __packed; +struct mwifiex_tx_pause_tlv { + struct mwifiex_ie_types_header header; + u8 peermac[ETH_ALEN]; + u8 tx_pause; + u8 pkt_cnt; +} __packed; + enum Host_Sleep_Action { HS_CONFIGURE = 0x0001, HS_ACTIVATE = 0x0002, @@ -1249,6 +1284,36 @@ struct host_cmd_ds_tdls_oper { u8 peer_mac[ETH_ALEN]; } __packed; +struct mwifiex_tdls_config { + __le16 enable; +}; + +struct mwifiex_tdls_config_cs_params { + u8 unit_time; + u8 thr_otherlink; + u8 thr_directlink; +}; + +struct mwifiex_tdls_init_cs_params { + u8 peer_mac[ETH_ALEN]; + u8 primary_chan; + u8 second_chan_offset; + u8 band; + __le16 switch_time; + __le16 switch_timeout; + u8 reg_class; + u8 periodicity; +} __packed; + +struct mwifiex_tdls_stop_cs_params { + u8 peer_mac[ETH_ALEN]; +}; + +struct host_cmd_ds_tdls_config { + __le16 tdls_action; + u8 tdls_data[1]; +} __packed; + struct mwifiex_chan_desc { __le16 start_freq; u8 chan_width; @@ -1370,6 +1435,11 @@ struct host_cmd_ds_802_11_scan_ext { u8 tlv_buffer[1]; } __packed; +struct mwifiex_ie_types_bss_mode { + struct mwifiex_ie_types_header header; + u8 bss_mode; +} __packed; + struct mwifiex_ie_types_bss_scan_rsp { struct mwifiex_ie_types_header header; u8 bssid[ETH_ALEN]; @@ -1908,6 +1978,12 @@ struct mwifiex_radar_det_event { __le32 passed; } __packed; +struct mwifiex_ie_types_multi_chan_info { + struct mwifiex_ie_types_header header; + __le16 status; + u8 tlv_buffer[0]; +} __packed; + struct meas_rpt_map { u8 rssi:3; u8 unmeasured:1; @@ -1927,10 +2003,18 @@ struct host_cmd_ds_802_11_subsc_evt { __le16 events; } __packed; +struct chan_switch_result { + u8 cur_chan; + u8 status; + u8 reason; +} __packed; + struct mwifiex_tdls_generic_event { __le16 type; u8 peer_mac[ETH_ALEN]; union { + struct chan_switch_result switch_result; + u8 cs_stop_reason; __le16 reason_code; __le16 reserved; } u; @@ -1971,6 +2055,11 @@ struct host_cmd_ds_coalesce_cfg { struct coalesce_receive_filt_rule rule[0]; } __packed; +struct host_cmd_ds_multi_chan_policy { + __le16 action; + __le16 policy; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2035,9 +2124,11 @@ struct host_cmd_ds_command { struct host_cmd_ds_sta_list sta_list; struct host_cmd_11ac_vht_cfg vht_cfg; struct host_cmd_ds_coalesce_cfg coalesce_cfg; + struct host_cmd_ds_tdls_config tdls_config; struct host_cmd_ds_tdls_oper tdls_oper; struct host_cmd_ds_chan_rpt_req chan_rpt_req; struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg; + struct host_cmd_ds_multi_chan_policy mc_policy; } params; } __packed; diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c index 0ba894509413..abf52d25b981 100644 --- a/drivers/net/wireless/mwifiex/ie.c +++ b/drivers/net/wireless/mwifiex/ie.c @@ -409,6 +409,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, int ret; ret = mwifiex_uap_parse_tail_ies(priv, info); + + if (ret) return ret; return mwifiex_set_mgmt_beacon_data_ies(priv, info); @@ -477,6 +479,7 @@ int mwifiex_del_mgmt_ies(struct mwifiex_private *priv) ar_ie, &priv->assocresp_idx); done: + kfree(gen_ie); kfree(beacon_ie); kfree(pr_ie); kfree(ar_ie); diff --git a/drivers/net/wireless/mwifiex/init.c 
b/drivers/net/wireless/mwifiex/init.c index df7fdc09d38c..8fa363add970 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -77,7 +77,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->media_connected = false; eth_broadcast_addr(priv->curr_addr); - + priv->port_open = false; priv->pkt_tx_ctrl = 0; priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->data_rate = 0; /* Initially indicate the rate as auto */ @@ -499,6 +499,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) INIT_LIST_HEAD(&priv->sta_list); INIT_LIST_HEAD(&priv->auto_tdls_list); skb_queue_head_init(&priv->tdls_txq); + skb_queue_head_init(&priv->bypass_txq); spin_lock_init(&priv->tx_ba_stream_tbl_lock); spin_lock_init(&priv->rx_reorder_tbl_lock); diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 56b024a6aaa5..3cda1f956f0b 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -783,6 +783,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) priv->scan_block = true; + else + priv->port_open = true; done: /* Need to indicate IOCTL complete */ diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 3ba4e0e04223..278dc94eaecb 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -276,6 +276,7 @@ process_start: !adapter->pm_wakeup_fw_try) && (is_command_pending(adapter) || !skb_queue_empty(&adapter->tx_data_q) || + !mwifiex_bypass_txlist_empty(adapter) || !mwifiex_wmm_lists_empty(adapter))) { adapter->pm_wakeup_fw_try = true; mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3)); @@ -299,9 +300,16 @@ process_start: if ((!adapter->scan_chan_gap_enabled && adapter->scan_processing) || adapter->data_sent || + mwifiex_is_tdls_chan_switching + (mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_STA)) || (mwifiex_wmm_lists_empty(adapter) && + mwifiex_bypass_txlist_empty(adapter) && skb_queue_empty(&adapter->tx_data_q))) { if (adapter->cmd_sent || adapter->curr_cmd || + !mwifiex_is_send_cmd_allowed + (mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_STA)) || (!is_command_pending(adapter))) break; } @@ -342,7 +350,9 @@ process_start: continue; } - if (!adapter->cmd_sent && !adapter->curr_cmd) { + if (!adapter->cmd_sent && !adapter->curr_cmd && + mwifiex_is_send_cmd_allowed + (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { if (mwifiex_exec_next_cmd(adapter) == -1) { ret = -1; break; @@ -365,7 +375,25 @@ process_start: if ((adapter->scan_chan_gap_enabled || !adapter->scan_processing) && - !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) { + !adapter->data_sent && + !mwifiex_bypass_txlist_empty(adapter) && + !mwifiex_is_tdls_chan_switching + (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { + mwifiex_process_bypass_tx(adapter); + if (adapter->hs_activated) { + adapter->is_hs_configured = false; + mwifiex_hs_activated_event + (mwifiex_get_priv + (adapter, MWIFIEX_BSS_ROLE_ANY), + false); + } + } + + if ((adapter->scan_chan_gap_enabled || + !adapter->scan_processing) && + !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) && + !mwifiex_is_tdls_chan_switching + (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { mwifiex_wmm_process_tx(adapter); if (adapter->hs_activated) { adapter->is_hs_configured = false; @@ -379,6 +407,7 @@ process_start: if (adapter->delay_null_pkt && !adapter->cmd_sent && !adapter->curr_cmd && 
!is_command_pending(adapter) && (mwifiex_wmm_lists_empty(adapter) && + mwifiex_bypass_txlist_empty(adapter) && skb_queue_empty(&adapter->tx_data_q))) { if (!mwifiex_send_null_packet (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), @@ -649,6 +678,26 @@ mwifiex_close(struct net_device *dev) return 0; } +static bool +mwifiex_bypass_tx_queue(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; + + if (ntohs(eth_hdr->h_proto) == ETH_P_PAE || + mwifiex_is_skb_mgmt_frame(skb) || + (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA && + ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && + (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) { + mwifiex_dbg(priv->adapter, DATA, + "bypass txqueue; eth type %#x, mgmt %d\n", + ntohs(eth_hdr->h_proto), + mwifiex_is_skb_mgmt_frame(skb)); + return true; + } + + return false; +} /* * Add buffer into wmm tx queue and queue work to transmit it. */ @@ -666,8 +715,14 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) } } - atomic_inc(&priv->adapter->tx_pending); - mwifiex_wmm_add_buf_txqueue(priv, skb); + if (mwifiex_bypass_tx_queue(priv, skb)) { + atomic_inc(&priv->adapter->tx_pending); + atomic_inc(&priv->adapter->bypass_tx_pending); + mwifiex_wmm_add_buf_bypass_txqueue(priv, skb); + } else { + atomic_inc(&priv->adapter->tx_pending); + mwifiex_wmm_add_buf_txqueue(priv, skb); + } mwifiex_queue_main_work(priv->adapter); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index ae98b5b83b1f..face7478937f 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -281,6 +281,7 @@ struct mwifiex_ra_list_tbl { u8 amsdu_in_ampdu; u16 total_pkt_count; bool tdls_link; + bool tx_paused; }; struct mwifiex_tid_tbl { @@ -294,6 +295,7 @@ struct mwifiex_tid_tbl { struct mwifiex_wmm_desc { struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; u32 packets_out[MAX_NUM_TID]; + u32 pkts_paused[MAX_NUM_TID]; /* spin lock to protect ra_list */ spinlock_t ra_list_spinlock; struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS]; @@ -517,6 +519,7 @@ struct mwifiex_private { u8 frame_type; u8 curr_addr[ETH_ALEN]; u8 media_connected; + u8 port_open; u32 num_tx_timeout; /* track consecutive timeout */ u8 tx_timeout_cnt; @@ -662,6 +665,7 @@ struct mwifiex_private { struct cfg80211_beacon_data beacon_after; struct mwifiex_11h_intf_state state_11h; struct mwifiex_ds_mem_rw mem_rw; + struct sk_buff_head bypass_txq; }; @@ -768,6 +772,7 @@ struct mwifiex_sta_node { u8 tdls_status; struct mwifiex_tdls_capab tdls_cap; struct mwifiex_station_stats stats; + u8 tx_pause; }; struct mwifiex_auto_tdls_peer { @@ -831,6 +836,7 @@ struct mwifiex_adapter { wait_queue_head_t init_wait_q; void *card; struct mwifiex_if_ops if_ops; + atomic_t bypass_tx_pending; atomic_t rx_pending; atomic_t tx_pending; atomic_t cmd_pending; @@ -979,6 +985,7 @@ struct mwifiex_adapter { u8 coex_win_size; u8 coex_tx_win_size; u8 coex_rx_win_size; + bool drcs_enabled; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); @@ -1330,6 +1337,21 @@ static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv) return 0; } +static inline u8 mwifiex_is_tdls_link_setup(u8 status) +{ + switch (status) { + case TDLS_SETUP_COMPLETE: + case TDLS_CHAN_SWITCHING: + case TDLS_IN_BASE_CHAN: + case TDLS_IN_OFF_CHAN: + return true; + default: + break; + } + + return false; +} + int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, u32 func_init_shutdown); int mwifiex_add_card(void 
*, struct semaphore *, struct mwifiex_if_ops *, u8); @@ -1458,6 +1480,9 @@ struct mwifiex_sta_node * mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac); struct mwifiex_sta_node * mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac); +u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv); +u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv); +u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv); int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, const u8 *extra_ies, @@ -1488,6 +1513,13 @@ void mwifiex_check_auto_tdls(unsigned long context); void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac); void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv); void mwifiex_clean_auto_tdls(struct mwifiex_private *priv); +int mwifiex_config_tdls_enable(struct mwifiex_private *priv); +int mwifiex_config_tdls_disable(struct mwifiex_private *priv); +int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv); +int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac); +int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac, + u8 primary_chan, u8 second_chan_offset, u8 band); + int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf); @@ -1522,6 +1554,12 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter); void mwifiex_11n_delba(struct mwifiex_private *priv, int tid); +int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy); +void mwifiex_process_tx_pause_event(struct mwifiex_private *priv, + struct sk_buff *event); +void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, + struct sk_buff *event_skb); + #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); void mwifiex_debugfs_remove(void); diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index baf9715ddc10..ef8da8ebcbab 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -823,6 +823,7 @@ mwifiex_config_scan(struct mwifiex_private *priv, int i; u8 ssid_filter; struct mwifiex_ie_types_htcap *ht_cap; + struct mwifiex_ie_types_bss_mode *bss_mode; /* The tlv_buf_len is calculated for each scan command. 
The TLVs added in this routine will be preserved since the routine that sends the @@ -908,6 +909,10 @@ mwifiex_config_scan(struct mwifiex_private *priv, wildcard_ssid_tlv->max_ssid_length = IEEE80211_MAX_SSID_LEN; + if (!memcmp(user_scan_in->ssid_list[i].ssid, + "DIRECT-", 7)) + wildcard_ssid_tlv->max_ssid_length = 0xfe; + memcpy(wildcard_ssid_tlv->ssid, user_scan_in->ssid_list[i].ssid, ssid_len); @@ -968,6 +973,15 @@ mwifiex_config_scan(struct mwifiex_private *priv, else *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD; + if (adapter->ext_scan) { + bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos; + bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE); + bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode)); + bss_mode->bss_mode = scan_cfg_out->bss_mode; + tlv_pos += sizeof(bss_mode->header) + + le16_to_cpu(bss_mode->header.len); + } + /* If the input config or adapter has the number of Probes set, add tlv */ if (num_probes) { diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 037adcd1f484..a49a80dd773e 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -26,6 +26,10 @@ #include "11n.h" #include "11ac.h" +static bool drcs; +module_param(drcs, bool, 0644); +MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0"); + static bool disable_auto_ds; module_param(disable_auto_ds, bool, 0); MODULE_PARM_DESC(disable_auto_ds, @@ -1512,6 +1516,22 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, } static int +mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action, void *data_buf) +{ + struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy; + const u16 *drcs_info = data_buf; + + mc_pol->action = cpu_to_le16(cmd_action); + mc_pol->policy = cpu_to_le16(*drcs_info); + cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY); + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) + + S_DS_GEN); + return 0; +} + +static int mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, u16 cmd_action, void *data_buf) @@ -1576,6 +1596,50 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv, } static int +mwifiex_cmd_tdls_config(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + u16 cmd_action, void *data_buf) +{ + struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config; + struct mwifiex_tdls_init_cs_params *config; + struct mwifiex_tdls_config *init_config; + u16 len; + + cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG); + cmd->size = cpu_to_le16(S_DS_GEN); + tdls_config->tdls_action = cpu_to_le16(cmd_action); + le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action)); + + switch (cmd_action) { + case ACT_TDLS_CS_ENABLE_CONFIG: + init_config = data_buf; + len = sizeof(*init_config); + memcpy(tdls_config->tdls_data, init_config, len); + break; + case ACT_TDLS_CS_INIT: + config = data_buf; + len = sizeof(*config); + memcpy(tdls_config->tdls_data, config, len); + break; + case ACT_TDLS_CS_STOP: + len = sizeof(struct mwifiex_tdls_stop_cs_params); + memcpy(tdls_config->tdls_data, data_buf, len); + break; + case ACT_TDLS_CS_PARAMS: + len = sizeof(struct mwifiex_tdls_config_cs_params); + memcpy(tdls_config->tdls_data, data_buf, len); + break; + default: + mwifiex_dbg(priv->adapter, ERROR, + "Unknown TDLS configuration\n"); + return -ENOTSUPP; + } + + le16_add_cpu(&cmd->size, len); + return 0; +} + +static int 
mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, void *data_buf) @@ -1933,10 +1997,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, if (priv->bss_mode == NL80211_IFTYPE_ADHOC) cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_ADHOC; - else if (priv->bss_mode == NL80211_IFTYPE_STATION) + else if (priv->bss_mode == NL80211_IFTYPE_STATION || + priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_INFRA; - else if (priv->bss_mode == NL80211_IFTYPE_AP) + else if (priv->bss_mode == NL80211_IFTYPE_AP || + priv->bss_mode == NL80211_IFTYPE_P2P_GO) cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP; cmd_ptr->size = cpu_to_le16(sizeof(struct host_cmd_ds_set_bss_mode) + S_DS_GEN); @@ -1958,6 +2024,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_TDLS_OPER: ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf); break; + case HostCmd_CMD_TDLS_CONFIG: + ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action, + data_buf); + break; case HostCmd_CMD_CHAN_REPORT_REQUEST: ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr, data_buf); @@ -1966,6 +2036,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action, data_buf); break; + case HostCmd_CMD_MC_POLICY: + ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action, + data_buf); + break; default: mwifiex_dbg(priv->adapter, ERROR, "PREP_CMD: unknown cmd- %#x\n", cmd_no); @@ -2082,6 +2156,18 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) if (ret) return -1; } + + if (drcs) { + adapter->drcs_enabled = true; + if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info)) + ret = mwifiex_send_cmd(priv, + HostCmd_CMD_MC_POLICY, + HostCmd_ACT_GEN_SET, 0, + &adapter->drcs_enabled, + true); + if (ret) + return -1; + } } /* get tx rate */ diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index b645884b3b97..89e8dafb4738 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -599,6 +599,7 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; + priv->port_open = true; } } @@ -629,6 +630,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; + priv->port_open = true; } } @@ -1191,12 +1193,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_TDLS_OPER: ret = mwifiex_ret_tdls_oper(priv, resp); + case HostCmd_CMD_MC_POLICY: break; case HostCmd_CMD_CHAN_REPORT_REQUEST: break; case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); break; + case HostCmd_CMD_TDLS_CONFIG: + break; default: mwifiex_dbg(adapter, ERROR, "CMD_RESP: unknown cmd response %#x\n", diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 848de2621958..3d18c585e543 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -54,6 +54,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) priv->media_connected = false; priv->scan_block = false; + priv->port_open = false; if ((GET_BSS_ROLE(priv) == 
MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) { @@ -153,6 +154,7 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, struct mwifiex_sta_node *sta_ptr; struct mwifiex_tdls_generic_event *tdls_evt = (void *)event_skb->data + sizeof(adapter->event_cause); + u8 *mac = tdls_evt->peer_mac; /* reserved 2 bytes are not mandatory in tdls event */ if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) - @@ -175,6 +177,59 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, le16_to_cpu(tdls_evt->u.reason_code), GFP_KERNEL); break; + case TDLS_EVENT_CHAN_SWITCH_RESULT: + mwifiex_dbg(adapter, EVENT, "tdls channel switch result :\n"); + mwifiex_dbg(adapter, EVENT, + "status=0x%x, reason=0x%x cur_chan=%d\n", + tdls_evt->u.switch_result.status, + tdls_evt->u.switch_result.reason, + tdls_evt->u.switch_result.cur_chan); + + /* tdls channel switch failed */ + if (tdls_evt->u.switch_result.status != 0) { + switch (tdls_evt->u.switch_result.cur_chan) { + case TDLS_BASE_CHANNEL: + sta_ptr->tdls_status = TDLS_IN_BASE_CHAN; + break; + case TDLS_OFF_CHANNEL: + sta_ptr->tdls_status = TDLS_IN_OFF_CHAN; + break; + default: + break; + } + return ret; + } + + /* tdls channel switch success */ + switch (tdls_evt->u.switch_result.cur_chan) { + case TDLS_BASE_CHANNEL: + if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN) + break; + mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac, + false); + sta_ptr->tdls_status = TDLS_IN_BASE_CHAN; + break; + case TDLS_OFF_CHANNEL: + if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) + break; + mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac, + true); + sta_ptr->tdls_status = TDLS_IN_OFF_CHAN; + break; + default: + break; + } + + break; + case TDLS_EVENT_START_CHAN_SWITCH: + mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n"); + sta_ptr->tdls_status = TDLS_CHAN_SWITCHING; + break; + case TDLS_EVENT_CHAN_SWITCH_STOPPED: + mwifiex_dbg(adapter, EVENT, + "tdls chan switch stopped, reason=%d\n", + tdls_evt->u.cs_stop_reason); + break; default: break; } @@ -182,6 +237,145 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, return ret; } +static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv, + struct mwifiex_ie_types_header *tlv) +{ + struct mwifiex_tx_pause_tlv *tp; + struct mwifiex_sta_node *sta_ptr; + unsigned long flags; + + tp = (void *)tlv; + mwifiex_dbg(priv->adapter, EVENT, + "uap tx_pause: %pM pause=%d, pkts=%d\n", + tp->peermac, tp->tx_pause, + tp->pkt_cnt); + + if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) { + if (tp->tx_pause) + priv->port_open = false; + else + priv->port_open = true; + } else if (is_multicast_ether_addr(tp->peermac)) { + mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { + sta_ptr->tx_pause = tp->tx_pause; + mwifiex_update_ralist_tx_pause(priv, tp->peermac, + tp->tx_pause); + } + } +} + +static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv, + struct mwifiex_ie_types_header *tlv) +{ + struct mwifiex_tx_pause_tlv *tp; + struct mwifiex_sta_node *sta_ptr; + int status; + unsigned long flags; + + tp = (void *)tlv; + mwifiex_dbg(priv->adapter, EVENT, + "sta tx_pause: %pM pause=%d, pkts=%d\n", + tp->peermac, tp->tx_pause, + tp->pkt_cnt); + + if (ether_addr_equal(tp->peermac, 
priv->cfg_bssid)) { + if (tp->tx_pause) + priv->port_open = false; + else + priv->port_open = true; + } else { + if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return; + + status = mwifiex_get_tdls_link_status(priv, tp->peermac); + if (mwifiex_is_tdls_link_setup(status)) { + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + + if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { + sta_ptr->tx_pause = tp->tx_pause; + mwifiex_update_ralist_tx_pause(priv, + tp->peermac, + tp->tx_pause); + } + } + } +} + +void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, + struct sk_buff *event_skb) +{ + struct mwifiex_ie_types_multi_chan_info *chan_info; + u16 status; + + chan_info = (void *)event_skb->data + sizeof(u32); + + if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) { + mwifiex_dbg(priv->adapter, ERROR, + "unknown TLV in chan_info event\n"); + return; + } + + status = le16_to_cpu(chan_info->status); + + if (status) { + mwifiex_dbg(priv->adapter, EVENT, + "multi-channel operation started\n"); + } else { + mwifiex_dbg(priv->adapter, EVENT, + "multi-channel operation over\n"); + } +} + +void mwifiex_process_tx_pause_event(struct mwifiex_private *priv, + struct sk_buff *event_skb) +{ + struct mwifiex_ie_types_header *tlv; + u16 tlv_type, tlv_len; + int tlv_buf_left; + + if (!priv->media_connected) { + mwifiex_dbg(priv->adapter, ERROR, + "tx_pause event while disconnected; bss_role=%d\n", + priv->bss_role); + return; + } + + tlv_buf_left = event_skb->len - sizeof(u32); + tlv = (void *)event_skb->data + sizeof(u32); + + while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_len = le16_to_cpu(tlv->len); + if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) > + tlv_buf_left) { + mwifiex_dbg(priv->adapter, ERROR, + "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n", + tlv_len, tlv_buf_left); + break; + } + if (tlv_type == TLV_TYPE_TX_PAUSE) { + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) + mwifiex_process_sta_tx_pause(priv, tlv); + else + mwifiex_process_uap_tx_pause(priv, tlv); + } + + tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) + + tlv_len; + tlv = (void *)((u8 *)tlv + tlv_len + + sizeof(struct mwifiex_ie_types_header)); + } + +} + /* * This function handles coex events generated by firmware */ @@ -359,7 +553,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PS_AWAKE: mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); - if (!adapter->pps_uapsd_mode && + if (!adapter->pps_uapsd_mode && priv->port_open && priv->media_connected && adapter->sleep_period.period) { adapter->pps_uapsd_mode = true; mwifiex_dbg(adapter, EVENT, @@ -438,6 +632,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PORT_RELEASE: mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); + priv->port_open = true; break; case EVENT_EXT_SCAN_REPORT: @@ -573,6 +768,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) ret = mwifiex_parse_tdls_event(priv, adapter->event_skb); break; + case EVENT_TX_DATA_PAUSE: + mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n"); + mwifiex_process_tx_pause_event(priv, adapter->event_skb); + break; + + case EVENT_MULTI_CHAN_INFO: + mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n"); + mwifiex_process_multi_chan_event(priv, adapter->event_skb); + break; + case EVENT_TX_STATUS_REPORT: mwifiex_dbg(adapter, EVENT, "event: 
TX_STATUS Report\n"); mwifiex_parse_tx_status_event(priv, adapter->event_body); diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index 2faa1bc42abe..aa3d3c5ed07b 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -49,7 +49,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv, tid = skb->priority; tid_down = mwifiex_wmm_downgrade_tid(priv, tid); - if (status == TDLS_SETUP_COMPLETE) { + if (mwifiex_is_tdls_link_setup(status)) { ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac); ra_list->tdls_link = true; tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; @@ -355,6 +355,7 @@ static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv, extcap->ieee_hdr.len = 8; memset(extcap->ext_capab, 0, 8); extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED; + extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH; if (priv->adapter->is_hw_11ac_capable) extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED; @@ -1071,6 +1072,11 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) for (i = 0; i < MAX_NUM_TID; i++) sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED; } + if (sta_ptr->tdls_cap.extcap.ext_capab[3] & + WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) { + mwifiex_config_tdls_enable(priv); + mwifiex_config_tdls_cs_params(priv); + } memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq)); mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE); @@ -1141,7 +1147,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv, spin_lock_irqsave(&priv->sta_list_spinlock, flags); list_for_each_entry(sta_ptr, &priv->sta_list, list) { - if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) { + if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) { ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr); peer++; count++; @@ -1295,7 +1301,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv, if ((link_status == TDLS_NOT_SETUP) && (peer->tdls_status == TDLS_SETUP_INPROGRESS)) peer->failure_count++; - else if (link_status == TDLS_SETUP_COMPLETE) + else if (mwifiex_is_tdls_link_setup(link_status)) peer->failure_count = 0; peer->tdls_status = link_status; @@ -1367,7 +1373,7 @@ void mwifiex_check_auto_tdls(unsigned long context) if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) || !tdls_peer->rssi) && - tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) { + mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) { tdls_peer->tdls_status = TDLS_LINK_TEARDOWN; mwifiex_dbg(priv->adapter, MSG, "teardown TDLS link,peer=%pM rssi=%d\n", @@ -1416,3 +1422,67 @@ void mwifiex_clean_auto_tdls(struct mwifiex_private *priv) mwifiex_flush_auto_tdls_list(priv); } } + +static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable) +{ + struct mwifiex_tdls_config config; + + config.enable = cpu_to_le16(enable); + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true); +} + +int mwifiex_config_tdls_enable(struct mwifiex_private *priv) +{ + return mwifiex_config_tdls(priv, true); +} + +int mwifiex_config_tdls_disable(struct mwifiex_private *priv) +{ + return mwifiex_config_tdls(priv, false); +} + +int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv) +{ + struct mwifiex_tdls_config_cs_params config_tdls_cs_params; + + config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME; + config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK; + config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK; + + return 
mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_PARAMS, 0, + &config_tdls_cs_params, true); +} + +int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac) +{ + struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params; + + ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac); + + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_STOP, 0, + &stop_tdls_cs_params, true); +} + +int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac, + u8 primary_chan, u8 second_chan_offset, u8 band) +{ + struct mwifiex_tdls_init_cs_params start_tdls_cs_params; + + ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac); + start_tdls_cs_params.primary_chan = primary_chan; + start_tdls_cs_params.second_chan_offset = second_chan_offset; + start_tdls_cs_params.band = band; + + start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME); + start_tdls_cs_params.switch_timeout = + cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT); + start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS; + start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY; + + return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG, + ACT_TDLS_CS_INIT, 0, + &start_tdls_cs_params, true); +} diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 5ed9b794053e..8b1e5b5d47fe 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -370,8 +370,28 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv, /* consumes ack_skb */ skb_complete_wifi_ack(ack_skb, !tx_status->status); } else { + /* Remove broadcast address which was added by driver */ + memmove(ack_skb->data + + sizeof(struct ieee80211_hdr_3addr) + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16), + ack_skb->data + + sizeof(struct ieee80211_hdr_3addr) + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) + + ETH_ALEN, ack_skb->len - + (sizeof(struct ieee80211_hdr_3addr) + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) + + ETH_ALEN)); + ack_skb->len = ack_skb->len - ETH_ALEN; + /* Remove driver's proprietary header including 2 bytes + * of packet length and pass actual management frame buffer + * to cfg80211. 
+ */ cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie, - ack_skb->data, ack_skb->len, + ack_skb->data + + MWIFIEX_MGMT_FRAME_HEADER_SIZE + + sizeof(u16), ack_skb->len - + (MWIFIEX_MGMT_FRAME_HEADER_SIZE + + sizeof(u16)), !tx_status->status, GFP_ATOMIC); dev_kfree_skb_any(ack_skb); } diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index b74930054b8c..4d5a6e3b6361 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -808,7 +808,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_chan_def chandef) { - u8 config_bands = 0; + u8 config_bands = 0, old_bands = priv->adapter->config_bands; priv->bss_chandef = chandef; @@ -834,6 +834,11 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, } priv->adapter->config_bands = config_bands; + + if (old_bands != config_bands) { + mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy); + mwifiex_dnld_txpwr_table(priv); + } } int mwifiex_config_start_uap(struct mwifiex_private *priv, diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index 7bc1f850e3b7..492a8b3c636e 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -176,6 +176,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_UAP_BSS_IDLE: priv->media_connected = false; + priv->port_open = false; if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); mwifiex_stop_net_dev_queue(priv->netdev, adapter); @@ -185,6 +186,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_UAP_BSS_ACTIVE: priv->media_connected = true; + priv->port_open = true; if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); @@ -192,6 +194,7 @@ case EVENT_UAP_BSS_START: mwifiex_dbg(adapter, EVENT, "AP EVENT: event id: %#x\n", eventcause); + priv->port_open = false; memcpy(priv->netdev->dev_addr, adapter->event_body + 2, ETH_ALEN); if (priv->hist_data) @@ -297,6 +300,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_bt_coex_wlan_param_update_event(priv, adapter->event_skb); break; + case EVENT_TX_DATA_PAUSE: + mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n"); + mwifiex_process_tx_pause_event(priv, adapter->event_skb); + break; + + case EVENT_MULTI_CHAN_INFO: + mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n"); + mwifiex_process_multi_chan_event(priv, adapter->event_skb); + break; + default: mwifiex_dbg(adapter, EVENT, "event: unknown event id: %#x\n", eventcause); diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index aada93425f80..fbad99c50307 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -244,9 +244,11 @@ setup_for_next: if (card->rx_cmd_ep == context->ep) { mwifiex_usb_submit_rx_urb(context, size); } else { - context->skb = NULL; - if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) + if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) { mwifiex_usb_submit_rx_urb(context, size); + } else { + context->skb = NULL; + } } return; diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 790e61953abf..2504e422364a 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -531,6
+531,65 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac) return NULL; } +static struct mwifiex_sta_node * +mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status) +{ + struct mwifiex_sta_node *node; + + list_for_each_entry(node, &priv->sta_list, list) { + if (node->tdls_status == status) + return node; + } + + return NULL; +} + +/* If tdls channel switching is ongoing, tx data traffic should be + * blocked until the switching stage has completed. + */ +u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv) +{ + struct mwifiex_sta_node *sta_ptr; + + if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return false; + + sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING); + if (sta_ptr) + return true; + + return false; +} + +u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv) +{ + struct mwifiex_sta_node *sta_ptr; + + if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return false; + + sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN); + if (sta_ptr) + return true; + + return false; +} + +/* If tdls channel switching is ongoing or tdls operates on the off-channel, + * the cmd path should be blocked until tdls switches back to the base channel. + */ +u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv) +{ + if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) + return true; + + if (mwifiex_is_tdls_chan_switching(priv) || + mwifiex_is_tdls_off_chan(priv)) + return false; + + return true; +} + /* This function will add a sta_node entry to associated station list * table with the given mac address. * If entry exist already, existing entry is returned. diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index a8ea21c3340c..173d3663c2e0 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -160,9 +160,10 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) ra_list->tdls_link = false; ra_list->ba_status = BA_SETUP_NONE; ra_list->amsdu_in_ampdu = false; + ra_list->tx_paused = false; if (!mwifiex_queuing_ra_based(priv)) { - if (mwifiex_get_tdls_link_status(priv, ra) == - TDLS_SETUP_COMPLETE) { + if (mwifiex_is_tdls_link_setup + (mwifiex_get_tdls_link_status(priv, ra))) { ra_list->tdls_link = true; ra_list->is_11n_enabled = mwifiex_tdls_peer_11n_enabled(priv, ra); @@ -448,6 +449,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) } } +int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter) +{ + return atomic_read(&adapter->bypass_tx_pending) ? false : true; +} + /* * This function checks if WMM Tx queue is empty.
*/ @@ -459,6 +465,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) for (i = 0; i < adapter->priv_num; ++i) { priv = adapter->priv[i]; + if (priv && !priv->port_open) + continue; if (priv && atomic_read(&priv->wmm.tx_pkts_queued)) return false; } @@ -580,6 +588,10 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) + mwifiex_write_data_complete(priv->adapter, skb, 0, -1); + atomic_set(&priv->adapter->bypass_tx_pending, 0); + idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL); idr_destroy(&priv->ack_status_frames); } @@ -603,6 +615,88 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, return NULL; } +void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac, + u8 tx_pause) +{ + struct mwifiex_ra_list_tbl *ra_list; + u32 pkt_cnt = 0, tx_pkts_queued; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); + + for (i = 0; i < MAX_NUM_TID; ++i) { + ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac); + if (ra_list && ra_list->tx_paused != tx_pause) { + pkt_cnt += ra_list->total_pkt_count; + ra_list->tx_paused = tx_pause; + if (tx_pause) + priv->wmm.pkts_paused[i] += + ra_list->total_pkt_count; + else + priv->wmm.pkts_paused[i] -= + ra_list->total_pkt_count; + } + } + + if (pkt_cnt) { + tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued); + if (tx_pause) + tx_pkts_queued -= pkt_cnt; + else + tx_pkts_queued += pkt_cnt; + + atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued); + atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); + } + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); +} + +/* This function updates non-tdls peer ralist tx_pause while + * tdls channel switching + */ +void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv, + u8 *mac, u8 tx_pause) +{ + struct mwifiex_ra_list_tbl *ra_list; + u32 pkt_cnt = 0, tx_pkts_queued; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); + + for (i = 0; i < MAX_NUM_TID; ++i) { + list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list, + list) { + if (!memcmp(ra_list->ra, mac, ETH_ALEN)) + continue; + + if (ra_list->tx_paused != tx_pause) { + pkt_cnt += ra_list->total_pkt_count; + ra_list->tx_paused = tx_pause; + if (tx_pause) + priv->wmm.pkts_paused[i] += + ra_list->total_pkt_count; + else + priv->wmm.pkts_paused[i] -= + ra_list->total_pkt_count; + } + } + } + + if (pkt_cnt) { + tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued); + if (tx_pause) + tx_pkts_queued -= pkt_cnt; + else + tx_pkts_queued += pkt_cnt; + + atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued); + atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); + } + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); +} + /* * This function retrieves an RA list node for a given TID and * RA address pair. @@ -670,6 +764,18 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv, } /* + * This function adds a packet to the bypass TX queue. + * This is a special TX queue for packets which can be sent even when port_open + * is false. + */ +void +mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + skb_queue_tail(&priv->bypass_txq, skb); +} + +/* * This function adds a packet to WMM queue.
* * In disconnected state the packet is immediately dropped and the @@ -723,6 +829,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, !mwifiex_is_skb_mgmt_frame(skb)) { switch (tdls_status) { case TDLS_SETUP_COMPLETE: + case TDLS_CHAN_SWITCHING: + case TDLS_IN_BASE_CHAN: + case TDLS_IN_OFF_CHAN: ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; @@ -765,7 +874,10 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, atomic_set(&priv->wmm.highest_queued_prio, priv->tos_to_tid_inv[tid_down]); - atomic_inc(&priv->wmm.tx_pkts_queued); + if (ra_list->tx_paused) + priv->wmm.pkts_paused[tid_down]++; + else + atomic_inc(&priv->wmm.tx_pkts_queued); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } @@ -970,7 +1082,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; - if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0) + if (!priv_tmp->port_open || + (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)) continue; /* iterate over the WMM queues of the BSS */ @@ -987,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, list_for_each_entry(ptr, &tid_ptr->ra_list, list) { - if (!skb_queue_empty(&ptr->skb_head)) + if (!ptr->tx_paused && + !skb_queue_empty(&ptr->skb_head)) /* holds both locks */ goto found; } @@ -1339,6 +1453,38 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) return 0; } +void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter) +{ + struct mwifiex_tx_param tx_param; + struct sk_buff *skb; + struct mwifiex_txinfo *tx_info; + struct mwifiex_private *priv; + int i; + + if (adapter->data_sent || adapter->tx_lock_flag) + return; + + for (i = 0; i < adapter->priv_num; ++i) { + priv = adapter->priv[i]; + + if (skb_queue_empty(&priv->bypass_txq)) + continue; + + skb = skb_dequeue(&priv->bypass_txq); + tx_info = MWIFIEX_SKB_TXCB(skb); + + /* no aggregation for bypass packets */ + tx_param.next_pkt_len = 0; + + if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) { + skb_queue_head(&priv->bypass_txq, skb); + tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; + } else { + atomic_dec(&adapter->bypass_tx_pending); + } + } +} + /* * This function transmits the highest priority packet awaiting in the * WMM Queues. 
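The wmm.c hunks above do their tx-pause bookkeeping in two halves: whenever a RA list is paused or resumed, its packet count moves between the per-TID wmm.pkts_paused[] array and the atomic tx_pkts_queued counter, so mwifiex_wmm_lists_empty() and the dequeue path can keep treating "anything schedulable?" as a cheap counter check. The following is a minimal userspace sketch of just that accounting, not driver code: the type and field names are simplified stand-ins for the driver's real structures, and the spinlock/atomic handling is omitted.

#include <stdbool.h>
#include <stdio.h>

#define MAX_NUM_TID 8

struct mock_ra_list {
	bool tx_paused;
	unsigned int total_pkt_count;
};

struct mock_wmm {
	unsigned int pkts_paused[MAX_NUM_TID];
	unsigned int tx_pkts_queued;	/* atomic_t in the real driver */
};

/* Mirror of the pause/resume accounting: a paused list's packets stop
 * counting as schedulable work; a resumed list's packets count again.
 */
static void mock_update_tx_pause(struct mock_wmm *wmm, struct mock_ra_list *ra,
				 int tid, bool tx_pause)
{
	if (ra->tx_paused == tx_pause)
		return;

	ra->tx_paused = tx_pause;
	if (tx_pause) {
		wmm->pkts_paused[tid] += ra->total_pkt_count;
		wmm->tx_pkts_queued -= ra->total_pkt_count;
	} else {
		wmm->pkts_paused[tid] -= ra->total_pkt_count;
		wmm->tx_pkts_queued += ra->total_pkt_count;
	}
}

int main(void)
{
	struct mock_wmm wmm = { .tx_pkts_queued = 5 };
	struct mock_ra_list ra = { .total_pkt_count = 5 };

	mock_update_tx_pause(&wmm, &ra, 0, true);
	printf("paused:  queued=%u paused=%u\n",
	       wmm.tx_pkts_queued, wmm.pkts_paused[0]);
	mock_update_tx_pause(&wmm, &ra, 0, false);
	printf("resumed: queued=%u paused=%u\n",
	       wmm.tx_pkts_queued, wmm.pkts_paused[0]);
	return 0;
}

With all of a peer's packets parked in pkts_paused, the scheduler's emptiness check stays O(1) and no per-packet walk is needed when a TX_DATA_PAUSE event arrives.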
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h index 48ece0b35591..38f09762bd2f 100644 --- a/drivers/net/wireless/mwifiex/wmm.h +++ b/drivers/net/wireless/mwifiex/wmm.h @@ -99,12 +99,16 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead) void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, struct sk_buff *skb); +void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv, + struct sk_buff *skb); void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra); void mwifiex_rotate_priolists(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *ra, int tid); int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter); +int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter); void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter); +void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter); int mwifiex_is_ralist_valid(struct mwifiex_private *priv, struct mwifiex_ra_list_tbl *ra_list, int tid); @@ -126,6 +130,10 @@ struct mwifiex_ra_list_tbl * mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, const u8 *ra_addr); u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid); +void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac, + u8 tx_pause); +void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv, + u8 *mac, u8 tx_pause); struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, const u8 *ra_addr); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h index c940a87175ca..74a479ac323d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h @@ -32,24 +32,15 @@ /*------------------------------------------------------------------------- * Chip specific *-------------------------------------------------------------------------*/ -#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */ -#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */ #define NORMAL_CHIP BIT(4) #define CHIP_VENDOR_UMC BIT(5) #define CHIP_VENDOR_UMC_B_CUT BIT(6) -#define IS_8723_SERIES(version) \ - (((version) & CHIP_8723) ? true : false) - #define IS_92C_1T2R(version) \ (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R)) #define IS_VENDOR_UMC(version) \ (((version) & CHIP_VENDOR_UMC) ? true : false) -#define IS_VENDOR_8723_A_CUT(version) \ - (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \ - false : true) : false) - #define CHIP_BONDING_92C_1T2R 0x1 #define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 767358a553fb..7cf36619f250 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -2280,7 +2280,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; u8 u1tmp = 0; bool actuallyset = false; @@ -2357,20 +2356,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) { /* Enable register area 0x0-0xc. 
*/ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0); - if (IS_HARDWARE_TYPE_8723U(rtlhal)) { - /* - * We should configure HW PDn source for WiFi - * ONLY, and then our HW will be set in - * power-down mode if PDn source from all - * functions are configured. - */ - u1tmp = rtl_read_byte(rtlpriv, - REG_MULTI_FUNC_CTRL); - rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, - (u1tmp|WL_HWPDN_EN)); - } else { - rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812); - } + rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812); } if (e_rfpowerstate_toset == ERFOFF) { if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 490a7cf7c702..1c55a002d4bd 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -69,8 +69,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) chip_version = NORMAL_CHIP; chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0); chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0); - /* RTL8723 with BT function. */ - chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0); if (IS_VENDOR_UMC(chip_version)) chip_version |= ((value32 & CHIP_VER_RTL_MASK) ? CHIP_VENDOR_UMC_B_CUT : 0); @@ -78,10 +76,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM); chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) == CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0); - } else if (IS_8723_SERIES(chip_version)) { - value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS); - chip_version |= ((value32 & RF_RL_ID) ? - CHIP_8723_DRV_REV : 0); } } rtlhal->version = (enum version_8192c)chip_version; @@ -114,12 +108,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) case VERSION_NORMAL_UMC_CHIP_88C_B_CUT: versionid = "NORMAL_UMC_CHIP_88C_B_CUT"; break; - case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT: - versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT"; - break; - case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT: - versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT"; - break; case VERSION_TEST_CHIP_92C: versionid = "TEST_CHIP_92C"; break; diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c index 1961b8e28dc1..bb06fe836fe7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c @@ -3515,14 +3515,14 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw) for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; rfpath++) { if (rtlhal->current_bandtype == BAND_ON_2_4G) { - /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */ + /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) | BIT(18), 0); /* RF0x0b[16:14] =3b'111 */ rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B, 0x1c000, 0x07); } else { - /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */ + /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) | BIT(18), (BIT(16) | BIT(8)) >> 8); diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 3236d44b459d..b7f18e2155eb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, MSR, bt_msr); rtlpriv->cfg->ops->led_control(hw, ledaction); - if ((bt_msr & 0xfc) == MSR_AP) + if ((bt_msr & MSR_MASK) == MSR_AP) rtl_write_byte(rtlpriv, 
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 3236d44b459d..b7f18e2155eb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
 	rtl_write_byte(rtlpriv, MSR, bt_msr);
 	rtlpriv->cfg->ops->led_control(hw, ledaction);
-	if ((bt_msr & 0xfc) == MSR_AP)
+	if ((bt_msr & MSR_MASK) == MSR_AP)
 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
 	else
 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h index 53668fc8f23e..1d6110f9c1fb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
@@ -429,6 +429,7 @@
 #define MSR_ADHOC	0x01
 #define MSR_INFRA	0x02
 #define MSR_AP		0x03
+#define MSR_MASK	0x03
 
 #define RRSR_RSC_OFFSET		21
 #define RRSR_SHORT_OFFSET	23
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index e125974285cc..7df672a84530 100644 --- a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -74,7 +74,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
 	if (desc->rate <= wl->hw_min_ht_rate)
 		status->flag |= RX_FLAG_HT;
 
-	status->signal = desc->rssi;
+	status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+	status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
 	/*
 	 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h index a3b1618db27c..f5a7087cfb97 100644 --- a/drivers/net/wireless/ti/wlcore/rx.h +++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -30,6 +30,9 @@
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
+#define RSSI_LEVEL_BITMASK	0x7F
+#define ANT_DIVERSITY_BITMASK	BIT(7)
+
 #define SHORT_PREAMBLE_BIT	BIT(0)
 #define OFDM_RATE_BIT		BIT(6)
 #define PBCC_RATE_BIT		BIT(7)
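A standalone sketch of the wlcore rx.c unpacking above: per the new rx.h masks, the firmware packs the antenna-diversity bit into bit 7 of the RSSI byte, so the driver masks it off for the antenna and ORs BIT(7) back in, which keeps the signed dBm value negative. The raw descriptor byte here is made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1U << (n))
#define RSSI_LEVEL_BITMASK	0x7F
#define ANT_DIVERSITY_BITMASK	BIT(7)

int main(void)
{
	uint8_t raw = 0xC6;	/* hypothetical desc->rssi: antenna 1, level bits 0x46 */
	int8_t signal = (int8_t)((raw & RSSI_LEVEL_BITMASK) | BIT(7));
	unsigned int antenna = (raw & ANT_DIVERSITY_BITMASK) >> 7;

	/* 0x46 | 0x80 = 0xC6, which as a signed byte is -58 */
	printf("signal %d dBm, antenna %u\n", signal, antenna);
	return 0;
}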
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index ea7e07abca4e..c172da56b550 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -293,7 +293,8 @@ static int wl1271_probe(struct sdio_func *func,
 	/* Use block mode for transferring over one block size of data */
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-	if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+	ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+	if (ret)
 		goto out_free_glue;
 
 	/* if sdio can keep power while host is suspended, enable wow */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 8a495b318b6f..c6cb85a85c89 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h
@@ -325,9 +325,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 		queue->pending_prod + queue->pending_cons;
 }
 
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index fdc60db60829..7c8c23cc6896 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c
@@ -266,7 +266,8 @@ EXPORT_SYMBOL(of_phy_attach);
 bool of_phy_is_fixed_link(struct device_node *np)
 {
 	struct device_node *dn;
-	int len;
+	int len, err;
+	const char *managed;
 
 	/* New binding */
 	dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +276,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
 		return true;
 	}
 
+	err = of_property_read_string(np, "managed", &managed);
+	if (err == 0 && strcmp(managed, "auto") != 0)
+		return true;
+
 	/* Old binding */
 	if (of_get_property(np, "fixed-link", &len) &&
 	    len == (5 * sizeof(__be32)))
@@ -289,8 +294,18 @@ int of_phy_register_fixed_link(struct device_node *np)
 	struct fixed_phy_status status = {};
 	struct device_node *fixed_link_node;
 	const __be32 *fixed_link_prop;
-	int len;
+	int len, err;
 	struct phy_device *phy;
+	const char *managed;
+
+	err = of_property_read_string(np, "managed", &managed);
+	if (err == 0) {
+		if (strcmp(managed, "in-band-status") == 0) {
+			/* status is zeroed, namely its .link member */
+			phy = fixed_phy_register(PHY_POLL, &status, np);
+			return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+		}
+	}
 
 	/* New binding */
 	fixed_link_node = of_get_child_by_name(np, "fixed-link");
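A minimal userspace sketch of the "managed" property handling added to of_phy_is_fixed_link() above. The device-tree accessor is stubbed with a string argument, and the names read_managed/is_fixed_link are hypothetical, but the decision rule matches the patch: any "managed" value other than "auto" (for example "in-band-status") makes the link count as fixed.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* stand-in for of_property_read_string(): returns 0 on success */
static int read_managed(const char *dt_value, const char **out)
{
	if (!dt_value)
		return -1;	/* property absent */
	*out = dt_value;
	return 0;
}

static bool is_fixed_link(const char *managed_prop, bool has_fixed_link_node)
{
	const char *managed;
	int err;

	if (has_fixed_link_node)	/* new "fixed-link" subnode binding */
		return true;

	err = read_managed(managed_prop, &managed);
	if (err == 0 && strcmp(managed, "auto") != 0)
		return true;	/* e.g. managed = "in-band-status" */

	return false;		/* the old-binding check would follow here */
}

int main(void)
{
	printf("%d\n", is_fixed_link("in-band-status", false));	/* 1 */
	printf("%d\n", is_fixed_link("auto", false));		/* 0 */
	printf("%d\n", is_fixed_link(NULL, false));		/* 0 */
	return 0;
}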