Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_alb.c | 44
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 12
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 96
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 9
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 46
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 3
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 7
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 22
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 355
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 71
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 195
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 67
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 55
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 6
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 1
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 14
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c | 23
-rw-r--r--  drivers/net/gtp.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc_bpf.c | 13
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 5
-rw-r--r--  drivers/net/netdevsim/dev.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 43
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c | 23
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.h | 2
-rw-r--r--  drivers/net/wireguard/allowedips.c | 1
-rw-r--r--  drivers/net/wireguard/device.c | 4
-rw-r--r--  drivers/net/wireguard/netlink.c | 6
-rw-r--r--  drivers/net/wireguard/noise.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 52
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 71
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/time-event.h | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 10
-rw-r--r--  drivers/net/wireless/marvell/libertas/cfg.c | 2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/scan.c | 7
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 1
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/wmm.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw88/wow.c | 23
86 files changed, 852 insertions, 956 deletions
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 4f2e6910c623..1cc2cd894f87 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
- struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
if (is_broadcast_ether_addr(eth_data->h_dest) ||
- iph->daddr == ip_bcast ||
- iph->protocol == IPPROTO_IGMP) {
+ !pskb_network_may_pull(skb, sizeof(*iph))) {
+ do_tx_balance = false;
+ break;
+ }
+ iph = ip_hdr(skb);
+ if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
hash_size = sizeof(iph->daddr);
- }
break;
- case ETH_P_IPV6:
+ }
+ case ETH_P_IPV6: {
+ const struct ipv6hdr *ip6hdr;
+
/* IPv6 doesn't really use broadcast mac address, but leave
* that here just in case.
*/
@@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- /* Additianally, DAD probes should not be tx-balanced as that
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ do_tx_balance = false;
+ break;
+ }
+ /* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
*/
@@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char *)&(ipv6_hdr(skb)->daddr);
- hash_size = sizeof(ipv6_hdr(skb)->daddr);
+ hash_start = (char *)&ip6hdr->daddr;
+ hash_size = sizeof(ip6hdr->daddr);
break;
- case ETH_P_IPX:
- if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+ }
+ case ETH_P_IPX: {
+ const struct ipxhdr *ipxhdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+ do_tx_balance = false;
+ break;
+ }
+ ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+ if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
do_tx_balance = false;
break;
}
- if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+ if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
/* The only protocol worth balancing in
* this family since it has an "ARP" like
* mechanism
@@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
+ eth_data = eth_hdr(skb);
hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
+ }
case ETH_P_ARP:
do_tx_balance = false;
if (bond_info->rlb_enabled)
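The fix applied in the hunks above follows one pattern per protocol branch: make sure the full network header is actually present in the skb's linear data (pskb_network_may_pull()) before any of its fields are dereferenced. Below is a minimal userspace sketch of that guard, assuming a flat packet buffer instead of an skb; the struct and function names are illustrative, not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ipv4_hdr_view {          /* only the fields the balancer reads */
        uint8_t  protocol;
        uint32_t daddr;
};

/* Refuse to parse unless the whole header fits inside the buffer. */
static bool pull_ipv4(const uint8_t *pkt, size_t len, size_t net_off,
                      struct ipv4_hdr_view *out)
{
        if (net_off + 20 > len)   /* 20 = minimum IPv4 header length */
                return false;
        out->protocol = pkt[net_off + 9];           /* protocol field */
        memcpy(&out->daddr, pkt + net_off + 16, 4); /* destination IP */
        return true;
}

int main(void)
{
        uint8_t runt[18] = { 0 };  /* too short to hold an IPv4 header */
        struct ipv4_hdr_view v;

        printf("parsed: %d\n", pull_ipv4(runt, sizeof(runt), 0, &v));
        return 0;
}

The point is the ordering: the length check comes before the first field read, which is what the added pskb_network_may_pull() calls guarantee for the IPv4, IPv6 and IPX branches.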
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 060497512159..449a22172e07 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -693,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false, ds->vlan_filtering);
+ b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3e8635311d0d..d1955543acd1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
/* Force link status for IMP port */
reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+ reg |= (MII_SW_OR | LINK_STS);
+ if (priv->type == BCM7278_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index c5f64959a184..1142768969c2 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = {
module_spi_driver(ksz9477_spi_driver);
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f332cb4b2fbf..79cad5e751c6 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -236,7 +236,7 @@ struct mv88e6xxx_port {
bool mirror_ingress;
bool mirror_egress;
unsigned int serdes_irq;
- char serdes_irq_name[32];
+ char serdes_irq_name[64];
};
struct mv88e6xxx_chip {
@@ -293,16 +293,16 @@ struct mv88e6xxx_chip {
struct mv88e6xxx_irq g1_irq;
struct mv88e6xxx_irq g2_irq;
int irq;
- char irq_name[32];
+ char irq_name[64];
int device_irq;
- char device_irq_name[32];
+ char device_irq_name[64];
int watchdog_irq;
- char watchdog_irq_name[32];
+ char watchdog_irq_name[64];
int atu_prob_irq;
- char atu_prob_irq_name[32];
+ char atu_prob_irq_name[64];
int vtu_prob_irq;
- char vtu_prob_irq_name[32];
+ char vtu_prob_irq_name[64];
struct kthread_worker *kworker;
struct kthread_delayed_work irq_poll_work;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ea62604fdf8c..1fb58f9ad80b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
u16 command_id, bool capture)
{
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(command_id >= queue->q_depth)) {
pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, queue->q_depth);
@@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
feature_ver);
}
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->rss.hash_func;
+}
+
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ (ena_dev->rss).hash_key;
+
+ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
+ /* The key is stored in the device in u32 array
+ * as well as the API requires the key to be passed in this
+ * format. Thus the size of our array should be divided by 4
+ */
+ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+}
+
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ hash_key = (ena_dev->rss).hash_key;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ ena_dev->rss.hash_key_dma_addr,
+ sizeof(ena_dev->rss.hash_key), 0);
+ if (unlikely(rc)) {
+ hash_key = NULL;
+ return -EOPNOTSUPP;
+ }
rss->hash_key =
dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
return 0;
}
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
- struct ena_rss *rss = &ena_dev->rss;
- u8 idx;
- u16 i;
-
- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
- if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
- if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
-
- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
- }
-
- return 0;
-}
-
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 intr_delay_resolution)
{
@@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
switch (func) {
case ENA_ADMIN_TOEPLITZ:
- if (key_len > sizeof(hash_key->key)) {
- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
+ if (key) {
+ if (key_len != sizeof(hash_key->key)) {
+ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
}
-
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
break;
case ENA_ADMIN_CRC32:
rss->hash_init_val = init_val;
@@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ /* ffs() returns 1 in case the lsb is set */
+ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
+ if (rss->hash_func)
+ rss->hash_func--;
+
if (func)
*func = rss->hash_func;
@@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
if (!ind_tbl)
return 0;
- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
- if (unlikely(rc))
- return rc;
-
for (i = 0; i < (1 << rss->tbl_log_size); i++)
ind_tbl[i] = rss->host_rss_ind_tbl[i];
@@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
if (unlikely(rc))
goto err_indr_tbl;
+ /* The following function might return unsupported in case the
+ * device doesn't support setting the key / hash function. We can safely
+ * ignore this error and have indirection table support only.
+ */
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc))
+ if (unlikely(rc) && rc != -EOPNOTSUPP)
goto err_hash_key;
+ else if (rc != -EOPNOTSUPP)
+ ena_com_hash_key_fill_default_key(ena_dev);
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
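For the hash-function hunk above: the device reports the selected RSS hash function as a one-hot bit mask, and ffs() is 1-based, so the result is decremented to get the 0-based ena_admin_hash_functions value the driver stores. A small standalone illustration with a made-up mask value:

#include <stdio.h>
#include <strings.h>

int main(void)
{
        unsigned int selected_func = 0x4;  /* bit 2 set in the response */
        int hash_func = ffs(selected_func);

        if (hash_func)
                hash_func--;               /* back to a 0-based index */
        printf("mask 0x%x -> index %d\n", selected_func, hash_func);
        return 0;
}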
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 0ce37d54ed10..469f298199a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -44,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/netdevice.h>
#include "ena_common_defs.h"
#include "ena_admin_defs.h"
@@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
*/
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+/* ena_com_get_current_hash_function - Get RSS hash function
+ * @ena_dev: ENA communication layer struct
+ *
+ * Return the current hash function.
+ * @return: 0 or one of the ena_admin_hash_functions values.
+ */
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
+
/* ena_com_fill_hash_function - Fill RSS hash function
* @ena_dev: ENA communication layer struct
* @func: The hash function (Toeplitz or crc)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b4e891d49a94..ced1d577b62a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ if (!indir)
+ return 0;
+
+ rc = ena_com_indirect_table_get(ena_dev, indir);
+ if (rc)
+ return rc;
+
+ /* Our internal representation of the indices is: even indices
+ * for Tx and uneven indices for Rx. We need to convert the Rx
+ * indices to be consecutive
+ */
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
+ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
+
+ return rc;
+}
+
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -644,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 func;
int rc;
- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+ rc = ena_indirection_table_get(adapter, indir);
if (rc)
return rc;
+ /* We call this function in order to check if the device
+ * supports getting/setting the hash function.
+ */
rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ key = NULL;
+ hfunc = NULL;
+ rc = 0;
+ }
+
+ return rc;
+ }
+
if (rc)
return rc;
@@ -657,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
func = ETH_RSS_HASH_TOP;
break;
case ENA_ADMIN_CRC32:
- func = ETH_RSS_HASH_XOR;
+ func = ETH_RSS_HASH_CRC32;
break;
default:
netif_err(adapter, drv, netdev,
@@ -700,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
}
switch (hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ func = ena_com_get_current_hash_function(ena_dev);
+ break;
case ETH_RSS_HASH_TOP:
func = ENA_ADMIN_TOEPLITZ;
break;
- case ETH_RSS_HASH_XOR:
+ case ETH_RSS_HASH_CRC32:
func = ENA_ADMIN_CRC32;
break;
default:
@@ -814,6 +853,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 894e8c1a8cf1..0b2fd96b93d7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3706,8 +3706,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
return;
- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
- adapter->keep_alive_timeout);
+ keep_alive_expired = adapter->last_keep_alive_jiffies +
+ adapter->keep_alive_timeout;
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
@@ -3809,7 +3809,7 @@ static void ena_timer_service(struct timer_list *t)
}
/* Reset the timer */
- mod_timer(&adapter->timer_service, jiffies + HZ);
+ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
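The keep-alive change above stops rounding the expiry deadline itself and instead rounds only the periodic timer re-arm; the expiry test relies on the kernel's wraparound-safe jiffies comparison. A minimal userspace sketch of that comparison idiom (signed difference, as used by time_is_before_jiffies()), with made-up counter values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is before b": compare via a signed difference
 * instead of comparing the raw counters directly. */
static bool time_is_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        uint32_t now = 0xFFFFFFF0u;        /* counter about to wrap */
        uint32_t deadline = now + 0x40;    /* wraps past zero */

        printf("expired? %s\n", time_is_before(deadline, now) ? "yes" : "no");
        now += 0x80;                       /* time passes, counter wrapped */
        printf("expired? %s\n", time_is_before(deadline, now) ? "yes" : "no");
        return 0;
}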
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 094324fd0edc..8795e0b1dc3c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -130,6 +130,8 @@
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
+#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
#define ENA_MGMNT_IRQ_IDX 0
#define ENA_IO_IRQ_FIRST_IDX 1
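The two new macros invert the driver's existing even/odd layout: combined queue q uses TX index 2*q and RX index 2*q + 1, so TX indices map back with q/2 and RX indices with (q - 1)/2. A quick standalone round-trip check (plain C mirror of the macros, illustrative only):

#include <assert.h>
#include <stdio.h>

#define IO_TXQ_IDX(q)                   (2 * (q))
#define IO_RXQ_IDX(q)                   (2 * (q) + 1)
#define IO_TXQ_IDX_TO_COMBINED_IDX(q)   ((q) / 2)
#define IO_RXQ_IDX_TO_COMBINED_IDX(q)   (((q) - 1) / 2)

int main(void)
{
        for (int q = 0; q < 8; q++) {
                assert(IO_TXQ_IDX_TO_COMBINED_IDX(IO_TXQ_IDX(q)) == q);
                assert(IO_RXQ_IDX_TO_COMBINED_IDX(IO_RXQ_IDX(q)) == q);
        }
        printf("combined queue 3 -> txq %d, rxq %d\n",
               IO_TXQ_IDX(3), IO_RXQ_IDX(3));
        return 0;
}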
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f07ac0e0af59..e0611cba87f9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2736,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
umac_reset(priv);
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
/* We may have been suspended and never received a WOL event that
* would turn off MPD detection, take care of that now
*/
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 7a2fe63d1136..4508f0d150da 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt {
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN 8
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
+ * false amba_error in TX path from the DMA assuming there is not enough
+ * space in the SRAM (16KB) even when there is.
+ */
+#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
@@ -1791,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
/* Validate LSO compatibility */
- /* there is only one buffer */
- if (!skb_is_nonlinear(skb))
+ /* there is only one buffer or protocol is not UDP */
+ if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
return features;
/* length of header */
hdrlen = skb_transport_offset(skb);
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- hdrlen += tcp_hdrlen(skb);
- /* For LSO:
+ /* For UFO only:
* When software supplies two or more payload buffers all payload buffers
* apart from the last must be a multiple of 8 bytes in size.
*/
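A quick arithmetic check on the new limit in the comment above: 0x3FC0 is 16320 bytes, 64 bytes short of the 16KB SRAM, and still a multiple of the 8-byte MACB_TX_LEN_ALIGN. A tiny standalone verification (illustrative only):

#include <stdio.h>

int main(void)
{
        unsigned int gem_max_tx_len = 0x3FC0;

        printf("%u bytes, mod 8 = %u, headroom below 16KB = %u\n",
               gem_max_tx_len, gem_max_tx_len % 8, 16384 - gem_max_tx_len);
        return 0;
}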
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c4f6ec0cd183..17a4110c2e49 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type)
if (lmac_type == BGX_MODE_QSGMII)
return PHY_INTERFACE_MODE_QSGMII;
if (lmac_type == BGX_MODE_RGMII)
- return PHY_INTERFACE_MODE_RGMII;
+ return PHY_INTERFACE_MODE_RGMII_RXID;
return PHY_INTERFACE_MODE_SGMII;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 9d1f2f88b945..de30d61af065 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3403,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.fallback));
seq_printf(seq, "IPSec PDU: %10u\n",
atomic_read(&adap->chcr_stats.ipsec_cnt));
+ seq_printf(seq, "TLS PDU Tx: %10u\n",
+ atomic_read(&adap->chcr_stats.tls_pdu_tx));
+ seq_printf(seq, "TLS PDU Rx: %10u\n",
+ atomic_read(&adap->chcr_stats.tls_pdu_rx));
+ seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+ atomic_read(&adap->chcr_stats.tls_key));
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(chcr_stats);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index bbd7b3175f09..ddf60dc9ad16 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev)
napi_disable(&enic->napi[i]);
netif_carrier_off(netdev);
- netif_tx_disable(netdev);
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
+ netif_tx_disable(netdev);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d305d1b24b0a..42b798a3fad4 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -417,7 +417,10 @@ static void de_rx (struct de_private *de)
if (status & DescOwn)
break;
- len = ((status >> 16) & 0x7ff) - 4;
+ /* the length is actually a 15 bit value here according
+ * to Table 4-1 in the DE2104x spec so mask is 0x7fff
+ */
+ len = ((status >> 16) & 0x7fff) - 4;
mapping = de->rx_skb[rx_tail].mapping;
if (unlikely(drop)) {
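The widened mask matters once frames of 2048 bytes or more land in the ring: per the comment above, the length occupies a 15-bit field in bits 30:16 of the status word, so masking with 0x7ff silently truncates larger values. A small standalone example with a made-up status word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t status = 0x0bb80000;  /* length field = 3000 */
        unsigned int old_len = ((status >> 16) & 0x7ff) - 4;
        unsigned int new_len = ((status >> 16) & 0x7fff) - 4;

        printf("old mask: %u bytes, new mask: %u bytes\n", old_len, new_len);
        return 0;
}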
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6a7e8993119f..2bd7ace0a953 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -74,7 +74,7 @@ config FSL_XGMAC_MDIO
config UCC_GETH
tristate "Freescale QE Gigabit Ethernet"
- depends on QUICC_ENGINE
+ depends on QUICC_ENGINE && PPC32
select FSL_PQ_MDIO
select PHYLIB
---help---
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 09dbcd819d84..fd93d542f497 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2453,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev)
mac_dev->adjust_link(mac_dev);
}
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA 0x03a1b400
+
static int dpaa_phy_init(struct net_device *net_dev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -2471,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev)
return -ENODEV;
}
- /* Remove any features not supported by the controller */
- ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ /* Unless the PHY is capable of rate adaptation */
+ if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+ ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+ /* remove any features not supported by the controller */
+ ethtool_convert_legacy_u32_to_link_mode(mask,
+ mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ }
phy_support_asym_pause(phy_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ec5f6eeb639b..492bc9446463 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6113,6 +6113,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
struct hclge_fd_rule_tuples *tuples)
{
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
tuples->ip_proto = fkeys->basic.ip_proto;
tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
@@ -6121,12 +6124,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
} else {
- memcpy(tuples->src_ip,
- fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
- sizeof(tuples->src_ip));
- memcpy(tuples->dst_ip,
- fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
- sizeof(tuples->dst_ip));
+ int i;
+
+ for (i = 0; i < IPV6_SIZE; i++) {
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+ }
}
}
@@ -9834,6 +9837,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to reinit manager table, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_init_fd_config(hdev);
if (ret) {
dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
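For the flow-tuple hunk above: an IPv6 address is four big-endian 32-bit words, so the rewrite converts each word into host order with be32_to_cpu() instead of memcpy()ing raw wire-order bytes. A userspace analogue using ntohl() (illustrative only, not the driver's types):

#include <arpa/inet.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#define IPV6_WORDS 4

int main(void)
{
        struct in6_addr addr;
        uint32_t wire[IPV6_WORDS], host[IPV6_WORDS];

        inet_pton(AF_INET6, "2001:db8::1", &addr);
        memcpy(wire, &addr, sizeof(wire));      /* big-endian words */

        for (int i = 0; i < IPV6_WORDS; i++)
                host[i] = ntohl(wire[i]);       /* like be32_to_cpu() */

        printf("first word: wire 0x%08" PRIx32 " -> host 0x%08" PRIx32 "\n",
               wire[0], host[0]);
        return 0;
}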
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 180224eab1ca..28db13253a5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -566,7 +566,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
*/
kinfo->num_tc = vport->vport_id ? 1 :
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
+ vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
(vport->vport_id ? (vport->vport_id - 1) : 0);
max_rss_size = min_t(u16, hdev->rss_size_max,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 69523ac85639..56b9e445732b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2362,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2424,7 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 42058fad6a3c..0b7d29192b2c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
struct i40e_ring *ring;
if (test_bit(__I40E_CONFIG_BUSY, pf->state))
- return -ENETDOWN;
+ return -EAGAIN;
if (test_bit(__I40E_VSI_DOWN, vsi->state))
return -ENETDOWN;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 4459bc564b11..6873998cf145 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1660,6 +1660,7 @@ struct ice_aqc_get_pkg_info_resp {
__le32 count;
struct ice_aqc_get_pkg_info pkg_info[1];
};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d8e975cceb21..81885efadc7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (err)
return err;
- dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
ring->q_index);
} else {
ring->zca.free = NULL;
@@ -405,8 +405,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -428,8 +427,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
if (err)
- dev_info(&vsi->back->pdev->dev,
- "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
ring->xsk_umem ? "UMEM enabled " : "",
ring->q_index, pf_q);
@@ -490,8 +488,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
/* wait for the change to finish */
ret = ice_pf_rxq_wait(pf, pf_q, ena);
if (ret)
- dev_err(ice_pf_to_dev(pf),
- "VSI idx %d Rx ring %d %sable timeout\n",
+ dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
vsi->idx, pf_q, (ena ? "en" : "dis"));
return ret;
@@ -506,20 +503,15 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
*/
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- struct device *dev;
- int err;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ int v_idx, err;
- dev = ice_pf_to_dev(pf);
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
return -EEXIST;
}
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
@@ -648,8 +640,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf),
- "Failed to set LAN Tx queue context, error: %d\n",
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
return -ENODEV;
}
@@ -815,14 +806,12 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* queues at the hardware level anyway.
*/
if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0207e28c2682..04d5db0a25bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -25,20 +25,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
- * ice_dev_onetime_setup - Temporary HW/FW workarounds
- * @hw: pointer to the HW structure
- *
- * This function provides temporary workarounds for certain issues
- * that are expected to be fixed in the HW/FW.
- */
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-#define MBX_PF_VT_PFALLOC 0x00231E80
- /* set VFs per PF */
- wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
-}
-
-/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -602,10 +588,10 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
}
/**
- * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
- * Determines the ITR/intrl granularities based on the maximum aggregate
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
@@ -763,8 +749,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- ice_dev_onetime_setup(hw);
-
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -834,7 +818,7 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay;
+ u32 cnt, reg = 0, grst_delay, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
@@ -856,13 +840,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
- GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
- if (reg == ICE_RESET_DONE_MASK) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset processes done. %d\n", cnt);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index b5c013fdaaf9..f9fc005d35a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -54,8 +54,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-void ice_dev_onetime_setup(struct ice_hw *hw);
-
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 713e8a892e14..adb8dab765c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1323,13 +1323,13 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
}
/**
- * ice_aq_query_port_ets - query port ets configuration
+ * ice_aq_query_port_ets - query port ETS configuration
* @pi: port information structure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
- * query current port ets configuration
+ * query current port ETS configuration
*/
static enum ice_status
ice_aq_query_port_ets(struct ice_port_info *pi,
@@ -1416,13 +1416,13 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
}
/**
- * ice_query_port_ets - query port ets configuration
+ * ice_query_port_ets - query port ETS configuration
* @pi: port information structure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
- * query current port ets configuration and update the
+ * query current port ETS configuration and update the
* SW DB with the TC changes
*/
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 0664e5b8d130..7108fb41b604 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -315,9 +315,9 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
+ struct ice_dcbx_cfg *err_cfg;
enum ice_status ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
@@ -330,53 +330,25 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
- local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
- desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ mutex_lock(&pf->tc_mutex);
- /* Save current willing state and force FW to unwilling */
- local_dcbx_cfg->etscfg.willing = 0x0;
- local_dcbx_cfg->pfc.willing = 0x0;
- local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+ if (!pf->hw.port_info->is_sw_lldp)
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
- ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(dev, "Failed to set DCB to unwilling\n");
+ dev_err(dev, "Failed to set DCB config in rebuild\n");
goto dcb_error;
}
- /* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg)
- goto dcb_error;
-
- ice_init_dcb(&pf->hw, true);
- if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
- pf->hw.port_info->is_sw_lldp = true;
- else
- pf->hw.port_info->is_sw_lldp = false;
-
- if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
- /* difference in cfg detected - disable DCB till next MIB */
- dev_err(dev, "Set local MIB not accurate\n");
- kfree(prev_cfg);
- goto dcb_error;
+ if (!pf->hw.port_info->is_sw_lldp) {
+ ret = ice_cfg_lldp_mib_change(&pf->hw, true);
+ if (ret && !pf->hw.port_info->is_sw_lldp) {
+ dev_err(dev, "Failed to register for MIB changes\n");
+ goto dcb_error;
+ }
}
- /* fetched config congruent to previous configuration */
- kfree(prev_cfg);
-
- /* Set the local desired config */
- if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
- memcpy(local_dcbx_cfg, desired_dcbx_cfg,
- sizeof(*local_dcbx_cfg));
-
- ice_cfg_etsrec_defaults(pf->hw.port_info);
- ret = ice_set_dcb_cfg(pf->hw.port_info);
- if (ret) {
- dev_err(dev, "Failed to set desired config\n");
- goto dcb_error;
- }
dev_info(dev, "DCB restored after reset\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -384,26 +356,32 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
+ mutex_unlock(&pf->tc_mutex);
+
return;
dcb_error:
dev_err(dev, "Disabling DCB until new settings occur\n");
- prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg)
+ err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL);
+ if (!err_cfg) {
+ mutex_unlock(&pf->tc_mutex);
return;
+ }
- prev_cfg->etscfg.willing = true;
- prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
- prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
- memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ err_cfg->etscfg.willing = true;
+ err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+ err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec));
/* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
* here as is done for other calls to that function. That check is
* not necessary since this is in this function's error cleanup path.
* Suppress the Coverity warning with the following comment...
*/
/* coverity[check_return] */
- ice_pf_dcb_cfg(pf, prev_cfg, false);
- kfree(prev_cfg);
+ ice_pf_dcb_cfg(pf, err_cfg, false);
+ kfree(err_cfg);
+
+ mutex_unlock(&pf->tc_mutex);
}
/**
@@ -434,9 +412,9 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
}
/**
- * ice_dcb_sw_default_config - Apply a default DCB config
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
* @pf: PF to apply config to
- * @ets_willing: configure ets willing
+ * @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
@@ -599,8 +577,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- dev_info(dev,
- "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
+ dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
struct ice_vsi *pf_vsi;
@@ -610,8 +587,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
- dev_err(dev,
- "Failed to set local DCB config %d\n", err);
+ dev_err(dev, "Failed to set local DCB config %d\n",
+ err);
err = -EIO;
goto dcb_init_err;
}
@@ -777,6 +754,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
}
+ mutex_lock(&pf->tc_mutex);
+
/* store the old configuration */
tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
@@ -787,20 +766,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(dev, "Failed to get DCB config\n");
- return;
+ goto out;
}
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
- return;
+ goto out;
}
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
- return;
+ goto out;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
@@ -814,7 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_dbg(dev, "PF VSI doesn't exist\n");
- return;
+ goto out;
}
rtnl_lock();
@@ -823,13 +802,15 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
- rtnl_unlock();
- return;
+ goto unlock_rtnl;
}
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
ice_ena_vsi(pf_vsi, true);
+unlock_rtnl:
rtnl_unlock();
+out:
+ mutex_unlock(&pf->tc_mutex);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index d870c1aedc17..b61aba428adb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -297,8 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
return;
*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
- dev_dbg(ice_pf_to_dev(pf),
- "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+ dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
}
@@ -418,8 +417,8 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
return;
*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
- dev_dbg(ice_pf_to_dev(pf),
- "Get PG config prio=%d tc=%d\n", prio, *pgid);
+ dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
+ *pgid);
}
/**
@@ -713,13 +712,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
mutex_lock(&pf->tc_mutex);
- ret = dcb_ieee_delapp(netdev, app);
- if (ret)
- goto delapp_out;
-
old_cfg = &pf->hw.port_info->local_dcbx_cfg;
- if (old_cfg->numapps == 1)
+ if (old_cfg->numapps <= 1)
+ goto delapp_out;
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
@@ -882,8 +881,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
sapp.protocol = app->prot_id;
sapp.priority = app->priority;
err = ice_dcbnl_delapp(vsi->netdev, &sapp);
- dev_dbg(&vsi->back->pdev->dev,
- "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
vsi->idx, err, app->selector, app->prot_id, app->priority);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 90c6a3ca20c9..b002ab4e5838 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -166,13 +166,24 @@ static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 oem_build;
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
- sizeof(drvinfo->fw_version));
+
+ /* Display NVM version (from which the firmware version can be
+ * determined) which contains more pertinent information.
+ */
+ ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
+ &nvm_ver_hi, &nvm_ver_lo);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
+ hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
@@ -363,8 +374,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
val = rd32(hw, reg);
if (val == pattern)
continue;
- dev_err(dev,
- "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
+ dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
, __func__, reg, pattern, val);
return 1;
}
@@ -372,8 +382,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
wr32(hw, reg, orig_val);
val = rd32(hw, reg);
if (val != orig_val) {
- dev_err(dev,
- "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
+ dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
, __func__, reg, orig_val, val);
return 1;
}
@@ -791,8 +800,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
set_bit(__ICE_TESTING, pf->state);
if (ice_active_vfs(pf)) {
- dev_warn(dev,
- "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+ dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[ICE_ETH_TEST_REG] = 1;
data[ICE_ETH_TEST_EEPROM] = 1;
data[ICE_ETH_TEST_INTR] = 1;
@@ -1047,7 +1055,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fec = ICE_FEC_NONE;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
+ dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
fecparam->fec);
return -EINVAL;
}
@@ -1200,8 +1208,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* events to respond to.
*/
if (status)
- dev_info(dev,
- "Failed to unreg for LLDP events\n");
+ dev_info(dev, "Failed to unreg for LLDP events\n");
/* The AQ call to stop the FW LLDP agent will generate
* an error if the agent is already stopped.
@@ -1256,8 +1263,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
- dev_dbg(dev,
- "Fail to enable MIB change events\n");
+ dev_dbg(dev, "Fail to enable MIB change events\n");
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1710,291 +1716,13 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi = np->vsi->port_info;
- struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
- bool unrecog_phy_high = false;
- bool unrecog_phy_low = false;
link_info = &vsi->port_info->phy.link_info;
- /* Initialize supported and advertised settings based on PHY settings */
- switch (link_info->phy_type_low) {
- case ICE_PHY_TYPE_LOW_100BASE_TX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_100M_SGMII:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1G_SGMII:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_SX:
- case ICE_PHY_TYPE_LOW_1000BASE_LX:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_KX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseKX_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseKX_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_X:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_KX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_5GBASE_T:
- case ICE_PHY_TYPE_LOW_5GBASE_KR:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 5000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 5000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10G_SFI_DA:
- case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_SR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseSR_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseLR_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseKR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_T:
- case ICE_PHY_TYPE_LOW_25GBASE_CR:
- case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
- case ICE_PHY_TYPE_LOW_25GBASE_CR1:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseCR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_SR:
- case ICE_PHY_TYPE_LOW_25GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseSR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_KR:
- case ICE_PHY_TYPE_LOW_25GBASE_KR1:
- case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseKR_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_CR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_40G_XLAUI:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_SR4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseSR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_LR4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_KR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseKR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_CR2:
- case ICE_PHY_TYPE_LOW_50GBASE_CP:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseCR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_LAUI2:
- case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_AUI2:
- case ICE_PHY_TYPE_LOW_50GBASE_SR:
- case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_AUI1:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_KR2:
- case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseKR2_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseKR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_SR2:
- case ICE_PHY_TYPE_LOW_50GBASE_LR2:
- case ICE_PHY_TYPE_LOW_50GBASE_FR:
- case ICE_PHY_TYPE_LOW_50GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseSR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_CR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
- case ICE_PHY_TYPE_LOW_100G_CAUI4:
- case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
- case ICE_PHY_TYPE_LOW_100G_AUI4:
- case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_CP2:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_SR4:
- case ICE_PHY_TYPE_LOW_100GBASE_SR2:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseSR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_LR4:
- case ICE_PHY_TYPE_LOW_100GBASE_DR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseLR4_ER4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_KR4:
- case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
- break;
- default:
- unrecog_phy_low = true;
- }
-
- switch (link_info->phy_type_high) {
- case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
- break;
- case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
- case ICE_PHY_TYPE_HIGH_100G_CAUI2:
- case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
- case ICE_PHY_TYPE_HIGH_100G_AUI2:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- break;
- default:
- unrecog_phy_high = true;
- }
-
- if (unrecog_phy_low && unrecog_phy_high) {
- /* if we got here and link is up something bad is afoot */
- netdev_info(netdev,
- "WARNING: Unrecognized PHY_Low (0x%llx).\n",
- (u64)link_info->phy_type_low);
- netdev_info(netdev,
- "WARNING: Unrecognized PHY_High (0x%llx).\n",
- (u64)link_info->phy_type_high);
- }
-
- /* Now that we've worked out everything that could be supported by the
- * current PHY type, get what is supported by the NVM and intersect
- * them to get what is truly supported
- */
- memset(&cap_ksettings, 0, sizeof(cap_ksettings));
- ice_phy_type_to_ethtool(netdev, &cap_ksettings);
- ethtool_intersect_link_masks(ks, &cap_ksettings);
+ /* Get supported and advertised settings from PHY ability with media */
+ ice_phy_type_to_ethtool(netdev, ks);
switch (link_info->link_speed) {
case ICE_AQ_LINK_SPEED_100GB:
@@ -2028,8 +1756,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ks->base.speed = SPEED_100;
break;
default:
- netdev_info(netdev,
- "WARNING: Unrecognized link_speed (0x%x).\n",
+ netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
link_info->link_speed);
break;
}
@@ -2845,13 +2572,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
if (new_tx_cnt != ring->tx_pending)
- netdev_info(netdev,
- "Requested Tx descriptor count rounded up to %d\n",
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
new_tx_cnt);
new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
if (new_rx_cnt != ring->rx_pending)
- netdev_info(netdev,
- "Requested Rx descriptor count rounded up to %d\n",
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_cnt);
/* if nothing to do return success */
@@ -3718,8 +3443,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
c_type_str, pf->hw.intrl_gran,
ICE_MAX_INTRL);
return -EINVAL;
@@ -3737,8 +3461,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
if (ec->tx_coalesce_usecs_high) {
- netdev_info(vsi->netdev,
- "setting %s-usecs-high is not supported\n",
+ netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
c_type_str);
return -EINVAL;
}
@@ -3755,23 +3478,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
- netdev_info(vsi->netdev,
- "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
+ netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
c_type_str, c_type_str);
return -EINVAL;
}
if (coalesce_usecs > ICE_ITR_MAX) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs range is 0-%d\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
c_type_str, ICE_ITR_MAX);
return -EINVAL;
}
/* hardware only supports an ITR granularity of 2us */
if (coalesce_usecs % 2 != 0) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs must be even\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs must be even\n",
c_type_str);
return -EINVAL;
}
@@ -4012,8 +3732,7 @@ ice_get_module_info(struct net_device *netdev,
}
break;
default:
- netdev_warn(netdev,
- "SFF Module Type not recognized.\n");
+ netdev_warn(netdev, "SFF Module Type not recognized.\n");
return -EINVAL;
}
return 0;
@@ -4081,11 +3800,11 @@ ice_get_module_eeprom(struct net_device *netdev,
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
- .get_drvinfo = ice_get_drvinfo,
- .get_regs_len = ice_get_regs_len,
- .get_regs = ice_get_regs,
- .get_msglevel = ice_get_msglevel,
- .set_msglevel = ice_set_msglevel,
+ .get_drvinfo = ice_get_drvinfo,
+ .get_regs_len = ice_get_regs_len,
+ .get_regs = ice_get_regs,
+ .get_msglevel = ice_get_msglevel,
+ .set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
@@ -4112,8 +3831,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_per_queue_coalesce = ice_get_per_q_coalesce,
- .set_per_queue_coalesce = ice_set_per_q_coalesce,
+ .get_per_queue_coalesce = ice_get_per_q_coalesce,
+ .set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
.get_module_info = ice_get_module_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index f2cababf2561..6db3d0494127 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -267,8 +267,14 @@
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLNVM_ULD_POR_DONE_M BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
+#define GLNVM_ULD_PE_DONE_M BIT(10)
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
@@ -331,7 +337,6 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
-#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1874c9f51a32..d974e2fa3e63 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -117,8 +117,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
- dev_dbg(&vsi->back->pdev->dev,
- "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
vsi->type);
break;
}
@@ -724,7 +723,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
- dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
* in the above for loop, make num_txq equal to num_rxq.
*/
@@ -929,8 +928,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(dev,
- "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
@@ -1232,8 +1230,9 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
*
* Returns 0 on success or ENOMEM on failure.
*/
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
@@ -1392,12 +1391,10 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev,
- "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
- dev_err(dev,
- "Error removing VLAN %d on vsi %i error: %d\n",
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
}
@@ -1453,8 +1450,7 @@ setup_rings:
err = ice_setup_rx_ctx(vsi->rx_rings[i]);
if (err) {
- dev_err(&vsi->back->pdev->dev,
- "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
i, err);
return err;
}
@@ -1623,7 +1619,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1669,7 +1665,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1834,8 +1830,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (!q_vector) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set reg_idx on q_vector %d VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
i, vsi->vsi_num);
goto clear_reg_idx;
}
@@ -1898,8 +1893,7 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(dev,
- "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+ dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);
ice_free_fltr_list(dev, &tmp_add_list);
@@ -2384,8 +2378,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf),
- "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
@@ -2686,7 +2679,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
- ice_dev_onetime_setup(&pf->hw);
if (vsi->type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
@@ -2765,8 +2757,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(ice_pf_to_dev(pf),
- "VSI %d failed lan queue config, error %d\n",
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
if (init_vsi) {
ret = -EIO;
@@ -2834,8 +2825,8 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx *ctx;
enum ice_status status;
struct device *dev;
int i, ret = 0;
@@ -2892,25 +2883,6 @@ out:
#endif /* CONFIG_DCB */
/**
- * ice_nvm_version_str - format the NVM version strings
- * @hw: ptr to the hardware info
- */
-char *ice_nvm_version_str(struct ice_hw *hw)
-{
- u8 oem_ver, oem_patch, ver_hi, ver_lo;
- static char buf[ICE_NVM_VER_LEN];
- u16 oem_build;
-
- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
- &ver_lo);
-
- snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
-
- return buf;
-}
-
-/**
* ice_update_ring_stats - Update ring statistics
* @ring: ring to update
* @cont: used to increment per-vector counters
@@ -2981,7 +2953,7 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
cfg_mac_fltr_exit:
- ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
return status;
}
@@ -3043,16 +3015,14 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
/* another VSI is already the default VSI for this switch */
if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev,
- "Default forwarding VSI %d already in use, disable it and try again\n",
+ dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
sw->dflt_vsi->vsi_num);
return -EEXIST;
}
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev,
- "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return -EIO;
}
@@ -3091,8 +3061,7 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev,
- "Failed to clear the default forwarding VSI %d, error %d\n",
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
dflt_vsi->vsi_num, status);
return -EIO;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 68fd0d4505c2..e2c0dadce920 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -97,8 +97,6 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
-char *ice_nvm_version_str(struct ice_hw *hw);
-
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5ae671609f98..5ef28052c0f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -162,8 +162,7 @@ unregister:
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf),
- "Could not add MAC filters error %d. Unregistering device\n",
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
status);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
@@ -269,7 +268,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
+ struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
@@ -335,8 +334,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
!test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
- netdev_warn(netdev,
- "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
+ netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
err = -EIO;
@@ -382,8 +380,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
err = ice_set_dflt_vsi(pf->first_sw, vsi);
if (err && err != -EEXIST) {
- netdev_err(netdev,
- "Error %d setting default VSI %i Rx rule\n",
+ netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags &=
~IFF_PROMISC;
@@ -395,8 +392,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
err = ice_clear_dflt_vsi(pf->first_sw);
if (err) {
- netdev_err(netdev,
- "Error %d clearing default VSI %i Rx rule\n",
+ netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags |=
IFF_PROMISC;
@@ -752,7 +748,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
kfree(caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, fec_req, fec, an, fc);
ice_print_topo_conflict(vsi);
}
@@ -815,8 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
result = ice_update_link_info(pi);
if (result)
- dev_dbg(dev,
- "Failed to update link status and re-enable link events for port %d\n",
+ dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
/* if the old link up/down and speed is the same as the new */
@@ -834,13 +829,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
- dev_dbg(dev,
- "Failed to set link down, VSI %d error %d\n",
+ dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
}
}
+ ice_dcb_rebuild(pf);
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -892,15 +887,13 @@ static int ice_init_link_events(struct ice_port_info *pi)
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to set link event mask for port %d\n",
+ dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
pi->lport);
return -EIO;
}
if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to enable link events for port %d\n",
+ dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
pi->lport);
return -EIO;
}
@@ -929,8 +922,8 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
!!(link_data->link_info & ICE_AQ_LINK_UP),
le16_to_cpu(link_data->link_speed));
if (status)
- dev_dbg(ice_pf_to_dev(pf),
- "Could not process link event, error %d\n", status);
+ dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
+ status);
return status;
}
@@ -979,13 +972,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_dbg(dev, "%s Receive Queue VF Error detected\n",
qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
- dev_dbg(dev,
- "%s Receive Queue Overflow Error detected\n",
+ dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
- dev_dbg(dev,
- "%s Receive Queue Critical Error detected\n",
+ dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M);
@@ -998,8 +989,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
- dev_dbg(dev,
- "%s Send Queue VF Error detected\n", qtype);
+ dev_dbg(dev, "%s Send Queue VF Error detected\n",
+ qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
qtype);
@@ -1048,8 +1039,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
- dev_dbg(dev,
- "%s Receive Queue unknown event 0x%04x ignored\n",
+ dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
@@ -1238,7 +1228,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
GL_MDET_TX_TCLAN_QNUM_S);
- if (netif_msg_rx_err(pf))
+ if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
@@ -1335,8 +1325,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev,
- "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
+ dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
}
@@ -1367,7 +1356,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
if (vsi->type != ICE_VSI_PF)
return 0;
- dev = &vsi->back->pdev->dev;
+ dev = ice_pf_to_dev(vsi->back);
pi = vsi->port_info;
@@ -1378,8 +1367,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (retcode) {
- dev_err(dev,
- "Failed to get phy capabilities, VSI %d error %d\n",
+ dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
goto out;
@@ -1649,8 +1637,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (err) {
- netdev_err(vsi->netdev,
- "MSIX request_irq failed, error: %d\n", err);
+ netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
+ err);
goto free_q_irqs;
}
@@ -1685,7 +1673,7 @@ free_q_irqs:
*/
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
+ struct device *dev = ice_pf_to_dev(vsi->back);
int i;
for (i = 0; i < vsi->num_xdp_txq; i++) {
@@ -2664,14 +2652,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
-#ifdef CONFIG_PCI_IOV
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
-#endif /* CONFIG_PCI_IOV */
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2764,8 +2750,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
}
if (v_actual < v_budget) {
- dev_warn(dev,
- "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
+ dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors for LAN (traffic + OICR) */
#define ICE_MIN_LAN_VECS 2
@@ -2787,8 +2772,7 @@ msix_err:
goto exit_err;
no_hw_vecs_left_err:
- dev_err(dev,
- "not enough device MSI-X vectors. requested = %d, available = %d\n",
+ dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
exit_err:
@@ -2921,16 +2905,14 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
!memcmp(hw->pkg_name, hw->active_pkg_name,
sizeof(hw->pkg_name))) {
if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev,
- "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft);
else
- dev_info(dev,
- "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2938,8 +2920,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->active_pkg_ver.draft);
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev,
- "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2947,8 +2928,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev,
- "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2960,54 +2940,46 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->pkg_ver.update,
hw->pkg_ver.draft);
} else {
- dev_err(dev,
- "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
+ dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
case ICE_ERR_BUF_TOO_SHORT:
/* fall-through */
case ICE_ERR_CFG:
- dev_err(dev,
- "The DDP package file is invalid. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
case ICE_ERR_NOT_SUPPORTED:
/* Package File version not supported */
if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev,
- "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev,
- "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
- dev_err(dev,
- "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_ESVN:
- dev_err(dev,
- "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
- dev_err(dev,
- "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
return;
default:
break;
}
/* fall-through */
default:
- dev_err(dev,
- "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
+ dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
break;
}
@@ -3038,8 +3010,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
ice_log_pkg_init(hw, &status);
} else {
- dev_err(dev,
- "The DDP package file failed to load. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
if (status) {
@@ -3065,8 +3036,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
- dev_warn(ice_pf_to_dev(pf),
- "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+ dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}
@@ -3159,8 +3129,7 @@ static void ice_request_fw(struct ice_pf *pf)
dflt_pkg_load:
err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
if (err) {
- dev_err(dev,
- "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
return;
}
@@ -3184,7 +3153,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int err;
- /* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -3245,11 +3216,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
- dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
- hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
- hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
- ice_nvm_version_str(hw), hw->fw_build);
-
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -3257,8 +3223,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev,
- "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
+ dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -3335,8 +3300,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* tell the firmware we are up */
err = ice_send_version(pf);
if (err) {
- dev_err(dev,
- "probe failed sending driver version %s. error: %d\n",
+ dev_err(dev, "probe failed sending driver version %s. error: %d\n",
ice_drv_ver, err);
goto err_alloc_sw_unroll;
}
@@ -3477,8 +3441,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset, error %d\n",
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
err);
result = PCI_ERS_RESULT_DISCONNECT;
} else {
@@ -3497,8 +3460,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
err = pci_cleanup_aer_uncorrect_error_status(pdev);
if (err)
- dev_dbg(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
err);
/* non-fatal, continue */
@@ -3517,8 +3479,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
if (!pf) {
- dev_err(&pdev->dev,
- "%s failed, device is unrecoverable\n", __func__);
+ dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
+ __func__);
return;
}
@@ -3766,8 +3728,7 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
/* Validate maxrate requested is within permitted range */
if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
- netdev_err(netdev,
- "Invalid max rate %d specified for the queue %d\n",
+ netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
maxrate, queue_index);
return -EINVAL;
}
@@ -3783,8 +3744,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev,
- "Unable to set Tx max rate, error %d\n", status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
return -EIO;
}
@@ -3876,15 +3837,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
/* Don't set any netdev advanced features with device in Safe Mode */
if (ice_is_safe_mode(vsi->back)) {
- dev_err(&vsi->back->pdev->dev,
- "Device is in Safe Mode - not enabling advanced netdev features\n");
+ dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(&vsi->back->pdev->dev,
- "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
@@ -4372,21 +4331,18 @@ int ice_down(struct ice_vsi *vsi)
tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
- netdev_err(vsi->netdev,
- "Failed stop Tx rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
- netdev_err(vsi->netdev,
- "Failed stop XDP rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
}
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
- netdev_err(vsi->netdev,
- "Failed stop Rx rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
ice_napi_disable_all(vsi);
@@ -4394,8 +4350,7 @@ int ice_down(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
link_err = ice_force_phys_link_state(vsi, false);
if (link_err)
- netdev_err(vsi->netdev,
- "Failed to set physical link down, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
}
@@ -4406,8 +4361,7 @@ int ice_down(struct ice_vsi *vsi)
ice_clean_rx_ring(vsi->rx_rings[i]);
if (tx_err || rx_err || link_err) {
- netdev_err(vsi->netdev,
- "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
}
@@ -4426,7 +4380,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
int i, err = 0;
if (!vsi->num_txq) {
- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
+ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4457,7 +4411,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
int i, err = 0;
if (!vsi->num_rxq) {
- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
+ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4554,8 +4508,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
err = ice_vsi_release(pf->vsi[i]);
if (err)
- dev_dbg(ice_pf_to_dev(pf),
- "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
+ dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
i, err, pf->vsi[i]->vsi_num);
}
}
@@ -4582,8 +4535,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* rebuild the VSI */
err = ice_vsi_rebuild(vsi, true);
if (err) {
- dev_err(dev,
- "rebuild VSI failed, err %d, VSI index %d, type %s\n",
+ dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4591,8 +4543,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev,
- "replay VSI failed, status %d, VSI index %d, type %s\n",
+ dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4605,8 +4556,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
- dev_err(dev,
- "enable VSI failed, err %d, VSI index %d, type %s\n",
+ dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4684,8 +4634,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
}
if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev,
- "Clearing default VSI, re-enable after reset completes\n");
+ dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
/* clear the default VSI configuration if it exists */
pf->first_sw->dflt_vsi = NULL;
pf->first_sw->dflt_vsi_ena = false;
@@ -4736,8 +4685,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev,
- "Rebuild failed due to error sending driver version: %d\n",
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
ret);
goto err_vsi_rebuild;
}
@@ -4993,7 +4941,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -5185,8 +5133,7 @@ int ice_open(struct net_device *netdev)
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
err = ice_force_phys_link_state(vsi, true);
if (err) {
- netdev_err(netdev,
- "Failed to set physical link up, error %d\n",
+ netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fd17ace6b226..4de61dbedd36 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -644,7 +644,7 @@ static bool ice_page_is_reserved(struct page *page)
* Update the offset within page so that Rx buf will be ready to be reused.
* For systems with PAGE_SIZE < 8192 this function will flip the page offset
* so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by the @size bytes
+ * the offset is moved by "size" bytes
*/
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
@@ -1078,8 +1078,6 @@ construct_skb:
skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- } else {
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1621,11 +1619,11 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
struct sk_buff *skb;
+ skb_frag_t *frag;
dma_addr_t dma;
td_tag = off->td_l2tag1;
@@ -1738,9 +1736,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- }
return;
@@ -2078,7 +2075,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
- * use this as the worst case scenerio in which the frag ahead
+ * use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a86270696df1..14a1bf445889 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -33,8 +33,8 @@
* frame.
*
* Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 35bbc4ff603c..6da048a6ca7c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -10,7 +10,7 @@
*/
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ u16 prev_ntu = rx_ring->next_to_use & ~0x7;
rx_ring->next_to_use = val;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index b361ffabb0ca..db0ef6ba907f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -517,7 +517,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the ITR/intrl granularity during
+ * register. Used for determining the ITR/INTRL granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 82b1e7a4cb92..262714d5f54a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -199,8 +199,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
else
- dev_err(dev,
- "Scattered mode for VF Rx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
/**
@@ -402,8 +401,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(dev,
- "VF %d PCI transactions stuck\n", vf->vf_id);
+ dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -462,7 +460,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1095,7 +1093,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
-
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
u32 reg;
@@ -1553,8 +1550,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
v_opcode, v_retval);
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev,
- "Number of invalid messages exceeded for VF %d\n",
+ dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
dev_err(dev, "Use PF Control I/F to enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
@@ -1569,8 +1565,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev,
- "Unable to send the message to VF %d ret %d aq_err %d\n",
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1914,8 +1909,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
}
if (vf_vsi->type != ICE_VSI_VF) {
- netdev_err(netdev,
- "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
return -ENODEV;
}
@@ -1945,8 +1939,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev,
- "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -2063,8 +2056,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to enable Rx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2166,8 +2158,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
ring, &txq_meta)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop Tx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2193,8 +2184,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop Rx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2357,8 +2347,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf),
- "VF-%d requesting more than supported number of queues: %d\n",
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2570,8 +2559,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
*/
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(ice_pf_to_dev(pf),
- "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
@@ -2648,8 +2636,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_pf *pf = vf->pf;
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
- u16 cur_queues;
struct device *dev;
+ u16 cur_queues;
dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2670,8 +2658,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev,
- "VF %d requested %u more queues, but only %u left.\n",
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
@@ -2821,8 +2808,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ dev_err(dev, "invalid VF VLAN id %d\n",
+ vfl->vlan_id[i]);
goto error_param;
}
}
@@ -2836,8 +2823,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (add_v && !ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
* so we can just return success message here
@@ -2860,8 +2846,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
* not trusted, so we can just return success
@@ -2889,8 +2874,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
}
@@ -2903,8 +2887,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
promisc_m, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
}
@@ -3140,8 +3123,7 @@ error_handler:
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ice_vc_get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev,
- "Failed to initialize VLAN stripping for VF %d\n",
+ dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
@@ -3313,8 +3295,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
- netdev_info(netdev,
- "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
+ netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_reset_vf(vf);
@@ -3332,10 +3313,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
struct ice_vf *vf;
- dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
@@ -3358,7 +3337,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
vf->trusted = trusted;
ice_vc_reset_vf(vf);
- dev_info(dev, "VF %u is now %strusted\n",
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 149dca0012ba..4d3407bbd4c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -338,8 +338,8 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
DMA_BIDIRECTIONAL,
ICE_RX_DMA_ATTR);
if (dma_mapping_error(dev, dma)) {
- dev_dbg(dev,
- "XSK UMEM DMA mapping error on page num %d", i);
+ dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
+ i);
goto out_unmap;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 037e054b01a2..98017e7d5dd0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -401,6 +401,8 @@ struct mvneta_pcpu_stats {
struct u64_stats_sync syncp;
u64 rx_packets;
u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
};
@@ -738,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev,
struct mvneta_pcpu_stats *cpu_stats;
u64 rx_packets;
u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
u64 tx_packets;
u64 tx_bytes;
@@ -746,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
+ rx_dropped = cpu_stats->rx_dropped;
+ rx_errors = cpu_stats->rx_errors;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_errors += rx_errors;
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
}
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_dropped = dev->stats.rx_dropped;
-
stats->tx_dropped = dev->stats.tx_dropped;
}
@@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
static void mvneta_rx_error(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc)
{
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u32 status = rx_desc->status;
+ /* update per-cpu counter */
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_errors++;
+ u64_stats_update_end(&stats->syncp);
+
switch (status & MVNETA_RXD_ERR_CODE_MASK) {
case MVNETA_RXD_ERR_CRC:
netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -2179,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (unlikely(!rxq->skb)) {
- netdev_err(dev,
- "Can't allocate skb on queue %d\n",
- rxq->id);
- dev->stats.rx_dropped++;
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
rxq->skb_alloc_err++;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_dropped++;
+ u64_stats_update_end(&stats->syncp);
+
return -ENOMEM;
}
page_pool_release_page(rxq->page_pool, page);
@@ -2270,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc);
- dev->stats.rx_errors++;
/* leave the descriptor untouched */
continue;
}
@@ -2372,7 +2386,6 @@ err_drop_frame_ret_pool:
mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
rx_desc->buf_phys_addr);
err_drop_frame:
- dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
index d787bc0a4155..e09bc3858d57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
{
- if (!MLX5_CAP_GEN(mdev, tls))
+ if (!MLX5_CAP_GEN(mdev, tls_tx))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 71384ad1a443..ef1ed15a53b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
int datalen;
u32 skb_seq;
- if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9e9960146e5b..1c3ab69cbd96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
- netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
- if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
- queue_work(cq->channel->priv->wq, &sq->recover_work);
- break;
- }
do {
struct mlx5e_sq_wqe_info *wi;
u16 ci;
@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.ico_wqe[ci];
+ if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(cq->channel->priv->wq, &sq->recover_work);
+ break;
+ }
+
if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
sqcc += MLX5E_UMR_WQEBBS;
wi->umr.rq->mpwqe.umr_completed++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2565ba8692d9..ee60383adc5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
i = 0;
do {
+ struct mlx5e_tx_wqe_info *wi;
u16 wqe_counter;
bool last_wqe;
+ u16 ci;
mlx5_cqwq_pop(&cq->wq);
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
- if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
- &sq->state)) {
- struct mlx5e_tx_wqe_info *wi;
- u16 ci;
-
- ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- wi = &sq->db.wqe_info[ci];
- mlx5e_dump_error_cqe(sq,
- (struct mlx5_err_cqe *)cqe);
- mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
- queue_work(cq->channel->priv->wq,
- &sq->recover_work);
- }
- stats->cqe_err++;
- }
-
do {
- struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
- u16 ci;
int j;
last_wqe = (sqcc == wqe_counter);
@@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
napi_consume_skb(skb, napi_budget);
} while (!last_wqe);
+ if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+ &sq->state)) {
+ mlx5e_dump_error_cqe(sq,
+ (struct mlx5_err_cqe *)cqe);
+ mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+ queue_work(cq->channel->priv->wq,
+ &sq->recover_work);
+ }
+ stats->cqe_err++;
+ }
+
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
stats->cqes += i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index e4ec0e03c289..4c61d25d2e88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
mutex_lock(&fpga_xfrm->lock);
if (!--fpga_xfrm->num_rules) {
mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+ kfree(fpga_xfrm->sa_ctx);
fpga_xfrm->sa_ctx = NULL;
}
mutex_unlock(&fpga_xfrm->lock);
@@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
- if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c7a16ae05fa8..9dc24241dc91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1582,16 +1582,16 @@ struct match_list_head {
struct match_list first;
};
-static void free_match_list(struct match_list_head *head)
+static void free_match_list(struct match_list_head *head, bool ft_locked)
{
if (!list_empty(&head->list)) {
struct match_list *iter, *match_tmp;
list_del(&head->first.list);
- tree_put_node(&head->first.g->node, false);
+ tree_put_node(&head->first.g->node, ft_locked);
list_for_each_entry_safe(iter, match_tmp, &head->list,
list) {
- tree_put_node(&iter->g->node, false);
+ tree_put_node(&iter->g->node, ft_locked);
list_del(&iter->list);
kfree(iter);
}
@@ -1600,7 +1600,8 @@ static void free_match_list(struct match_list_head *head)
static int build_match_list(struct match_list_head *match_head,
struct mlx5_flow_table *ft,
- const struct mlx5_flow_spec *spec)
+ const struct mlx5_flow_spec *spec,
+ bool ft_locked)
{
struct rhlist_head *tmp, *list;
struct mlx5_flow_group *g;
@@ -1625,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head,
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
- free_match_list(match_head);
+ free_match_list(match_head, ft_locked);
err = -ENOMEM;
goto out;
}
@@ -1805,7 +1806,7 @@ search_again_locked:
version = atomic_read(&ft->node.version);
/* Collect all fgs which has a matching match_criteria */
- err = build_match_list(&match_head, ft, spec);
+ err = build_match_list(&match_head, ft, spec, take_write);
if (err) {
if (take_write)
up_write_ref_node(&ft->node, false);
@@ -1819,7 +1820,7 @@ search_again_locked:
rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
dest_num, version);
- free_match_list(&match_head);
+ free_match_list(&match_head, take_write);
if (!IS_ERR(rule) ||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
if (take_write)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index d89ff1d09119..909a7f284614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -242,7 +242,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (MLX5_CAP_GEN(dev, tls)) {
+ if (MLX5_CAP_GEN(dev, tls_tx)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 9bf8da5f6daf..3fe878d7c94c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -573,6 +573,7 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
+ enum mlxsw_reg_mgpir_device_type device_type;
int index, max_index, sensor_index;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
char mtmp_pl[MLXSW_REG_MTMP_LEN];
@@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
- if (!gbox_num)
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+ !gbox_num)
return 0;
index = mlxsw_hwmon->module_sensor_max;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index c721b171bd8d..ce0a6837daa3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -895,8 +895,10 @@ static int
mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal)
{
+ enum mlxsw_reg_mgpir_device_type device_type;
struct mlxsw_thermal_module *gearbox_tz;
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 gbox_num;
int i;
int err;
@@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
if (err)
return err;
- mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
NULL);
- if (!thermal->tz_gearbox_num)
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+ !gbox_num)
return 0;
+ thermal->tz_gearbox_num = gbox_num;
thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
sizeof(*thermal->tz_gearbox_arr),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 49933818c6f5..2dc0978428e6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
start_again:
err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
if (err)
- return err;
+ goto err_ctx_prepare;
j = 0;
for (; i < rif_count; i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@@ -247,6 +247,7 @@ start_again:
return 0;
err_entry_append:
err_entry_get:
+err_ctx_prepare:
rtnl_unlock();
devlink_dpipe_entry_clear(&entry);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index ce707723f8cf..4a77b511ead2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4844,6 +4844,23 @@ mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
fib_node->fib_entry = NULL;
}
+static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+ struct mlxsw_sp_fib4_entry *fib4_replaced;
+
+ if (!fib_node->fib_entry)
+ return true;
+
+ fib4_replaced = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib4_entry, common);
+ if (fib4_entry->tb_id == RT_TABLE_MAIN &&
+ fib4_replaced->tb_id == RT_TABLE_LOCAL)
+ return false;
+
+ return true;
+}
+
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info)
@@ -4872,6 +4889,12 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
goto err_fib4_entry_create;
}
+ if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
+ mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return 0;
+ }
+
replaced = fib_node->fib_entry;
err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
@@ -4908,7 +4931,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
return;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
- if (WARN_ON(!fib4_entry))
+ if (!fib4_entry)
return;
fib_node = fib4_entry->common.fib_node;
@@ -4970,6 +4993,9 @@ static void mlxsw_sp_rt6_release(struct fib6_info *rt)
static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
kfree(mlxsw_sp_rt6);
}
@@ -5408,6 +5434,27 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ struct mlxsw_sp_fib6_entry *fib6_replaced;
+ struct fib6_info *rt, *rt_replaced;
+
+ if (!fib_node->fib_entry)
+ return true;
+
+ fib6_replaced = container_of(fib_node->fib_entry,
+ struct mlxsw_sp_fib6_entry,
+ common);
+ rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+ rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
+ if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
+ rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
+ return false;
+
+ return true;
+}
+
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
struct fib6_info **rt_arr,
unsigned int nrt6)
@@ -5442,6 +5489,12 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
goto err_fib6_entry_create;
}
+ if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
+ mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+ mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return 0;
+ }
+
replaced = fib_node->fib_entry;
err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 0dacf2c18c09..3e613058e225 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -44,8 +44,8 @@
/* Add/subtract the Adjustment_Value when making a Drift adjustment */
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
-/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
-#define QED_PTP_UCAST_PARAM_MASK 0xF
+/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK 0x70F
static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index aaa316be6183..a2168a14794c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2477,15 +2477,18 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
+ pcie_set_readrq(tp->pci_dev, 512);
r8168b_1_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+ pcie_set_readrq(tp->pci_dev, 512);
r8168c_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
r8168dp_hw_jumbo_enable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+ pcie_set_readrq(tp->pci_dev, 512);
r8168e_hw_jumbo_enable(tp);
break;
default:
@@ -2515,6 +2518,9 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
break;
}
rtl_lock_config_regs(tp);
+
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ pcie_set_readrq(tp->pci_dev, 4096);
}
static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index e61eb891c0f7..db6b2988e632 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -823,7 +823,6 @@ static int ioc3_close(struct net_device *dev)
netif_stop_queue(dev);
ioc3_stop(ip);
- free_irq(dev->irq, dev);
ioc3_free_rx_bufs(ip);
ioc3_clean_tx_ring(ip);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index b7032422393f..67ddf782d98a 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1810,6 +1810,9 @@ static int ave_pro4_get_pinmode(struct ave_private *priv,
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
@@ -1854,6 +1857,9 @@ static int ave_ld20_get_pinmode(struct ave_private *priv,
priv->pinmode_val = SG_ETPINMODE_RMII(0);
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
@@ -1876,6 +1882,9 @@ static int ave_pxs3_get_pinmode(struct ave_private *priv,
priv->pinmode_val = SG_ETPINMODE_RMII(arg);
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 7ec895407d23..e0a5fe83d8e0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
if (dll_lock & SDC4_STATUS_DLL_LOCK)
break;
+ retry--;
} while (retry > 0);
if (!retry)
dev_err(&ethqos->pdev->dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f0c0ea616032..dc09d2131e40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value |= GMAC_PACKET_FILTER_PM;
/* Set all the bits of the HASH tab */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
struct netdev_hw_addr *ha;
/* Hash filter for multicast */
@@ -736,11 +736,14 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
__le16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
+ u32 value;
writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+
if (hash) {
- u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
+ value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
if (is_double) {
value |= GMAC_VLAN_EDVLP;
value |= GMAC_VLAN_ESVL;
@@ -759,8 +762,6 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
} else {
- u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
value &= ~GMAC_VLAN_DOVLTC;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 2af3ac5409b7..67b754a56288 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -458,7 +458,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
struct netdev_hw_addr *ha;
value |= XGMAC_FILTER_HMC;
@@ -569,7 +569,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
writel(value, ioaddr + XGMAC_PACKET_FILTER);
- value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
if (is_double) {
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
@@ -584,7 +586,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
writel(value, ioaddr + XGMAC_PACKET_FILTER);
- value = XGMAC_VLAN_ETV;
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_ETV;
if (is_double) {
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 623521052152..fe2c9fa6a71c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -95,7 +95,7 @@ static int stmmac_default_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
@@ -217,7 +217,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_SGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
return ehl_common_data(pdev, plat);
}
@@ -230,7 +231,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_RGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
return ehl_common_data(pdev, plat);
}
@@ -258,7 +260,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_SGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
return tgl_common_data(pdev, plat);
}
@@ -358,7 +360,7 @@ static int quark_default_data(struct pci_dev *pdev,
plat->bus_id = pci_dev_id(pdev);
plat->phy_addr = ret;
- plat->interface = PHY_INTERFACE_MODE_RMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_RMII;
plat->dma_cfg->pbl = 16;
plat->dma_cfg->pblx8 = true;
@@ -415,7 +417,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
plat->bus_id = 1;
plat->phy_addr = -1;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
plat->dma_cfg->pbl = 32;
plat->dma_cfg->pblx8 = true;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index c23ce838ff63..8dc6c9ff22e1 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1350,27 +1350,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
if (vio_version_after_eq(&port->vio, 1, 3))
localmtu -= VLAN_HLEN;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct flowi4 fl4;
- struct rtable *rt = NULL;
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = dev->ifindex;
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
- fl4.daddr = ip_hdr(skb)->daddr;
- fl4.saddr = ip_hdr(skb)->saddr;
-
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- skb_dst_set(skb, &rt->dst);
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_FRAG_NEEDED,
- htonl(localmtu));
- }
- }
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(localmtu));
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
goto out_dropped;
}
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index af07ea760b35..672cd2caf2fb 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -546,8 +546,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
goto err_rt;
}
diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c
index 20adfe544294..b86611041db6 100644
--- a/drivers/net/hyperv/netvsc_bpf.c
+++ b/drivers/net/hyperv/netvsc_bpf.c
@@ -120,7 +120,7 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (prog)
- bpf_prog_add(prog, nvdev->num_chn);
+ bpf_prog_add(prog, nvdev->num_chn - 1);
for (i = 0; i < nvdev->num_chn; i++)
rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
@@ -136,6 +136,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
struct netdev_bpf xdp;
bpf_op_t ndo_bpf;
+ int ret;
ASSERT_RTNL();
@@ -148,10 +149,18 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
memset(&xdp, 0, sizeof(xdp));
+ if (prog)
+ bpf_prog_inc(prog);
+
xdp.command = XDP_SETUP_PROG;
xdp.prog = prog;
- return ndo_bpf(vf_netdev, &xdp);
+ ret = ndo_bpf(vf_netdev, &xdp);
+
+ if (ret && prog)
+ bpf_prog_put(prog);
+
+ return ret;
}
static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8fc71bd49894..65e12cb07f45 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1059,9 +1059,12 @@ static int netvsc_attach(struct net_device *ndev,
prog = dev_info->bprog;
if (prog) {
+ bpf_prog_inc(prog);
ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
- if (ret)
+ if (ret) {
+ bpf_prog_put(prog);
goto err1;
+ }
}
/* In any case device is now ready */
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 5c5427c840b6..d7706a0346f2 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -934,9 +934,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_init(void)
{
nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR(nsim_dev_ddir))
- return PTR_ERR(nsim_dev_ddir);
- return 0;
+ return PTR_ERR_OR_ZERO(nsim_dev_ddir);
}
void nsim_dev_exit(void)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9485c8d1de8a..3b7a3b8a5e06 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
};
struct qmimux_hdr {
@@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
.data = QMI_WWAN_QUIRK_DTR,
};
-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
- .description = "WWAN/QMI device",
- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
- .bind = qmi_wwan_bind,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .rx_fixup = qmi_wwan_rx_fixup,
- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
-};
-
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
#define QMI_GOBI_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 0)
-/* Quectel does not use fixed interface numbers on at least some of their
- * devices. We need to check the number of endpoints to ensure that we bind to
- * the correct interface.
+/* Many devices have QMI and DIAG functions which are distinguishable
+ * from other vendor specific functions by class, subclass and
+ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
+ * and is silently rejected when probed.
+ *
+ * This makes it possible to match dynamically numbered QMI functions
+ * as seen on e.g. many Quectel modems.
*/
-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+#define QMI_MATCH_FF_FF_FF(vend, prod) \
USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
USB_SUBCLASS_VENDOR_SPEC, 0xff), \
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
static const struct usb_device_id products[] = {
/* 1. CDC ECM like devices match on the control interface */
@@ -1059,10 +1052,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1363,6 +1356,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
@@ -1454,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
{
struct usb_device_id *id = (struct usb_device_id *)prod;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
- const struct driver_info *info;
/* Workaround to enable dynamic IDs. This disables usbnet
* blacklisting functionality. Which, if required, can be
@@ -1490,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* different. Ignore the current interface if the number of endpoints
* equals the number for the diag interface (two).
*/
- info = (void *)id->driver_info;
-
- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
- if (desc->bNumEndpoints == 2)
- return -ENODEV;
- }
+ if (desc->bNumEndpoints == 2)
+ return -ENODEV;
return usbnet_probe(intf, id);
}
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 3998cac49d7f..9edd94679283 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -84,8 +84,8 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
int ret, i;
void *bd_buffer;
dma_addr_t bd_dma_addr;
- u32 riptr;
- u32 tiptr;
+ s32 riptr;
+ s32 tiptr;
u32 gumr;
ut_info = priv->ut_info;
@@ -195,7 +195,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
- if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
+ if (priv->ucc_pram_offset < 0) {
dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
@@ -233,18 +233,23 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
/* Alloc riptr, tiptr */
riptr = qe_muram_alloc(32, 32);
- if (IS_ERR_VALUE(riptr)) {
+ if (riptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
ret = -ENOMEM;
goto free_tx_skbuff;
}
tiptr = qe_muram_alloc(32, 32);
- if (IS_ERR_VALUE(tiptr)) {
+ if (tiptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
ret = -ENOMEM;
goto free_riptr;
}
+ if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
+ dev_err(priv->dev, "MURAM allocation out of addressable range\n");
+ ret = -ENOMEM;
+ goto free_tiptr;
+ }
/* Set RIPTR, TIPTR */
iowrite16be(riptr, &priv->ucc_pram->riptr);
@@ -623,8 +628,8 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
if (howmany < budget) {
napi_complete_done(napi, howmany);
- qe_setbits32(priv->uccf->p_uccm,
- (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+ qe_setbits_be32(priv->uccf->p_uccm,
+ (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
}
return howmany;
@@ -730,8 +735,8 @@ static int uhdlc_open(struct net_device *dev)
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
- qe_muram_free(priv->ucc_pram->riptr);
- qe_muram_free(priv->ucc_pram->tiptr);
+ qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+ qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
index 8b3507ae1781..71d5ad0a7b98 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.h
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -98,7 +98,7 @@ struct ucc_hdlc_private {
unsigned short tx_ring_size;
unsigned short rx_ring_size;
- u32 ucc_pram_offset;
+ s32 ucc_pram_offset;
unsigned short encoding;
unsigned short parity;
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 121d9ea0f135..3725e9cd85f4 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -263,6 +263,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
} else {
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node)) {
+ list_del(&newnode->peer_list);
kfree(newnode);
return -ENOMEM;
}
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 16b19824b9ad..43db442b1373 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -203,9 +203,9 @@ err_peer:
err:
++dev->stats.tx_errors;
if (skb->protocol == htons(ETH_P_IP))
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
kfree_skb(skb);
return ret;
}
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 0fdbd1c45977..bda26405497c 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -569,10 +569,8 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
private_key);
list_for_each_entry_safe(peer, temp, &wg->peer_list,
peer_list) {
- if (wg_noise_precompute_static_static(peer))
- wg_noise_expire_current_peer_keypairs(peer);
- else
- wg_peer_remove(peer);
+ BUG_ON(!wg_noise_precompute_static_static(peer));
+ wg_noise_expire_current_peer_keypairs(peer);
}
wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
up_write(&wg->static_identity.lock);
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index d71c8db68a8c..919d9d866446 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -46,17 +46,21 @@ void __init wg_noise_init(void)
/* Must hold peer->handshake.static_identity->lock */
bool wg_noise_precompute_static_static(struct wg_peer *peer)
{
- bool ret = true;
+ bool ret;
down_write(&peer->handshake.lock);
- if (peer->handshake.static_identity->has_identity)
+ if (peer->handshake.static_identity->has_identity) {
ret = curve25519(
peer->handshake.precomputed_static_static,
peer->handshake.static_identity->static_private,
peer->handshake.remote_static);
- else
+ } else {
+ u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
+
+ ret = curve25519(empty, empty, peer->handshake.remote_static);
memset(peer->handshake.precomputed_static_static, 0,
NOISE_PUBLIC_KEY_LEN);
+ }
up_write(&peer->handshake.lock);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 8878409d2f07..22a32eb10f01 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1897,27 +1897,55 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
-static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
{
- u32 base = mvm->trans->dbg.lmac_error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
- u32 error_id;
+ __le32 err_id;
} err_info;
- iwl_trans_read_mem_bytes(mvm->trans, base,
+ if (!base)
+ return false;
+
+ iwl_trans_read_mem_bytes(trans, base,
&err_info, sizeof(err_info));
+ if (err_info.valid && err_id)
+ *err_id = le32_to_cpu(err_info.err_id);
- if (err_info.valid &&
- err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ return !!err_info.valid;
+}
+
+static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 err_id;
+
+ /* check for lmac1 error */
+ if (iwl_mvm_rt_status(mvm->trans,
+ mvm->trans->dbg.lmac_error_event_table[0],
+ &err_id)) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ return true;
}
- return err_info.valid;
+
+ /* check if we have lmac2 set and check for error */
+ if (iwl_mvm_rt_status(mvm->trans,
+ mvm->trans->dbg.lmac_error_event_table[1], NULL))
+ return true;
+
+ /* check for umac error */
+ if (iwl_mvm_rt_status(mvm->trans,
+ mvm->trans->dbg.umac_error_event_table, NULL))
+ return true;
+
+ return false;
}
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index f783d6d53b6f..6e1ea921c299 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -8,6 +8,7 @@
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -528,6 +530,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
if (req != mvm->ftm_initiator.req)
return;
+ iwl_mvm_ftm_reset(mvm);
+
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
LOCATION_GROUP, 0),
0, sizeof(cmd), &cmd))
@@ -641,7 +645,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
lockdep_assert_held(&mvm->mutex);
if (!mvm->ftm_initiator.req) {
- IWL_ERR(mvm, "Got FTM response but have no request?\n");
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6717f25c46b1..02df603b6400 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -2037,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
- if (IS_ERR(sta)) {
+ if (IS_ERR_OR_NULL(sta)) {
rcu_read_unlock();
WARN(1, "Can't find STA to configure HE\n");
return;
@@ -3293,7 +3291,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, 900,
- min_duration);
+ min_duration, false);
else
iwl_mvm_protect_session(mvm, vif, duration,
min_duration, 500, false);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 7b35f416404c..64ef3f3ba23b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -3320,6 +3320,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
igtk_cmd.sta_id = cpu_to_le32(sta_id);
if (remove_key) {
+ /* This is a valid situation for IGTK */
+ if (sta_id == IWL_MVM_INVALID_STA)
+ return 0;
+
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
} else {
struct ieee80211_key_seq seq;
@@ -3574,9 +3578,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
- if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 1851719e9f4b..d781777b6b96 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -205,9 +205,15 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
- mutex_lock(&mvm->mutex);
/* Protect the session to hear the TDLS setup response on the channel */
- iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+ mutex_lock(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, duration,
+ duration, true);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ duration, 100, true);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 51b138673ddb..c0b420fe5e48 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
+static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_mvm_session_prot_notif *resp;
+ int resp_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
+ pkt->hdr.group_id != MAC_CONF_GROUP))
+ return true;
+
+ if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+ IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
+ return true;
+ }
+
+ resp = (void *)pkt->data;
+
+ if (!resp->status)
+ IWL_ERR(mvm,
+ "TIME_EVENT_NOTIFICATION received but not executed\n");
+
+ return true;
+}
+
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration)
+ u32 duration, u32 min_duration,
+ bool wait_for_notif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-
+ const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
+ MAC_CONF_GROUP, 0) };
+ struct iwl_notification_wait wait_notif;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
@@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
- int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
le32_to_cpu(cmd.duration_tu));
- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
- MAC_CONF_GROUP, 0),
- 0, sizeof(cmd), &cmd);
- if (ret) {
+ if (!wait_for_notif) {
+ if (iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd)) {
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+ }
+
+ return;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
+ notif, ARRAY_SIZE(notif),
+ iwl_mvm_session_prot_notif, NULL);
+
+ if (iwl_mvm_send_cmd_pdu(mvm,
+ iwl_cmd_id(SESSION_PROTECTION_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd)) {
IWL_ERR(mvm,
- "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
- spin_lock_bh(&mvm->time_event_lock);
- iwl_mvm_te_clear_data(mvm, te_data);
- spin_unlock_bh(&mvm->time_event_lock);
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
+ TU_TO_JIFFIES(100))) {
+ IWL_ERR(mvm,
+ "Failed to protect session until session protection\n");
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index df6832b79666..3186d7e40567 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
* @mvm: the mvm component
* @vif: the virtual interface for which the protection issued
* @duration: the duration of the protection
+ * @wait_for_notif: if true, will block until the start of the protection
*/
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 duration, u32 min_duration);
+ u32 duration, u32 min_duration,
+ bool wait_for_notif);
/**
* iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index b5a16f00bada..418e59b7c671 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -8,7 +8,7 @@
* Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,7 +234,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
};
struct iwl_ext_dts_measurement_cmd extcmd = {
- .control_mode = cpu_to_le32(DTS_AUTOMATIC),
+ .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
};
u32 cmdid;
@@ -734,7 +734,8 @@ static struct thermal_zone_device_ops tzone_ops = {
static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
{
int i;
- char name[] = "iwlwifi";
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
if (!iwl_mvm_is_tt_in_fw(mvm)) {
mvm->tz_device.tzone = NULL;
@@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
mvm->tz_device.tzone = thermal_zone_device_register(name,
IWL_MAX_DTS_TRIPS,
IWL_WRITABLE_TRIPS_MSK,
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index c9401c121a14..4e3de684928b 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
rates_max = rates_eid[1];
if (rates_max > MAX_RATES) {
lbs_deb_join("invalid rates");
+ rcu_read_unlock();
+ ret = -EINVAL;
goto out;
}
rates = cmd.bss.rates;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 98f942b797f7..a7968a84aaf8 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
vs_param_set->header.len =
cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
& 0x00FF) + 2);
+ if (le16_to_cpu(vs_param_set->header.len) >
+ MWIFIEX_MAX_VSIE_LEN) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Invalid param length!\n");
+ break;
+ }
+
memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
le16_to_cpu(vs_param_set->header.len));
*buffer += le16_to_cpu(vs_param_set->header.len) +
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 6dd835f1efc2..fbfa0b15d0c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -232,6 +232,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
if (country_ie_len >
(IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+ rcu_read_unlock();
mwifiex_dbg(priv->adapter, ERROR,
"11D: country_ie_len overflow!, deauth AP\n");
return -EINVAL;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 41f0231376c0..132f9e8ed68c 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
"WMM Parameter Set Count: %d\n",
wmm_param_ie->qos_info_bitmap & mask);
+ if (wmm_param_ie->vend_hdr.len + 2 >
+ sizeof(struct ieee_types_wmm_parameter))
+ break;
+
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
wmm_ie, wmm_param_ie,
wmm_param_ie->vend_hdr.len + 2);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index eccad4987ac8..17e277bf39e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
- u8 val, *eeprom = dev->mt76.eeprom.data;
+ u8 *eeprom = dev->mt76.eeprom.data;
u8 tx_mask, rx_mask, max_nss;
+ u32 val;
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
index af5c27e1bb07..4820dca958dd 100644
--- a/drivers/net/wireless/realtek/rtw88/wow.c
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -281,27 +281,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
}
-static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
{
- bool ret;
-
/* wait 100ms for wow firmware to finish work */
msleep(100);
if (wow_enable) {
- if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
- ret = 0;
+ if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+ goto wow_fail;
} else {
- if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 &&
- rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0)
- ret = 0;
+ if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
+ rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
+ goto wow_fail;
}
- if (ret)
- rtw_err(rtwdev, "failed to check wow status %s\n",
- wow_enable ? "enabled" : "disabled");
+ return 0;
- return ret;
+wow_fail:
+ rtw_err(rtwdev, "failed to check wow status %s\n",
+ wow_enable ? "enabled" : "disabled");
+ return -EBUSY;
}
static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,