From 8edf3fd6eb0649b0f19363baf23bca39c6fbdba4 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 28 May 2013 21:32:47 +0200 Subject: iwlwifi: don't print module loading error if not modular MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the opmode modules aren't modular, there's no point in printing an error message that request_module() failed. This will happen because the probe runs during iwlwifi's init and the opmode is only added during its init. Reported-by: Jörg Otte Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/iwl-drv.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 39aad9893e0b..40fed1f511e2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) */ if (load_module) { err = request_module("%s", op->name); +#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, "failed to load module %s (error %d), is dynamic loading enabled?\n", op->name, err); +#endif } return; -- cgit v1.2.1 From b28b6dfe580ab1ab8bf08b908fd69e299b877103 Mon Sep 17 00:00:00 2001 From: Nikolay Martynov Date: Fri, 31 May 2013 01:29:12 -0400 Subject: iwlwifi: dvm: fix chain noise calibration First step of chain noise calibration process had disable flag check inverted. Chain noise calibration never started because of this. Tested on intel 5300 with two antennas attached. The driver correctly disabled one chain. Cc: stable@vger.kernel.org Signed-off-by: Nikolay Martynov Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/dvm/rxon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 707446fa00bd..cd1ad0019185 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; - if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) + if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) return; if ((data->state == IWL_CHAIN_NOISE_ALIVE) && -- cgit v1.2.1 From 2edc6ec6330c7906f4dbd7f5da71be8989efc5a3 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 2 Jun 2013 19:49:15 +0300 Subject: iwlwifi: mvm: correctly set the flags for BAR Somehow, the Tx flags for BAR were completely wrong. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/mvm/tx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index f212f16502ff..48c1891e3df6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } else if (ieee80211_is_back_req(fc)) { - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + tx_cmd->tx_flags |= + cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } /* HT rate doesn't make sense for a non data frame */ -- cgit v1.2.1 From 622ebe994f6866b8d46ee5d3bcc329ed65d3722d Mon Sep 17 00:00:00 2001 From: Moshe Benji Date: Mon, 3 Jun 2013 19:27:16 +0300 Subject: iwlwifi: fix rate control regression Since driver does not use control.rates[0].count, we have never set that variable. But currently, after rate control API rewrite, this is required by mac80211. Otherwise legacy rates control does not work and we transmit always at 1Mbit/s on pre 11n networks. [same fix as for iwlegacy, thanks Stanislaw!] Signed-off-by: Moshe Benji Reviewed-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/iwlwifi/dvm/rs.c | 2 +- drivers/net/wireless/iwlwifi/mvm/rs.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 907bd6e50aad..10fbb176cc8e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - + info->control.rates[0].count = 1; } static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 55334d542e26..b99fe3163866 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; + info->control.rates[0].count = 1; } static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, -- cgit v1.2.1 From a8cf0194b7187fb65dfff28a1c5153d442e3836a Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 4 Jun 2013 14:19:10 +0200 Subject: iwlegacy: fix rate control regression Since driver does not use control.rates[0].count, we have never set that variable. But currently, after rate control API rewrite, this is required by mac80211. Otherwise legacy rates control does not work and we transmit always at 1Mbit/s on pre 11n networks. Signed-off-by: Stanislaw Gruszka Signed-off-by: John W. 
Linville --- drivers/net/wireless/iwlegacy/3945-rs.c | 1 + drivers/net/wireless/iwlegacy/4965-rs.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c index c9f197d9ca1e..fe31590a51b2 100644 --- a/drivers/net/wireless/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/iwlegacy/3945-rs.c @@ -816,6 +816,7 @@ out: rs_sta->last_txrate_idx = idx; info->control.rates[0].idx = rs_sta->last_txrate_idx; } + info->control.rates[0].count = 1; D_RATE("leave: %d\n", idx); } diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index 1fc0b227e120..ed3c42a63a43 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c @@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - + info->control.rates[0].count = 1; } static void * -- cgit v1.2.1 From 541e667e1c0f31a7e11d909eb831cf476814a201 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Thu, 6 Jun 2013 13:29:56 +0200 Subject: brcmsmac: disable power-save related functions This patch fixes a regression introduced by: commit 6da3b6c48d79da96a36c2632053cf4f53bf48fb2 Author: Hauke Mehrtens Date: Sun Mar 24 01:45:52 2013 +0100 brcmsmac: remove brcms_bss_cfg->associated The regression behaviour was described on mailing list. http://mid.gmane.org/5197DC4F.7030503@broadcom.com: "On laptop I installed kernel with brcmsmac compiled as module. It comes up and associates during boot, but after logging in there is no connectivity. Triggering reassoc gives connectivity for some time, but after a while (1-2 min) it stops." Before the mentioned commit the return value of the function brcms_c_ps_allowed() was always false, which is desired behaviour as power-save is not supported at the moment. Therefor, the function is changed to just return false instead of simply reverting the mentioned commit. Bug: 58471 Signed-off-by: Arend van Spriel Signed-off-by: John W. 
Linville --- drivers/net/wireless/brcm80211/brcmsmac/main.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 28e7aeedd184..9fd6f2fef11b 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail) */ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) { - /* disallow PS when one of the following global conditions meets */ - if (!wlc->pub->associated) - return false; - - /* disallow PS when one of these meets when not scanning */ - if (wlc->filter_flags & FIF_PROMISC_IN_BSS) - return false; - - if (wlc->bsscfg->type == BRCMS_TYPE_AP) - return false; - - if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC) - return false; - - return true; + /* not supporting PS so always return false for now */ + return false; } static void brcms_c_statsupd(struct brcms_c_info *wlc) -- cgit v1.2.1 From 8c8d2017ba25c510ddf093419048460db1109bc4 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 11 Jun 2013 18:48:53 +0200 Subject: rt2800: fix RT5390 & RT3290 TX power settings regression My change: commit cee2c7315f60beeff6137ee59e99acc77d636eeb Author: Stanislaw Gruszka Date: Fri Oct 5 13:44:09 2012 +0200 rt2800: use BBP_R1 for setting tx power unfortunately does not work well with RT5390 and RT3290 chips as they require different temperature compensation TX power settings (TSSI tuning). Since that commit make wireless connection very unstable on those chips, restore previous behavior to fix regression. Once we implement proper TSSI tuning on 5390/3290 we can restore back setting TX power by BBP_R1 register for those chips. Reported-and-tested-by: Mike Romberg Cc: stable@vger.kernel.org Signed-off-by: Stanislaw Gruszka Acked-by: Gertjan van Wingerde Signed-off-by: John W. Linville --- drivers/net/wireless/rt2x00/rt2800lib.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index b52d70c75e1a..72f32e5caa4d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, * TODO: we do not use +6 dBm option to do not increase power beyond * regulatory limit, however this could be utilized for devices with * CAPABILITY_POWER_LIMIT. + * + * TODO: add different temperature compensation code for RT3290 & RT5390 + * to allow to use BBP_R1 for those chips. 
*/ - rt2800_bbp_read(rt2x00dev, 1, &r1); - if (delta <= -12) { - power_ctrl = 2; - delta += 12; - } else if (delta <= -6) { - power_ctrl = 1; - delta += 6; - } else { - power_ctrl = 0; + if (!rt2x00_rt(rt2x00dev, RT3290) && + !rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_bbp_read(rt2x00dev, 1, &r1); + if (delta <= -12) { + power_ctrl = 2; + delta += 12; + } else if (delta <= -6) { + power_ctrl = 1; + delta += 6; + } else { + power_ctrl = 0; + } + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); + rt2800_bbp_write(rt2x00dev, 1, r1); } - rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); - rt2800_bbp_write(rt2x00dev, 1, r1); + offset = TX_PWR_CFG_0; for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { -- cgit v1.2.1 From fcb3701849957917a234a61b58ad70ed35c83eda Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 7 Jun 2013 11:03:00 +0200 Subject: brcmfmac: free primary net_device when brcmf_bus_start() fails When initialization within brcmf_bus_start() fails on steps before the brcmf_net_attach() the net_device for the primary interface needs to be freed. This patch resolves a panic during kernel boot as reported by Stephen Warren. ref.: http://mid.gmane.org/51AD1F22.2080004@wwwdotorg.org Tested-by: Stephen Warren Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: John W. Linville --- drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index b98f2235978e..2c593570497c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -930,6 +930,10 @@ fail: brcmf_fws_del_interface(ifp); brcmf_fws_deinit(drvr); } + if (drvr->iflist[0]) { + free_netdev(ifp->ndev); + drvr->iflist[0] = NULL; + } if (p2p_ifp) { free_netdev(p2p_ifp->ndev); drvr->iflist[1] = NULL; -- cgit v1.2.1 From 3bf74b1aecdce719f1445200d5db7dfee2297bba Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 17 Jun 2013 12:09:57 -0700 Subject: vxlan: fix race between flush and incoming learning It is possible for a packet to arrive during vxlan_stop(), and have a dynamic entry created. Close this by checking if device is up. CPU1 CPU2 vxlan_stop vxlan_flush hash_lock acquired vxlan_encap_recv vxlan_snoop waiting for hash_lock hash_lock relased vxlan_flush done hash_lock acquired vxlan_fdb_create This is a day-one bug in vxlan goes back to 3.7. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3b1d2ee7156b..577a069a6dde 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -571,7 +571,6 @@ static void vxlan_snoop(struct net_device *dev, { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; - int err; f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { @@ -588,12 +587,15 @@ static void vxlan_snoop(struct net_device *dev, } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, src_mac, src_ip, - NUD_REACHABLE, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - 0, NTF_SELF); + + /* close off race between vxlan_flush and incoming packets */ + if (netif_running(dev)) + vxlan_fdb_create(vxlan, src_mac, src_ip, + NUD_REACHABLE, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->dst_port, + vxlan->default_dst.remote_vni, + 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); } } -- cgit v1.2.1 From 26a41ae604381c5cc0caf1c3261ca6b298b5fe69 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 17 Jun 2013 12:09:58 -0700 Subject: vxlan: only migrate dynamic FDB entries Only migrate dynamic forwarding table entries, don't modify static entries. If packet received from incorrect source IP address assume it is an imposter and drop it. This patch applies only to -net, a different patch would be needed for earlier kernels since the NTF_SELF flag was introduced with 3.10. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 577a069a6dde..15a73ec42c64 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -565,8 +565,9 @@ skip: /* Watch incoming packets to learn mapping between Ethernet address * and Tunnel endpoint. + * Return true if packet is bogus and should be droppped. */ -static void vxlan_snoop(struct net_device *dev, +static bool vxlan_snoop(struct net_device *dev, __be32 src_ip, const u8 *src_mac) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -575,7 +576,11 @@ static void vxlan_snoop(struct net_device *dev, f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { if (likely(f->remote.remote_ip == src_ip)) - return; + return false; + + /* Don't migrate static entries, drop packets */ + if (!(f->flags & NTF_SELF)) + return true; if (net_ratelimit()) netdev_info(dev, @@ -598,6 +603,8 @@ static void vxlan_snoop(struct net_device *dev, 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); } + + return false; } @@ -729,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vxlan->dev->dev_addr) == 0) goto drop; - if (vxlan->flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) + goto drop; __skb_tunnel_rx(skb, vxlan->dev); skb_reset_network_header(skb); -- cgit v1.2.1 From 7aa27238417a34ec9be2d8eff05ceff319f2d39b Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 17 Jun 2013 12:09:59 -0700 Subject: vxlan: handle skb_clone failure If skb_clone fails if out of memory then just skip the fanout. 
Problem was introduced in 3.10 with: commit 6681712d67eef14c4ce793561c3231659153a320 Author: David Stevens Date: Fri Mar 15 04:35:51 2013 +0000 vxlan: generalize forwarding tables Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 15a73ec42c64..dda997a0102c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1161,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) struct sk_buff *skb1; skb1 = skb_clone(skb, GFP_ATOMIC); - rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); - if (rc == NETDEV_TX_OK) - rc = rc1; + if (skb1) { + rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); + if (rc == NETDEV_TX_OK) + rc = rc1; + } } rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); -- cgit v1.2.1 From 93725cbd22ed716bea8dc479b4925d40a4dbd0c6 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Mon, 17 Jun 2013 15:36:49 -0700 Subject: Fix the VLAN_TAG_PRESENT in netvsc_recv_callback() We should call __vlan_hwaccel_put_tag() only if the packet comes from vlan, otherwise VLAN_TAG_PRESENT will always be added. Reported-by: Olaf Hering Signed-off-by: Haiyang Zhang Reviewed-by: K. Y. Srinivasan Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index ab2307b5d9a7..4dccead586be 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb->protocol = eth_type_trans(skb, net); skb->ip_summed = CHECKSUM_NONE; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci); + if (packet->vlan_tci & VLAN_TAG_PRESENT) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + packet->vlan_tci); net->stats.rx_packets++; net->stats.rx_bytes += packet->total_data_buflen; -- cgit v1.2.1 From ab69bde6b2e9c37456eeb0051a185446336aef9f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 17 Jun 2013 22:44:02 +0200 Subject: alx: add a simple AR816x/AR817x device driver This is a very simple driver, based on the original vendor driver that Qualcomm/Atheros published/submitted previously, but reworked to make the code saner. However, it also lost a number of features (TSO/GSO, VLAN acceleration and multi- queue support) in the process, as well as debugging support features I didn't have any use for. The only thing I left is checksum offload. More features can obviously be added, but this seemed like a good start for having a driver in mainline at all. Johannes Stezenbach has verified that the driver works on AR8161, I have a AR8171 myself. The E2200 device ID I found on github in somebody's repository. Signed-off-by: Johannes Berg Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/Kconfig | 18 + drivers/net/ethernet/atheros/Makefile | 1 + drivers/net/ethernet/atheros/alx/Makefile | 3 + drivers/net/ethernet/atheros/alx/alx.h | 114 ++ drivers/net/ethernet/atheros/alx/ethtool.c | 272 +++++ drivers/net/ethernet/atheros/alx/hw.c | 1226 +++++++++++++++++++++ drivers/net/ethernet/atheros/alx/hw.h | 499 +++++++++ drivers/net/ethernet/atheros/alx/main.c | 1625 ++++++++++++++++++++++++++++ drivers/net/ethernet/atheros/alx/reg.h | 810 ++++++++++++++ 9 files changed, 4568 insertions(+) create mode 100644 drivers/net/ethernet/atheros/alx/Makefile create mode 100644 drivers/net/ethernet/atheros/alx/alx.h create mode 100644 drivers/net/ethernet/atheros/alx/ethtool.c create mode 100644 drivers/net/ethernet/atheros/alx/hw.c create mode 100644 drivers/net/ethernet/atheros/alx/hw.h create mode 100644 drivers/net/ethernet/atheros/alx/main.c create mode 100644 drivers/net/ethernet/atheros/alx/reg.h (limited to 'drivers/net') diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig index 36d6abd1cfff..ad6aa1e98348 100644 --- a/drivers/net/ethernet/atheros/Kconfig +++ b/drivers/net/ethernet/atheros/Kconfig @@ -67,4 +67,22 @@ config ATL1C To compile this driver as a module, choose M here. The module will be called atl1c. +config ALX + tristate "Qualcomm Atheros AR816x/AR817x support" + depends on PCI + select CRC32 + select NET_CORE + select MDIO + help + This driver supports the Qualcomm Atheros L1F ethernet adapter, + i.e. the following chipsets: + + 1969:1091 - AR8161 Gigabit Ethernet + 1969:1090 - AR8162 Fast Ethernet + 1969:10A1 - AR8171 Gigabit Ethernet + 1969:10A0 - AR8172 Fast Ethernet + + To compile this driver as a module, choose M here. The module + will be called alx. + endif # NET_VENDOR_ATHEROS diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile index e7e76fb576ff..5cf1c65bbce9 100644 --- a/drivers/net/ethernet/atheros/Makefile +++ b/drivers/net/ethernet/atheros/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/ obj-$(CONFIG_ATL2) += atlx/ obj-$(CONFIG_ATL1E) += atl1e/ obj-$(CONFIG_ATL1C) += atl1c/ +obj-$(CONFIG_ALX) += alx/ diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile new file mode 100644 index 000000000000..5901fa407d52 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ALX) += alx.o +alx-objs := main.o ethtool.o hw.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h new file mode 100644 index 000000000000..50b3ae2b143d --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _ALX_H_ +#define _ALX_H_ + +#include +#include +#include +#include +#include "hw.h" + +#define ALX_WATCHDOG_TIME (5 * HZ) + +struct alx_buffer { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(size); +}; + +struct alx_rx_queue { + struct alx_rrd *rrd; + dma_addr_t rrd_dma; + + struct alx_rfd *rfd; + dma_addr_t rfd_dma; + + struct alx_buffer *bufs; + + u16 write_idx, read_idx; + u16 rrd_read_idx; +}; +#define ALX_RX_ALLOC_THRESH 32 + +struct alx_tx_queue { + struct alx_txd *tpd; + dma_addr_t tpd_dma; + struct alx_buffer *bufs; + u16 write_idx, read_idx; +}; + +#define ALX_DEFAULT_TX_WORK 128 + +enum alx_device_quirks { + ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0), +}; + +struct alx_priv { + struct net_device *dev; + + struct alx_hw hw; + + /* all descriptor memory */ + struct { + dma_addr_t dma; + void *virt; + int size; + } descmem; + + /* protect int_mask updates */ + spinlock_t irq_lock; + u32 int_mask; + + int tx_ringsz; + int rx_ringsz; + int rxbuf_size; + + struct napi_struct napi; + struct alx_tx_queue txq; + struct alx_rx_queue rxq; + + struct work_struct link_check_wk; + struct work_struct reset_wk; + + u16 msg_enable; + + bool msi; +}; + +extern const struct ethtool_ops alx_ethtool_ops; +extern const char alx_drv_name[]; + +#endif diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c new file mode 100644 index 000000000000..6fa2aec2bc81 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "alx.h" +#include "reg.h" +#include "hw.h" + + +static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + ecmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause; + if (alx_hw_giga(hw)) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->advertising |= hw->adv_cfg; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_INTERNAL; + + if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) { + if (hw->flowctrl & ALX_FC_RX) { + ecmd->advertising |= ADVERTISED_Pause; + + if (!(hw->flowctrl & ALX_FC_TX)) + ecmd->advertising |= ADVERTISED_Asym_Pause; + } else if (hw->flowctrl & ALX_FC_TX) { + ecmd->advertising |= ADVERTISED_Asym_Pause; + } + } + + if (hw->link_speed != SPEED_UNKNOWN) { + ethtool_cmd_speed_set(ecmd, + hw->link_speed - hw->link_speed % 10); + ecmd->duplex = hw->link_speed % 10; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + u32 adv_cfg; + + ASSERT_RTNL(); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->advertising & ADVERTISED_1000baseT_Half) + return -EINVAL; + adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; + } else { + int speed = ethtool_cmd_speed(ecmd); + + switch (speed + ecmd->duplex) { + case SPEED_10 + DUPLEX_HALF: + adv_cfg = ADVERTISED_10baseT_Half; + break; + case SPEED_10 + DUPLEX_FULL: + adv_cfg = ADVERTISED_10baseT_Full; + break; + case SPEED_100 + DUPLEX_HALF: + adv_cfg = ADVERTISED_100baseT_Half; + break; + case SPEED_100 + DUPLEX_FULL: + adv_cfg = ADVERTISED_100baseT_Full; + break; + default: + return -EINVAL; + } + } + + hw->adv_cfg = adv_cfg; + return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl); +} + +static void alx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg) + pause->autoneg = AUTONEG_ENABLE; + else + pause->autoneg = AUTONEG_DISABLE; + + if (hw->flowctrl & ALX_FC_TX) + pause->tx_pause = 1; + else + pause->tx_pause = 0; + + if (hw->flowctrl & ALX_FC_RX) + pause->rx_pause = 1; + else + pause->rx_pause = 0; +} + + +static int alx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + int err = 0; + bool reconfig_phy = false; + u8 fc = 0; + + if (pause->tx_pause) + fc |= ALX_FC_TX; + if (pause->rx_pause) + fc |= ALX_FC_RX; + if 
(pause->autoneg) + fc |= ALX_FC_ANEG; + + ASSERT_RTNL(); + + /* restart auto-neg for auto-mode */ + if (hw->adv_cfg & ADVERTISED_Autoneg) { + if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG)) + reconfig_phy = true; + if (fc & hw->flowctrl & ALX_FC_ANEG && + (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + reconfig_phy = true; + } + + if (reconfig_phy) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); + return err; + } + + /* flow control on mac */ + if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + alx_cfg_mac_flowcontrol(hw, fc); + + hw->flowctrl = fc; + + return 0; +} + +static u32 alx_get_msglevel(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + + return alx->msg_enable; +} + +static void alx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct alx_priv *alx = netdev_priv(netdev); + + alx->msg_enable = data; +} + +static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) + wol->wolopts |= WAKE_MAGIC; + if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) + wol->wolopts |= WAKE_PHY; +} + +static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + + hw->sleep_ctrl = 0; + + if (wol->wolopts & WAKE_MAGIC) + hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC; + if (wol->wolopts & WAKE_PHY) + hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY; + + device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl); + + return 0; +} + +static void alx_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct alx_priv *alx = netdev_priv(netdev); + + strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev), + sizeof(drvinfo->bus_info)); +} + +const struct ethtool_ops alx_ethtool_ops = { + .get_settings = alx_get_settings, + .set_settings = alx_set_settings, + .get_pauseparam = alx_get_pauseparam, + .set_pauseparam = alx_set_pauseparam, + .get_drvinfo = alx_get_drvinfo, + .get_msglevel = alx_get_msglevel, + .set_msglevel = alx_set_msglevel, + .get_wol = alx_get_wol, + .set_wol = alx_set_wol, + .get_link = ethtool_op_get_link, +}; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c new file mode 100644 index 000000000000..220a16ad0e49 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -0,0 +1,1226 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include "reg.h" +#include "hw.h" + +static inline bool alx_is_rev_a(u8 rev) +{ + return rev == ALX_REV_A0 || rev == ALX_REV_A1; +} + +static int alx_wait_mdio_idle(struct alx_hw *hw) +{ + u32 val; + int i; + + for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) { + val = alx_read_mem32(hw, ALX_MDIO); + if (!(val & ALX_MDIO_BUSY)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data) +{ + u32 val, clk_sel; + int err; + + *phy_data = 0; + + /* use slow clock when it's in hibernation status */ + clk_sel = hw->link_speed != SPEED_UNKNOWN ? + ALX_MDIO_CLK_SEL_25MD4 : + ALX_MDIO_CLK_SEL_25MD128; + + if (ext) { + val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | + reg << ALX_MDIO_EXTN_REG_SHIFT; + alx_write_mem32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START | + ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + reg << ALX_MDIO_REG_SHIFT | + ALX_MDIO_START | ALX_MDIO_OP_READ; + } + alx_write_mem32(hw, ALX_MDIO, val); + + err = alx_wait_mdio_idle(hw); + if (err) + return err; + val = alx_read_mem32(hw, ALX_MDIO); + *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA); + return 0; +} + +static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val, clk_sel; + + /* use slow clock when it's in hibernation status */ + clk_sel = hw->link_speed != SPEED_UNKNOWN ? 
+ ALX_MDIO_CLK_SEL_25MD4 : + ALX_MDIO_CLK_SEL_25MD128; + + if (ext) { + val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | + reg << ALX_MDIO_EXTN_REG_SHIFT; + alx_write_mem32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + phy_data << ALX_MDIO_DATA_SHIFT | + ALX_MDIO_START | ALX_MDIO_MODE_EXT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + reg << ALX_MDIO_REG_SHIFT | + phy_data << ALX_MDIO_DATA_SHIFT | + ALX_MDIO_START; + } + alx_write_mem32(hw, ALX_MDIO, val); + + return alx_wait_mdio_idle(hw); +} + +static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + return alx_read_phy_core(hw, false, 0, reg, phy_data); +} + +static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + return alx_write_phy_core(hw, false, 0, reg, phy_data); +} + +static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + return alx_read_phy_core(hw, true, dev, reg, pdata); +} + +static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + return alx_write_phy_core(hw, true, dev, reg, data); +} + +static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (err) + return err; + + return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata); +} + +static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (err) + return err; + + return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data); +} + +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_ext(hw, dev, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_ext(hw, dev, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_dbg(hw, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_dbg(hw, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static u16 alx_get_phy_config(struct alx_hw *hw) +{ + u32 val; + u16 phy_val; + + val = alx_read_mem32(hw, ALX_PHY_CTRL); + /* phy in reset */ + if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0) + return ALX_DRV_PHY_UNKNOWN; + + val = alx_read_mem32(hw, ALX_DRV); + val = ALX_GET_FIELD(val, ALX_DRV_PHY); + if (ALX_DRV_PHY_UNKNOWN == val) + return ALX_DRV_PHY_UNKNOWN; + + alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val); + if (ALX_PHY_INITED == phy_val) + return val; + + return ALX_DRV_PHY_UNKNOWN; +} + +static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val) +{ + u32 read; + int i; + + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + read = alx_read_mem32(hw, reg); + 
if ((read & wait) == 0) { + if (val) + *val = read; + return true; + } + mdelay(1); + } + + return false; +} + +static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 mac0, mac1; + + mac0 = alx_read_mem32(hw, ALX_STAD0); + mac1 = alx_read_mem32(hw, ALX_STAD1); + + /* addr should be big-endian */ + *(__be32 *)(addr + 2) = cpu_to_be32(mac0); + *(__be16 *)addr = cpu_to_be16(mac1); + + return is_valid_ether_addr(addr); +} + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 val; + + /* try to get it from register first */ + if (alx_read_macaddr(hw, addr)) + return 0; + + /* try to load from efuse */ + if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val)) + return -EIO; + alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START); + if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL)) + return -EIO; + if (alx_read_macaddr(hw, addr)) + return 0; + + /* try to load from flash/eeprom (if present) */ + val = alx_read_mem32(hw, ALX_EFLD); + if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) { + if (!alx_wait_reg(hw, ALX_EFLD, + ALX_EFLD_STAT | ALX_EFLD_START, &val)) + return -EIO; + alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START); + if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL)) + return -EIO; + if (alx_read_macaddr(hw, addr)) + return 0; + } + + return -EIO; +} + +void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) +{ + u32 val; + + /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ + val = be32_to_cpu(*(__be32 *)(addr + 2)); + alx_write_mem32(hw, ALX_STAD0, val); + val = be16_to_cpu(*(__be16 *)addr); + alx_write_mem32(hw, ALX_STAD1, val); +} + +static void alx_enable_osc(struct alx_hw *hw) +{ + u32 val; + + /* rising edge */ + val = alx_read_mem32(hw, ALX_MISC); + alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN); + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); +} + +static void alx_reset_osc(struct alx_hw *hw, u8 rev) +{ + u32 val, val2; + + /* clear Internal OSC settings, switching OSC by hw itself */ + val = alx_read_mem32(hw, ALX_MISC3); + alx_write_mem32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | + ALX_MISC3_25M_NOTO_INTNL); + + /* 25M clk from chipset may be unstable 1s after de-assert of + * PERST, driver need re-calibrate before enter Sleep for WoL + */ + val = alx_read_mem32(hw, ALX_MISC); + if (rev >= ALX_REV_B0) { + /* restore over current protection def-val, + * this val could be reset by MAC-RST + */ + ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF); + /* a 0->1 change will update the internal val of osc */ + val &= ~ALX_MISC_INTNLOSC_OPEN; + alx_write_mem32(hw, ALX_MISC, val); + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + /* hw will automatically dis OSC after cab. 
*/ + val2 = alx_read_mem32(hw, ALX_MSIC2); + val2 &= ~ALX_MSIC2_CALB_START; + alx_write_mem32(hw, ALX_MSIC2, val2); + alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START); + } else { + val &= ~ALX_MISC_INTNLOSC_OPEN; + /* disable isolate for rev A devices */ + if (alx_is_rev_a(rev)) + val &= ~ALX_MISC_ISO_EN; + + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + alx_write_mem32(hw, ALX_MISC, val); + } + + udelay(20); +} + +static int alx_stop_mac(struct alx_hw *hw) +{ + u32 rxq, txq, val; + u16 i; + + rxq = alx_read_mem32(hw, ALX_RXQ0); + alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); + txq = alx_read_mem32(hw, ALX_TXQ0); + alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); + + udelay(40); + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_MAC_STS); + if (!(val & ALX_MAC_STS_IDLE)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +int alx_reset_mac(struct alx_hw *hw) +{ + u32 val, pmctrl; + int i, ret; + u8 rev; + bool a_cr; + + pmctrl = 0; + rev = alx_hw_revision(hw); + a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw); + + /* disable all interrupts, RXQ/TXQ */ + alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF); + alx_write_mem32(hw, ALX_IMR, 0); + alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); + + ret = alx_stop_mac(hw); + if (ret) + return ret; + + /* mac reset workaroud */ + alx_write_mem32(hw, ALX_RFD_PIDX, 1); + + /* dis l0s/l1 before mac reset */ + if (a_cr) { + pmctrl = alx_read_mem32(hw, ALX_PMCTRL); + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + alx_write_mem32(hw, ALX_PMCTRL, + pmctrl & ~(ALX_PMCTRL_L1_EN | + ALX_PMCTRL_L0S_EN)); + } + + /* reset whole mac safely */ + val = alx_read_mem32(hw, ALX_MASTER); + alx_write_mem32(hw, ALX_MASTER, + val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS); + + /* make sure it's real idle */ + udelay(10); + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_RFD_PIDX); + if (val == 0) + break; + udelay(10); + } + for (; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_MASTER); + if ((val & ALX_MASTER_DMA_MAC_RST) == 0) + break; + udelay(10); + } + if (i == ALX_DMA_MAC_RST_TO) + return -EIO; + udelay(10); + + if (a_cr) { + alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS); + /* restore l0s / l1 */ + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + alx_write_mem32(hw, ALX_PMCTRL, pmctrl); + } + + alx_reset_osc(hw, rev); + + /* clear Internal OSC settings, switching OSC by hw itself, + * disable isolate for rev A devices + */ + val = alx_read_mem32(hw, ALX_MISC3); + alx_write_mem32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | + ALX_MISC3_25M_NOTO_INTNL); + val = alx_read_mem32(hw, ALX_MISC); + val &= ~ALX_MISC_INTNLOSC_OPEN; + if (alx_is_rev_a(rev)) + val &= ~ALX_MISC_ISO_EN; + alx_write_mem32(hw, ALX_MISC, val); + udelay(20); + + /* driver control speed/duplex, hash-alg */ + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + val = alx_read_mem32(hw, ALX_SERDES); + alx_write_mem32(hw, ALX_SERDES, + val | ALX_SERDES_MACCLK_SLWDWN | + ALX_SERDES_PHYCLK_SLWDWN); + + return 0; +} + +void alx_reset_phy(struct alx_hw *hw) +{ + int i; + u32 val; + u16 phy_val; + + /* (DSP)reset PHY core */ + val = alx_read_mem32(hw, ALX_PHY_CTRL); + val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ | + ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN | + ALX_PHY_CTRL_CLS); + val |= ALX_PHY_CTRL_RST_ANALOG; + + val |= (ALX_PHY_CTRL_HIB_PULSE 
| ALX_PHY_CTRL_HIB_EN); + alx_write_mem32(hw, ALX_PHY_CTRL, val); + udelay(10); + alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT); + + for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++) + udelay(10); + + /* phy power saving & hib */ + alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL, + ALX_SYSMODCTRL_IECHOADJ_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS, + ALX_VDRVBIAS_DEF); + + /* EEE advertisement */ + val = alx_read_mem32(hw, ALX_LPI_CTRL); + alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0); + + /* phy power saving */ + alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF); + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN); + /* rtl8139c, 120m issue */ + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78, + ALX_MIIEXT_NLP78_120M_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10, + ALX_MIIEXT_S3DIG10_DEF); + + if (hw->lnk_patch) { + /* Turn off half amplitude */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT); + /* Turn off Green feature */ + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val | ALX_GREENCFG2_BP_GREEN); + /* Turn off half Bias */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS); + } + + /* set phy interrupt mask */ + alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN); +} + +#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO) + +void alx_reset_pcie(struct alx_hw *hw) +{ + u8 rev = alx_hw_revision(hw); + u32 val; + u16 val16; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. 
*/ + pci_read_config_word(hw->pdev, PCI_COMMAND, &val16); + if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) { + val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(hw->pdev, PCI_COMMAND, val16); + } + + /* clear WoL setting/status */ + val = alx_read_mem32(hw, ALX_WOL0); + alx_write_mem32(hw, ALX_WOL0, 0); + + val = alx_read_mem32(hw, ALX_PDLL_TRNS1); + alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN); + + /* mask some pcie error bits */ + val = alx_read_mem32(hw, ALX_UE_SVRT); + val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR); + alx_write_mem32(hw, ALX_UE_SVRT, val); + + /* wol 25M & pclk */ + val = alx_read_mem32(hw, ALX_MASTER); + if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) == 0) + alx_write_mem32(hw, ALX_MASTER, + val | ALX_MASTER_PCLKSEL_SRDS | + ALX_MASTER_WAKEN_25M); + } else { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) != 0) + alx_write_mem32(hw, ALX_MASTER, + (val & ~ALX_MASTER_PCLKSEL_SRDS) | + ALX_MASTER_WAKEN_25M); + } + + /* ASPM setting */ + alx_enable_aspm(hw, true, true); + + udelay(10); +} + +void alx_start_mac(struct alx_hw *hw) +{ + u32 mac, txq, rxq; + + rxq = alx_read_mem32(hw, ALX_RXQ0); + alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN); + txq = alx_read_mem32(hw, ALX_TXQ0); + alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); + + mac = hw->rx_ctrl; + if (hw->link_speed % 10 == DUPLEX_FULL) + mac |= ALX_MAC_CTRL_FULLD; + else + mac &= ~ALX_MAC_CTRL_FULLD; + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, + hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : + ALX_MAC_CTRL_SPEED_10_100); + mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; + hw->rx_ctrl = mac; + alx_write_mem32(hw, ALX_MAC_CTRL, mac); +} + +void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc) +{ + if (fc & ALX_FC_RX) + hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN; + else + hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN; + + if (fc & ALX_FC_TX) + hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN; + else + hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN; + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en) +{ + u32 pmctrl; + u8 rev = alx_hw_revision(hw); + + pmctrl = alx_read_mem32(hw, ALX_PMCTRL); + + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER, + ALX_PMCTRL_LCKDET_TIMER_DEF); + pmctrl |= ALX_PMCTRL_RCVR_WT_1US | + ALX_PMCTRL_L1_CLKSW_EN | + ALX_PMCTRL_L1_SRDSRX_PWD; + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF); + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US); + pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN | + ALX_PMCTRL_L1_SRDSPLL_EN | + ALX_PMCTRL_L1_BUFSRX_EN | + ALX_PMCTRL_SADLY_EN | + ALX_PMCTRL_HOTRST_WTEN| + ALX_PMCTRL_L0S_EN | + ALX_PMCTRL_L1_EN | + ALX_PMCTRL_ASPM_FCEN | + ALX_PMCTRL_TXL1_AFTER_L0S | + ALX_PMCTRL_RXL1_AFTER_L0S); + if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) + pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN; + + if (l0s_en) + pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN); + if (l1_en) + pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN); + + alx_write_mem32(hw, ALX_PMCTRL, pmctrl); +} + + +static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg) +{ + u32 cfg = 0; + + if (ethadv_cfg & ADVERTISED_Autoneg) { + cfg |= ALX_DRV_PHY_AUTO; + if (ethadv_cfg & ADVERTISED_10baseT_Half) + cfg |= ALX_DRV_PHY_10; + if (ethadv_cfg & ADVERTISED_10baseT_Full) + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg 
& ADVERTISED_100baseT_Half) + cfg |= ALX_DRV_PHY_100; + if (ethadv_cfg & ADVERTISED_100baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_1000baseT_Half) + cfg |= ALX_DRV_PHY_1000; + if (ethadv_cfg & ADVERTISED_1000baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_Pause) + cfg |= ADVERTISE_PAUSE_CAP; + if (ethadv_cfg & ADVERTISED_Asym_Pause) + cfg |= ADVERTISE_PAUSE_ASYM; + } else { + switch (ethadv_cfg) { + case ADVERTISED_10baseT_Half: + cfg |= ALX_DRV_PHY_10; + break; + case ADVERTISED_100baseT_Half: + cfg |= ALX_DRV_PHY_100; + break; + case ADVERTISED_10baseT_Full: + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + break; + case ADVERTISED_100baseT_Full: + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + break; + } + } + + return cfg; +} + +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl) +{ + u16 adv, giga, cr; + u32 val; + int err = 0; + + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0); + val = alx_read_mem32(hw, ALX_DRV); + ALX_SET_FIELD(val, ALX_DRV_PHY, 0); + + if (ethadv & ADVERTISED_Autoneg) { + adv = ADVERTISE_CSMA; + adv |= ethtool_adv_to_mii_adv_t(ethadv); + + if (flowctrl & ALX_FC_ANEG) { + if (flowctrl & ALX_FC_RX) { + adv |= ADVERTISED_Pause; + if (!(flowctrl & ALX_FC_TX)) + adv |= ADVERTISED_Asym_Pause; + } else if (flowctrl & ALX_FC_TX) { + adv |= ADVERTISED_Asym_Pause; + } + } + giga = 0; + if (alx_hw_giga(hw)) + giga = ethtool_adv_to_mii_ctrl1000_t(ethadv); + + cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) || + alx_write_phy_reg(hw, MII_CTRL1000, giga) || + alx_write_phy_reg(hw, MII_BMCR, cr)) + err = -EBUSY; + } else { + cr = BMCR_RESET; + if (ethadv == ADVERTISED_100baseT_Half || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_SPEED100; + if (ethadv == ADVERTISED_10baseT_Full || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_FULLDPLX; + + err = alx_write_phy_reg(hw, MII_BMCR, cr); + } + + if (!err) { + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED); + val |= ethadv_to_hw_cfg(hw, ethadv); + } + + alx_write_mem32(hw, ALX_DRV, val); + + return err; +} + + +void alx_post_phy_link(struct alx_hw *hw) +{ + u16 phy_val, len, agc; + u8 revid = alx_hw_revision(hw); + bool adj_th = revid == ALX_REV_B0; + int speed; + + if (hw->link_speed == SPEED_UNKNOWN) + speed = SPEED_UNKNOWN; + else + speed = hw->link_speed - hw->link_speed % 10; + + if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) + return; + + /* 1000BT/AZ, wrong cable length */ + if (speed != SPEED_UNKNOWN) { + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, + &phy_val); + len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); + alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); + agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); + + if ((speed == SPEED_1000 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || + (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || + (speed == SPEED_100 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || + (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_LONG); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val | ALX_AFE_10BT_100M_TH); + } else { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_DEF); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_AFE, &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + } + + /* threshold adjust */ + if 
(adj_th && hw->lnk_patch) { + if (speed == SPEED_100) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_UP); + } else if (speed == SPEED_1000) { + /* + * Giga link threshold, raise the tolerance of + * noise 50% + */ + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + &phy_val); + ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_HI); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + phy_val); + } + } + } else { + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + + if (adj_th && hw->lnk_patch) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_DOWN); + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); + ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); + } + } +} + + +/* NOTE: + * 1. phy link must be established before calling this function + * 2. wol option (pattern,magic,link,etc.) is configed before call it. + */ +int alx_pre_suspend(struct alx_hw *hw, int speed) +{ + u32 master, mac, phy, val; + int err = 0; + + master = alx_read_mem32(hw, ALX_MASTER); + master &= ~ALX_MASTER_PCLKSEL_SRDS; + mac = hw->rx_ctrl; + /* 10/100 half */ + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100); + mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + + phy = alx_read_mem32(hw, ALX_PHY_CTRL); + phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS); + phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE | + ALX_PHY_CTRL_HIB_EN; + + /* without any activity */ + if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) { + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + if (err) + return err; + phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN; + } else { + if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS)) + mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN; + if (hw->sleep_ctrl & ALX_SLEEP_CIFS) + mac |= ALX_MAC_CTRL_TX_EN; + if (speed % 10 == DUPLEX_FULL) + mac |= ALX_MAC_CTRL_FULLD; + if (speed >= SPEED_1000) + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, + ALX_MAC_CTRL_SPEED_1000); + phy |= ALX_PHY_CTRL_DSPRST_OUT; + err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_S3DIG10, + ALX_MIIEXT_S3DIG10_SL); + if (err) + return err; + } + + alx_enable_osc(hw); + hw->rx_ctrl = mac; + alx_write_mem32(hw, ALX_MASTER, master); + alx_write_mem32(hw, ALX_MAC_CTRL, mac); + alx_write_mem32(hw, ALX_PHY_CTRL, phy); + + /* set val of PDLL D3PLLOFF */ + val = alx_read_mem32(hw, ALX_PDLL_TRNS1); + val |= ALX_PDLL_TRNS1_D3PLLOFF_EN; + alx_write_mem32(hw, ALX_PDLL_TRNS1, val); + + return 0; +} + +bool alx_phy_configured(struct alx_hw *hw) +{ + u32 cfg, hw_cfg; + + cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg); + cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY); + hw_cfg = alx_get_phy_config(hw); + + if (hw_cfg == ALX_DRV_PHY_UNKNOWN) + return false; + + return cfg == hw_cfg; +} + +int alx_get_phy_link(struct alx_hw *hw, int *speed) +{ + struct pci_dev *pdev = hw->pdev; + u16 bmsr, giga; + int err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (err) + return err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (err) + return err; + + if (!(bmsr & BMSR_LSTATUS)) { + *speed = SPEED_UNKNOWN; + return 0; + } + + /* speed/duplex result is saved in PHY Specific Status Register */ + err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga); + if (err) + return err; + + if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED)) + goto wrong_speed; + + switch (giga & ALX_GIGA_PSSR_SPEED) { + case ALX_GIGA_PSSR_1000MBS: + *speed 
= SPEED_1000; + break; + case ALX_GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case ALX_GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + goto wrong_speed; + } + + *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF; + return 1; + +wrong_speed: + dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); + return -EINVAL; +} + +int alx_clear_phy_intr(struct alx_hw *hw) +{ + u16 isr; + + /* clear interrupt status by reading it */ + return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); +} + +int alx_config_wol(struct alx_hw *hw) +{ + u32 wol = 0; + int err = 0; + + /* turn on magic packet event */ + if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC) + wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN; + + /* turn on link up event */ + if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) { + wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK; + /* only link up can wake up */ + err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP); + } + alx_write_mem32(hw, ALX_WOL0, wol); + + return err; +} + +void alx_disable_rss(struct alx_hw *hw) +{ + u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); + + ctrl &= ~ALX_RXQ0_RSS_HASH_EN; + alx_write_mem32(hw, ALX_RXQ0, ctrl); +} + +void alx_configure_basic(struct alx_hw *hw) +{ + u32 val, raw_mtu, max_payload; + u16 val16; + u8 chip_rev = alx_hw_revision(hw); + + alx_set_macaddr(hw, hw->mac_addr); + + alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL); + + /* idle timeout to switch clk_125M */ + if (chip_rev >= ALX_REV_B0) + alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER, + ALX_IDLE_DECISN_TIMER_DEF); + + alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL); + + val = alx_read_mem32(hw, ALX_MASTER); + val |= ALX_MASTER_IRQMOD2_EN | + ALX_MASTER_IRQMOD1_EN | + ALX_MASTER_SYSALVTIMER_EN; + alx_write_mem32(hw, ALX_MASTER, val); + alx_write_mem32(hw, ALX_IRQ_MODU_TIMER, + (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT); + /* intr re-trig timeout */ + alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO); + /* tpd threshold to trig int */ + alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); + alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); + + raw_mtu = hw->mtu + ETH_HLEN; + alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); + if (raw_mtu > ALX_MTU_JUMBO_TH) + hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; + + if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 8 + 7) >> 3; + else + val = ALX_TXQ1_JUMBO_TSO_TH >> 3; + alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); + + max_payload = pcie_get_readrq(hw->pdev) >> 8; + /* + * if BIOS had changed the default dma read max length, + * restore it to default value + */ + if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN) + pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN); + + val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT | + ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN | + ALX_TXQ0_SUPT_IPOPT | + ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT; + alx_write_mem32(hw, ALX_TXQ0, val); + val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT | + ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT | + ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT | + ALX_HQTPD_BURST_EN; + alx_write_mem32(hw, ALX_HQTPD, val); + + /* rxq, flow control */ + val = alx_read_mem32(hw, ALX_SRAM5); + val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3; + if (val > ALX_SRAM_RXF_LEN_8K) { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3; + } else { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_MTU_STD_ALGN) >> 3; + } + alx_write_mem32(hw, ALX_RXQ2, + val16 << 
ALX_RXQ2_RXF_XOFF_THRESH_SHIFT | + val << ALX_RXQ2_RXF_XON_THRESH_SHIFT); + val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT | + ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT | + ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT | + ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN | + ALX_RXQ0_IPV6_PARSE_EN; + + if (alx_hw_giga(hw)) + ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH, + ALX_RXQ0_ASPM_THRESH_100M); + + alx_write_mem32(hw, ALX_RXQ0, val); + + val = alx_read_mem32(hw, ALX_DMA); + val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT | + ALX_DMA_RREQ_PRI_DATA | + max_payload << ALX_DMA_RREQ_BLEN_SHIFT | + ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT | + ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT | + (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT; + alx_write_mem32(hw, ALX_DMA, val); + + /* default multi-tx-q weights */ + val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT | + 4 << ALX_WRR_PRI0_SHIFT | + 4 << ALX_WRR_PRI1_SHIFT | + 4 << ALX_WRR_PRI2_SHIFT | + 4 << ALX_WRR_PRI3_SHIFT; + alx_write_mem32(hw, ALX_WRR, val); +} + +static inline u32 alx_speed_to_ethadv(int speed) +{ + switch (speed) { + case SPEED_1000 + DUPLEX_FULL: + return ADVERTISED_1000baseT_Full; + case SPEED_100 + DUPLEX_FULL: + return ADVERTISED_100baseT_Full; + case SPEED_100 + DUPLEX_HALF: + return ADVERTISED_10baseT_Half; + case SPEED_10 + DUPLEX_FULL: + return ADVERTISED_10baseT_Full; + case SPEED_10 + DUPLEX_HALF: + return ADVERTISED_10baseT_Half; + default: + return 0; + } +} + +int alx_select_powersaving_speed(struct alx_hw *hw, int *speed) +{ + int i, err, spd; + u16 lpa; + + err = alx_get_phy_link(hw, &spd); + if (err < 0) + return err; + + if (spd == SPEED_UNKNOWN) + return 0; + + err = alx_read_phy_reg(hw, MII_LPA, &lpa); + if (err) + return err; + + if (!(lpa & LPA_LPACK)) { + *speed = spd; + return 0; + } + + if (lpa & LPA_10FULL) + *speed = SPEED_10 + DUPLEX_FULL; + else if (lpa & LPA_10HALF) + *speed = SPEED_10 + DUPLEX_HALF; + else if (lpa & LPA_100FULL) + *speed = SPEED_100 + DUPLEX_FULL; + else + *speed = SPEED_100 + DUPLEX_HALF; + + if (*speed != spd) { + err = alx_write_phy_reg(hw, ALX_MII_IER, 0); + if (err) + return err; + err = alx_setup_speed_duplex(hw, + alx_speed_to_ethadv(*speed) | + ADVERTISED_Autoneg, + ALX_FC_ANEG | ALX_FC_RX | + ALX_FC_TX); + if (err) + return err; + + /* wait for linkup */ + for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) { + int speed2; + + msleep(100); + + err = alx_get_phy_link(hw, &speed2); + if (err < 0) + return err; + if (speed2 != SPEED_UNKNOWN) + break; + } + if (i == ALX_MAX_SETUP_LNK_CYCLE) + return -ETIMEDOUT; + } + + return 0; +} + +bool alx_get_phy_info(struct alx_hw *hw) +{ + u16 devs1, devs2; + + if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) || + alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1])) + return false; + + /* since we haven't PMA/PMD status2 register, we can't + * use mdio45_probe function for prtad and mmds. + * use fixed MMD3 to get mmds. 
+ */ + if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) || + alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2)) + return false; + hw->mdio.mmds = devs1 | devs2 << 16; + + return true; +} diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h new file mode 100644 index 000000000000..65e723d2172a --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ALX_HW_H_ +#define ALX_HW_H_ +#include +#include +#include +#include "reg.h" + +/* Transmit Packet Descriptor, contains 4 32-bit words. + * + * 31 16 0 + * +----------------+----------------+ + * | vlan-tag | buf length | + * +----------------+----------------+ + * | Word 1 | + * +----------------+----------------+ + * | Word 2: buf addr lo | + * +----------------+----------------+ + * | Word 3: buf addr hi | + * +----------------+----------------+ + * + * Word 2 and 3 combine to form a 64-bit buffer address + * + * Word 1 has three forms, depending on the state of bit 8/12/13: + * if bit8 =='1', the definition is just for custom checksum offload. + * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor + * for the skb is special for LSO V2, Word 2 become total skb length , + * Word 3 is meaningless. + * other condition, the definition is for general skb or ip/tcp/udp + * checksum or LSO(TSO) offload. 
+ * + * Here is the depiction: + * + * 0-+ 0-+ + * 1 | 1 | + * 2 | 2 | + * 3 | Payload offset 3 | L4 header offset + * 4 | (7:0) 4 | (7:0) + * 5 | 5 | + * 6 | 6 | + * 7-+ 7-+ + * 8 Custom csum enable = 1 8 Custom csum enable = 0 + * 9 General IPv4 checksum 9 General IPv4 checksum + * 10 General TCP checksum 10 General TCP checksum + * 11 General UDP checksum 11 General UDP checksum + * 12 Large Send Segment enable 12 Large Send Segment enable + * 13 Large Send Segment type 13 Large Send Segment type + * 14 VLAN tagged 14 VLAN tagged + * 15 Insert VLAN tag 15 Insert VLAN tag + * 16 IPv4 packet 16 IPv4 packet + * 17 Ethernet frame type 17 Ethernet frame type + * 18-+ 18-+ + * 19 | 19 | + * 20 | 20 | + * 21 | Custom csum offset 21 | + * 22 | (25:18) 22 | + * 23 | 23 | MSS (30:18) + * 24 | 24 | + * 25-+ 25 | + * 26-+ 26 | + * 27 | 27 | + * 28 | Reserved 28 | + * 29 | 29 | + * 30-+ 30-+ + * 31 End of packet 31 End of packet + */ +struct alx_txd { + __le16 len; + __le16 vlan_tag; + __le32 word1; + union { + __le64 addr; + struct { + __le32 pkt_len; + __le32 resvd; + } l; + } adrl; +} __packed; + +/* tpd word 1 */ +#define TPD_CXSUMSTART_MASK 0x00FF +#define TPD_CXSUMSTART_SHIFT 0 +#define TPD_L4HDROFFSET_MASK 0x00FF +#define TPD_L4HDROFFSET_SHIFT 0 +#define TPD_CXSUM_EN_MASK 0x0001 +#define TPD_CXSUM_EN_SHIFT 8 +#define TPD_IP_XSUM_MASK 0x0001 +#define TPD_IP_XSUM_SHIFT 9 +#define TPD_TCP_XSUM_MASK 0x0001 +#define TPD_TCP_XSUM_SHIFT 10 +#define TPD_UDP_XSUM_MASK 0x0001 +#define TPD_UDP_XSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_V2_MASK 0x0001 +#define TPD_LSO_V2_SHIFT 13 +#define TPD_VLTAGGED_MASK 0x0001 +#define TPD_VLTAGGED_SHIFT 14 +#define TPD_INS_VLTAG_MASK 0x0001 +#define TPD_INS_VLTAG_SHIFT 15 +#define TPD_IPV4_MASK 0x0001 +#define TPD_IPV4_SHIFT 16 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 17 +#define TPD_CXSUMOFFSET_MASK 0x00FF +#define TPD_CXSUMOFFSET_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 18 +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK) + +/* Receive Free Descriptor */ +struct alx_rfd { + __le64 addr; /* data buffer address, length is + * declared in register --- every + * buffer has the same size + */ +} __packed; + +/* Receive Return Descriptor, contains 4 32-bit words. 
+ * + * 31 16 0 + * +----------------+----------------+ + * | Word 0 | + * +----------------+----------------+ + * | Word 1: RSS Hash value | + * +----------------+----------------+ + * | Word 2 | + * +----------------+----------------+ + * | Word 3 | + * +----------------+----------------+ + * + * Word 0 depiction & Word 2 depiction: + * + * 0--+ 0--+ + * 1 | 1 | + * 2 | 2 | + * 3 | 3 | + * 4 | 4 | + * 5 | 5 | + * 6 | 6 | + * 7 | IP payload checksum 7 | VLAN tag + * 8 | (15:0) 8 | (15:0) + * 9 | 9 | + * 10 | 10 | + * 11 | 11 | + * 12 | 12 | + * 13 | 13 | + * 14 | 14 | + * 15-+ 15-+ + * 16-+ 16-+ + * 17 | Number of RFDs 17 | + * 18 | (19:16) 18 | + * 19-+ 19 | Protocol ID + * 20-+ 20 | (23:16) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24 | Reserved + * 25 | Start index of RFD-ring 25-+ + * 26 | (31:20) 26 | RSS Q-num (27:25) + * 27 | 27-+ + * 28 | 28-+ + * 29 | 29 | RSS Hash algorithm + * 30 | 30 | (31:28) + * 31-+ 31-+ + * + * Word 3 depiction: + * + * 0--+ + * 1 | + * 2 | + * 3 | + * 4 | + * 5 | + * 6 | + * 7 | Packet length (include FCS) + * 8 | (13:0) + * 9 | + * 10 | + * 11 | + * 12 | + * 13-+ + * 14 L4 Header checksum error + * 15 IPv4 checksum error + * 16 VLAN tagged + * 17-+ + * 18 | Protocol ID (19:17) + * 19-+ + * 20 Receive error summary + * 21 FCS(CRC) error + * 22 Frame alignment error + * 23 Truncated packet + * 24 Runt packet + * 25 Incomplete packet due to insufficient rx-desc + * 26 Broadcast packet + * 27 Multicast packet + * 28 Ethernet type (EII or 802.3) + * 29 FIFO overflow + * 30 Length error (for 802.3, length field mismatch with actual len) + * 31 Updated, indicate to driver that this RRD is refreshed. + */ +struct alx_rrd { + __le32 word0; + __le32 rss_hash; + __le32 word2; + __le32 word3; +} __packed; + +/* rrd word 0 */ +#define RRD_XSUM_MASK 0xFFFF +#define RRD_XSUM_SHIFT 0 +#define RRD_NOR_MASK 0x000F +#define RRD_NOR_SHIFT 16 +#define RRD_SI_MASK 0x0FFF +#define RRD_SI_SHIFT 20 + +/* rrd word 2 */ +#define RRD_VLTAG_MASK 0xFFFF +#define RRD_VLTAG_SHIFT 0 +#define RRD_PID_MASK 0x00FF +#define RRD_PID_SHIFT 16 +/* non-ip packet */ +#define RRD_PID_NONIP 0 +/* ipv4(only) */ +#define RRD_PID_IPV4 1 +/* tcp/ipv6 */ +#define RRD_PID_IPV6TCP 2 +/* tcp/ipv4 */ +#define RRD_PID_IPV4TCP 3 +/* udp/ipv6 */ +#define RRD_PID_IPV6UDP 4 +/* udp/ipv4 */ +#define RRD_PID_IPV4UDP 5 +/* ipv6(only) */ +#define RRD_PID_IPV6 6 +/* LLDP packet */ +#define RRD_PID_LLDP 7 +/* 1588 packet */ +#define RRD_PID_1588 8 +#define RRD_RSSQ_MASK 0x0007 +#define RRD_RSSQ_SHIFT 25 +#define RRD_RSSALG_MASK 0x000F +#define RRD_RSSALG_SHIFT 28 +#define RRD_RSSALG_TCPV6 0x1 +#define RRD_RSSALG_IPV6 0x2 +#define RRD_RSSALG_TCPV4 0x4 +#define RRD_RSSALG_IPV4 0x8 + +/* rrd word 3 */ +#define RRD_PKTLEN_MASK 0x3FFF +#define RRD_PKTLEN_SHIFT 0 +#define RRD_ERR_L4_MASK 0x0001 +#define RRD_ERR_L4_SHIFT 14 +#define RRD_ERR_IPV4_MASK 0x0001 +#define RRD_ERR_IPV4_SHIFT 15 +#define RRD_VLTAGGED_MASK 0x0001 +#define RRD_VLTAGGED_SHIFT 16 +#define RRD_OLD_PID_MASK 0x0007 +#define RRD_OLD_PID_SHIFT 17 +#define RRD_ERR_RES_MASK 0x0001 +#define RRD_ERR_RES_SHIFT 20 +#define RRD_ERR_FCS_MASK 0x0001 +#define RRD_ERR_FCS_SHIFT 21 +#define RRD_ERR_FAE_MASK 0x0001 +#define RRD_ERR_FAE_SHIFT 22 +#define RRD_ERR_TRUNC_MASK 0x0001 +#define RRD_ERR_TRUNC_SHIFT 23 +#define RRD_ERR_RUNT_MASK 0x0001 +#define RRD_ERR_RUNT_SHIFT 24 +#define RRD_ERR_ICMP_MASK 0x0001 +#define RRD_ERR_ICMP_SHIFT 25 +#define RRD_BCAST_MASK 0x0001 +#define RRD_BCAST_SHIFT 26 +#define RRD_MCAST_MASK 0x0001 +#define RRD_MCAST_SHIFT 27 
+#define RRD_ETHTYPE_MASK 0x0001 +#define RRD_ETHTYPE_SHIFT 28 +#define RRD_ERR_FIFOV_MASK 0x0001 +#define RRD_ERR_FIFOV_SHIFT 29 +#define RRD_ERR_LEN_MASK 0x0001 +#define RRD_ERR_LEN_SHIFT 30 +#define RRD_UPDATED_MASK 0x0001 +#define RRD_UPDATED_SHIFT 31 + + +#define ALX_MAX_SETUP_LNK_CYCLE 50 + +/* for FlowControl */ +#define ALX_FC_RX 0x01 +#define ALX_FC_TX 0x02 +#define ALX_FC_ANEG 0x04 + +/* for sleep control */ +#define ALX_SLEEP_WOL_PHY 0x00000001 +#define ALX_SLEEP_WOL_MAGIC 0x00000002 +#define ALX_SLEEP_CIFS 0x00000004 +#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \ + ALX_SLEEP_WOL_MAGIC | \ + ALX_SLEEP_CIFS) + +/* for RSS hash type */ +#define ALX_RSS_HASH_TYPE_IPV4 0x1 +#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2 +#define ALX_RSS_HASH_TYPE_IPV6 0x4 +#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8 +#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \ + ALX_RSS_HASH_TYPE_IPV4_TCP | \ + ALX_RSS_HASH_TYPE_IPV6 | \ + ALX_RSS_HASH_TYPE_IPV6_TCP) +#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_MAX_JUMBO_PKT_SIZE (9*1024) +#define ALX_MAX_TSO_PKT_SIZE (7*1024) +#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE +#define ALX_MIN_FRAME_SIZE 68 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +#define ALX_MAX_RX_QUEUES 8 +#define ALX_MAX_TX_QUEUES 4 +#define ALX_MAX_HANDLED_INTRS 5 + +#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | \ + ALX_ISR_DMAR | \ + ALX_ISR_SMB | \ + ALX_ISR_MANU | \ + ALX_ISR_TIMER) + +#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | ALX_ISR_DMAR) + +#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \ + ALX_ISR_TXF_UR | \ + ALX_ISR_RFD_UR) + +#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \ + ALX_ISR_TX_Q1 | \ + ALX_ISR_TX_Q2 | \ + ALX_ISR_TX_Q3 | \ + ALX_ISR_RX_Q0 | \ + ALX_ISR_RX_Q1 | \ + ALX_ISR_RX_Q2 | \ + ALX_ISR_RX_Q3 | \ + ALX_ISR_RX_Q4 | \ + ALX_ISR_RX_Q5 | \ + ALX_ISR_RX_Q6 | \ + ALX_ISR_RX_Q7) + +/* maximum interrupt vectors for msix */ +#define ALX_MAX_MSIX_INTRS 16 + +#define ALX_GET_FIELD(_data, _field) \ + (((_data) >> _field ## _SHIFT) & _field ## _MASK) + +#define ALX_SET_FIELD(_data, _field, _value) do { \ + (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \ + (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\ + } while (0) + +struct alx_hw { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + + /* current & permanent mac addr */ + u8 mac_addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + u16 mtu; + u16 imt; + u8 dma_chnl; + u8 max_dma_chnl; + /* tpd threshold to trig INT */ + u32 ith_tpd; + u32 rx_ctrl; + u32 mc_hash[2]; + + u32 smb_timer; + /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */ + int link_speed; + + /* auto-neg advertisement or force mode config */ + u32 adv_cfg; + u8 flowctrl; + + u32 sleep_ctrl; + + spinlock_t mdio_lock; + struct mdio_if_info mdio; + u16 phy_id[2]; + + /* PHY link patch flag */ + bool lnk_patch; +}; + +static inline int alx_hw_revision(struct alx_hw *hw) +{ + return hw->pdev->revision >> ALX_PCI_REVID_SHIFT; +} + +static inline bool alx_hw_with_cr(struct alx_hw *hw) +{ + return hw->pdev->revision & 1; +} + +static inline bool alx_hw_giga(struct alx_hw *hw) +{ + return hw->pdev->device & 1; +} + +static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val) +{ + writeb(val, hw->hw_addr + reg); +} + +static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val) +{ + writew(val, hw->hw_addr + reg); +} + +static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg) +{ + return readw(hw->hw_addr + reg); +} + +static inline void alx_write_mem32(struct alx_hw *hw, 
u32 reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void alx_post_write(struct alx_hw *hw) +{ + readl(hw->hw_addr); +} + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr); +void alx_reset_phy(struct alx_hw *hw); +void alx_reset_pcie(struct alx_hw *hw); +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); +void alx_post_phy_link(struct alx_hw *hw); +int alx_pre_suspend(struct alx_hw *hw, int speed); +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); +int alx_get_phy_link(struct alx_hw *hw, int *speed); +int alx_clear_phy_intr(struct alx_hw *hw); +int alx_config_wol(struct alx_hw *hw); +void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); +void alx_start_mac(struct alx_hw *hw); +int alx_reset_mac(struct alx_hw *hw); +void alx_set_macaddr(struct alx_hw *hw, const u8 *addr); +bool alx_phy_configured(struct alx_hw *hw); +void alx_configure_basic(struct alx_hw *hw); +void alx_disable_rss(struct alx_hw *hw); +int alx_select_powersaving_speed(struct alx_hw *hw, int *speed); +bool alx_get_phy_info(struct alx_hw *hw); + +#endif diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c new file mode 100644 index 000000000000..418de8b13165 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -0,0 +1,1625 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "alx.h" +#include "hw.h" +#include "reg.h" + +const char alx_drv_name[] = "alx"; + + +static void alx_free_txbuf(struct alx_priv *alx, int entry) +{ + struct alx_buffer *txb = &alx->txq.bufs[entry]; + + if (dma_unmap_len(txb, size)) { + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(txb, dma), + dma_unmap_len(txb, size), + DMA_TO_DEVICE); + dma_unmap_len_set(txb, size, 0); + } + + if (txb->skb) { + dev_kfree_skb_any(txb->skb); + txb->skb = NULL; + } +} + +static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct sk_buff *skb; + struct alx_buffer *cur_buf; + dma_addr_t dma; + u16 cur, next, count = 0; + + next = cur = rxq->write_idx; + if (++next == alx->rx_ringsz) + next = 0; + cur_buf = &rxq->bufs[cur]; + + while (!cur_buf->skb && next != rxq->read_idx) { + struct alx_rfd *rfd = &rxq->rfd[cur]; + + skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + if (!skb) + break; + dma = dma_map_single(&alx->hw.pdev->dev, + skb->data, alx->rxbuf_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { + dev_kfree_skb(skb); + break; + } + + /* Unfortunately, RX descriptor buffers must be 4-byte + * aligned, so we can't use IP alignment. + */ + if (WARN_ON(dma & 3)) { + dev_kfree_skb(skb); + break; + } + + cur_buf->skb = skb; + dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); + dma_unmap_addr_set(cur_buf, dma, dma); + rfd->addr = cpu_to_le64(dma); + + cur = next; + if (++next == alx->rx_ringsz) + next = 0; + cur_buf = &rxq->bufs[cur]; + count++; + } + + if (count) { + /* flush all updates before updating hardware */ + wmb(); + rxq->write_idx = cur; + alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); + } + + return count; +} + +static inline int alx_tpd_avail(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + + if (txq->write_idx >= txq->read_idx) + return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; + return txq->read_idx - txq->write_idx - 1; +} + +static bool alx_clean_tx_irq(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + u16 hw_read_idx, sw_read_idx; + unsigned int total_bytes = 0, total_packets = 0; + int budget = ALX_DEFAULT_TX_WORK; + + sw_read_idx = txq->read_idx; + hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX); + + if (sw_read_idx != hw_read_idx) { + while (sw_read_idx != hw_read_idx && budget > 0) { + struct sk_buff *skb; + + skb = txq->bufs[sw_read_idx].skb; + if (skb) { + total_bytes += skb->len; + total_packets++; + budget--; + } + + alx_free_txbuf(alx, sw_read_idx); + + if (++sw_read_idx == alx->tx_ringsz) + sw_read_idx = 0; + } + txq->read_idx = sw_read_idx; + + netdev_completed_queue(alx->dev, total_packets, total_bytes); + } + + if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) && + alx_tpd_avail(alx) > alx->tx_ringsz/4) + netif_wake_queue(alx->dev); + + return sw_read_idx == hw_read_idx; +} + +static void alx_schedule_link_check(struct alx_priv *alx) +{ + schedule_work(&alx->link_check_wk); +} + +static void alx_schedule_reset(struct alx_priv *alx) +{ + schedule_work(&alx->reset_wk); +} + +static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct alx_rrd *rrd; + struct alx_buffer *rxb; + struct sk_buff *skb; + u16 length, rfd_cleaned = 0; + + while (budget > 0) { + rrd = &rxq->rrd[rxq->rrd_read_idx]; + if (!(rrd->word3 & 
cpu_to_le32(1 << RRD_UPDATED_SHIFT))) + break; + rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); + + if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_SI) != rxq->read_idx || + ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_NOR) != 1) { + alx_schedule_reset(alx); + return 0; + } + + rxb = &rxq->bufs[rxq->read_idx]; + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(rxb, dma), + dma_unmap_len(rxb, size), + DMA_FROM_DEVICE); + dma_unmap_len_set(rxb, size, 0); + skb = rxb->skb; + rxb->skb = NULL; + + if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || + rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { + rrd->word3 = 0; + dev_kfree_skb_any(skb); + goto next_pkt; + } + + length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), + RRD_PKTLEN) - ETH_FCS_LEN; + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, alx->dev); + + skb_checksum_none_assert(skb); + if (alx->dev->features & NETIF_F_RXCSUM && + !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | + cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) { + switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), + RRD_PID)) { + case RRD_PID_IPV6UDP: + case RRD_PID_IPV4UDP: + case RRD_PID_IPV4TCP: + case RRD_PID_IPV6TCP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + } + + napi_gro_receive(&alx->napi, skb); + budget--; + +next_pkt: + if (++rxq->read_idx == alx->rx_ringsz) + rxq->read_idx = 0; + if (++rxq->rrd_read_idx == alx->rx_ringsz) + rxq->rrd_read_idx = 0; + + if (++rfd_cleaned > ALX_RX_ALLOC_THRESH) + rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); + } + + if (rfd_cleaned) + alx_refill_rx_ring(alx, GFP_ATOMIC); + + return budget > 0; +} + +static int alx_poll(struct napi_struct *napi, int budget) +{ + struct alx_priv *alx = container_of(napi, struct alx_priv, napi); + struct alx_hw *hw = &alx->hw; + bool complete = true; + unsigned long flags; + + complete = alx_clean_tx_irq(alx) && + alx_clean_rx_irq(alx, budget); + + if (!complete) + return 1; + + napi_complete(&alx->napi); + + /* enable interrupt */ + spin_lock_irqsave(&alx->irq_lock, flags); + alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + spin_unlock_irqrestore(&alx->irq_lock, flags); + + alx_post_write(hw); + + return 0; +} + +static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) +{ + struct alx_hw *hw = &alx->hw; + bool write_int_mask = false; + + spin_lock(&alx->irq_lock); + + /* ACK interrupt */ + alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS); + intr &= alx->int_mask; + + if (intr & ALX_ISR_FATAL) { + netif_warn(alx, hw, alx->dev, + "fatal interrupt 0x%x, resetting\n", intr); + alx_schedule_reset(alx); + goto out; + } + + if (intr & ALX_ISR_ALERT) + netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); + + if (intr & ALX_ISR_PHY) { + /* suppress PHY interrupt, because the source + * is from PHY internal. only the internal status + * is cleared, the interrupt status could be cleared. 
+ */ + alx->int_mask &= ~ALX_ISR_PHY; + write_int_mask = true; + alx_schedule_link_check(alx); + } + + if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { + napi_schedule(&alx->napi); + /* mask rx/tx interrupt, enable them when napi complete */ + alx->int_mask &= ~ALX_ISR_ALL_QUEUES; + write_int_mask = true; + } + + if (write_int_mask) + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + + alx_write_mem32(hw, ALX_ISR, 0); + + out: + spin_unlock(&alx->irq_lock); + return IRQ_HANDLED; +} + +static irqreturn_t alx_intr_msi(int irq, void *data) +{ + struct alx_priv *alx = data; + + return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); +} + +static irqreturn_t alx_intr_legacy(int irq, void *data) +{ + struct alx_priv *alx = data; + struct alx_hw *hw = &alx->hw; + u32 intr; + + intr = alx_read_mem32(hw, ALX_ISR); + + if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) + return IRQ_NONE; + + return alx_intr_handle(alx, intr); +} + +static void alx_init_ring_ptrs(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + u32 addr_hi = ((u64)alx->descmem.dma) >> 32; + + alx->rxq.read_idx = 0; + alx->rxq.write_idx = 0; + alx->rxq.rrd_read_idx = 0; + alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi); + alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma); + alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); + alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma); + alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); + alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); + + alx->txq.read_idx = 0; + alx->txq.write_idx = 0; + alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi); + alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma); + alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); + + /* load these pointers into the chip */ + alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR); +} + +static void alx_free_txring_buf(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + int i; + + if (!txq->bufs) + return; + + for (i = 0; i < alx->tx_ringsz; i++) + alx_free_txbuf(alx, i); + + memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer)); + memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd)); + txq->write_idx = 0; + txq->read_idx = 0; + + netdev_reset_queue(alx->dev); +} + +static void alx_free_rxring_buf(struct alx_priv *alx) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct alx_buffer *cur_buf; + u16 i; + + if (rxq == NULL) + return; + + for (i = 0; i < alx->rx_ringsz; i++) { + cur_buf = rxq->bufs + i; + if (cur_buf->skb) { + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(cur_buf, dma), + dma_unmap_len(cur_buf, size), + DMA_FROM_DEVICE); + dev_kfree_skb(cur_buf->skb); + cur_buf->skb = NULL; + dma_unmap_len_set(cur_buf, size, 0); + dma_unmap_addr_set(cur_buf, dma, 0); + } + } + + rxq->write_idx = 0; + rxq->read_idx = 0; + rxq->rrd_read_idx = 0; +} + +static void alx_free_buffers(struct alx_priv *alx) +{ + alx_free_txring_buf(alx); + alx_free_rxring_buf(alx); +} + +static int alx_reinit_rings(struct alx_priv *alx) +{ + alx_free_buffers(alx); + + alx_init_ring_ptrs(alx); + + if (!alx_refill_rx_ring(alx, GFP_KERNEL)) + return -ENOMEM; + + return 0; +} + +static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash) +{ + u32 crc32, bit, reg; + + crc32 = ether_crc(ETH_ALEN, addr); + reg = (crc32 >> 31) & 0x1; + bit = (crc32 >> 26) & 0x1F; + + mc_hash[reg] |= BIT(bit); +} + +static void __alx_set_rx_mode(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + struct netdev_hw_addr *ha; + u32 
mc_hash[2] = {}; + + if (!(netdev->flags & IFF_ALLMULTI)) { + netdev_for_each_mc_addr(ha, netdev) + alx_add_mc_addr(hw, ha->addr, mc_hash); + + alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]); + alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]); + } + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); + if (netdev->flags & IFF_PROMISC) + hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; + if (netdev->flags & IFF_ALLMULTI) + hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +static void alx_set_rx_mode(struct net_device *netdev) +{ + __alx_set_rx_mode(netdev); +} + +static int alx_set_mac_address(struct net_device *netdev, void *data) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + struct sockaddr *addr = data; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netdev->addr_assign_type & NET_ADDR_RANDOM) + netdev->addr_assign_type ^= NET_ADDR_RANDOM; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + alx_set_macaddr(hw, hw->mac_addr); + + return 0; +} + +static int alx_alloc_descriptors(struct alx_priv *alx) +{ + alx->txq.bufs = kcalloc(alx->tx_ringsz, + sizeof(struct alx_buffer), + GFP_KERNEL); + if (!alx->txq.bufs) + return -ENOMEM; + + alx->rxq.bufs = kcalloc(alx->rx_ringsz, + sizeof(struct alx_buffer), + GFP_KERNEL); + if (!alx->rxq.bufs) + goto out_free; + + /* physical tx/rx ring descriptors + * + * Allocate them as a single chunk because they must not cross a + * 4G boundary (hardware has a single register for high 32 bits + * of addresses only) + */ + alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz + + sizeof(struct alx_rfd) * alx->rx_ringsz; + alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + &alx->descmem.dma, + GFP_KERNEL); + if (!alx->descmem.virt) + goto out_free; + + alx->txq.tpd = (void *)alx->descmem.virt; + alx->txq.tpd_dma = alx->descmem.dma; + + /* alignment requirement for next block */ + BUILD_BUG_ON(sizeof(struct alx_txd) % 8); + + alx->rxq.rrd = + (void *)((u8 *)alx->descmem.virt + + sizeof(struct alx_txd) * alx->tx_ringsz); + alx->rxq.rrd_dma = alx->descmem.dma + + sizeof(struct alx_txd) * alx->tx_ringsz; + + /* alignment requirement for next block */ + BUILD_BUG_ON(sizeof(struct alx_rrd) % 8); + + alx->rxq.rfd = + (void *)((u8 *)alx->descmem.virt + + sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz); + alx->rxq.rfd_dma = alx->descmem.dma + + sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz; + + return 0; +out_free: + kfree(alx->txq.bufs); + kfree(alx->rxq.bufs); + return -ENOMEM; +} + +static int alx_alloc_rings(struct alx_priv *alx) +{ + int err; + + err = alx_alloc_descriptors(alx); + if (err) + return err; + + alx->int_mask &= ~ALX_ISR_ALL_QUEUES; + alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + alx->tx_ringsz = alx->tx_ringsz; + + netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); + + alx_reinit_rings(alx); + return 0; +} + +static void alx_free_rings(struct alx_priv *alx) +{ + netif_napi_del(&alx->napi); + alx_free_buffers(alx); + + kfree(alx->txq.bufs); + kfree(alx->rxq.bufs); + + dma_free_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + alx->descmem.virt, + alx->descmem.dma); +} + +static void alx_config_vector_mapping(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_write_mem32(hw, 
ALX_MSI_MAP_TBL1, 0); + alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0); + alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); +} + +static void alx_irq_enable(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + /* level-1 interrupt switch */ + alx_write_mem32(hw, ALX_ISR, 0); + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + alx_post_write(hw); +} + +static void alx_irq_disable(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); + alx_write_mem32(hw, ALX_IMR, 0); + alx_post_write(hw); + + synchronize_irq(alx->hw.pdev->irq); +} + +static int alx_request_irq(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + struct alx_hw *hw = &alx->hw; + int err; + u32 msi_ctrl; + + msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; + + if (!pci_enable_msi(alx->hw.pdev)) { + alx->msi = true; + + alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, + msi_ctrl | ALX_MSI_MASK_SEL_LINE); + err = request_irq(pdev->irq, alx_intr_msi, 0, + alx->dev->name, alx); + if (!err) + goto out; + /* fall back to legacy interrupt */ + pci_disable_msi(alx->hw.pdev); + } + + alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); + err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, + alx->dev->name, alx); +out: + if (!err) + alx_config_vector_mapping(alx); + return err; +} + +static void alx_free_irq(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + + free_irq(pdev->irq, alx); + + if (alx->msi) { + pci_disable_msi(alx->hw.pdev); + alx->msi = false; + } +} + +static int alx_identify_hw(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + int rev = alx_hw_revision(hw); + + if (rev > ALX_REV_C0) + return -EINVAL; + + hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2; + + return 0; +} + +static int alx_init_sw(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + struct alx_hw *hw = &alx->hw; + int err; + + err = alx_identify_hw(alx); + if (err) { + dev_err(&pdev->dev, "unrecognized chip, aborting\n"); + return err; + } + + alx->hw.lnk_patch = + pdev->device == ALX_DEV_ID_AR8161 && + pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && + pdev->subsystem_device == 0x0091 && + pdev->revision == 0; + + hw->smb_timer = 400; + hw->mtu = alx->dev->mtu; + alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); + alx->tx_ringsz = 256; + alx->rx_ringsz = 512; + hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY; + hw->imt = 200; + alx->int_mask = ALX_ISR_MISC; + hw->dma_chnl = hw->max_dma_chnl; + hw->ith_tpd = alx->tx_ringsz / 3; + hw->link_speed = SPEED_UNKNOWN; + hw->adv_cfg = ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_1000baseT_Full; + hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; + + hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | + ALX_MAC_CTRL_MHASH_ALG_HI5B | + ALX_MAC_CTRL_BRD_EN | + ALX_MAC_CTRL_PCRCE | + ALX_MAC_CTRL_CRCE | + ALX_MAC_CTRL_RXFC_EN | + ALX_MAC_CTRL_TXFC_EN | + 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; + + return err; +} + + +static netdev_features_t alx_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return features; +} + +static void alx_netif_stop(struct alx_priv *alx) +{ + alx->dev->trans_start = jiffies; + if (netif_carrier_ok(alx->dev)) { + netif_carrier_off(alx->dev); + netif_tx_disable(alx->dev); + napi_disable(&alx->napi); + } +} + +static void alx_halt(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + 
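+ /* descriptive note (not in the original patch): bring the interface down - stop the queues, mark the link unknown, reset the MAC, disable ASPM L0s/L1 and interrupts, then release the ring buffers */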
alx_netif_stop(alx); + hw->link_speed = SPEED_UNKNOWN; + + alx_reset_mac(hw); + + /* disable l0s/l1 */ + alx_enable_aspm(hw, false, false); + alx_irq_disable(alx); + alx_free_buffers(alx); +} + +static void alx_configure(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_configure_basic(hw); + alx_disable_rss(hw); + __alx_set_rx_mode(alx->dev); + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +static void alx_activate(struct alx_priv *alx) +{ + /* hardware setting lost, restore it */ + alx_reinit_rings(alx); + alx_configure(alx); + + /* clear old interrupts */ + alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); + + alx_irq_enable(alx); + + alx_schedule_link_check(alx); +} + +static void alx_reinit(struct alx_priv *alx) +{ + ASSERT_RTNL(); + + alx_halt(alx); + alx_activate(alx); +} + +static int alx_change_mtu(struct net_device *netdev, int mtu) +{ + struct alx_priv *alx = netdev_priv(netdev); + int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ALX_MIN_FRAME_SIZE) || + (max_frame > ALX_MAX_FRAME_SIZE)) + return -EINVAL; + + if (netdev->mtu == mtu) + return 0; + + netdev->mtu = mtu; + alx->hw.mtu = mtu; + alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? + ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; + netdev_update_features(netdev); + if (netif_running(netdev)) + alx_reinit(alx); + return 0; +} + +static void alx_netif_start(struct alx_priv *alx) +{ + netif_tx_wake_all_queues(alx->dev); + napi_enable(&alx->napi); + netif_carrier_on(alx->dev); +} + +static int __alx_open(struct alx_priv *alx, bool resume) +{ + int err; + + if (!resume) + netif_carrier_off(alx->dev); + + err = alx_alloc_rings(alx); + if (err) + return err; + + alx_configure(alx); + + err = alx_request_irq(alx); + if (err) + goto out_free_rings; + + /* clear old interrupts */ + alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); + + alx_irq_enable(alx); + + if (!resume) + netif_tx_start_all_queues(alx->dev); + + alx_schedule_link_check(alx); + return 0; + +out_free_rings: + alx_free_rings(alx); + return err; +} + +static void __alx_stop(struct alx_priv *alx) +{ + alx_halt(alx); + alx_free_irq(alx); + alx_free_rings(alx); +} + +static const char *alx_speed_desc(u16 speed) +{ + switch (speed) { + case SPEED_1000 + DUPLEX_FULL: + return "1 Gbps Full"; + case SPEED_100 + DUPLEX_FULL: + return "100 Mbps Full"; + case SPEED_100 + DUPLEX_HALF: + return "100 Mbps Half"; + case SPEED_10 + DUPLEX_FULL: + return "10 Mbps Full"; + case SPEED_10 + DUPLEX_HALF: + return "10 Mbps Half"; + default: + return "Unknown speed"; + } +} + +static void alx_check_link(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + unsigned long flags; + int speed, old_speed; + int err; + + /* clear PHY internal interrupt status, otherwise the main + * interrupt status will be asserted forever + */ + alx_clear_phy_intr(hw); + + err = alx_get_phy_link(hw, &speed); + if (err < 0) + goto reset; + + spin_lock_irqsave(&alx->irq_lock, flags); + alx->int_mask |= ALX_ISR_PHY; + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + spin_unlock_irqrestore(&alx->irq_lock, flags); + + old_speed = hw->link_speed; + + if (old_speed == speed) + return; + hw->link_speed = speed; + + if (speed != SPEED_UNKNOWN) { + netif_info(alx, link, alx->dev, + "NIC Up: %s\n", alx_speed_desc(speed)); + alx_post_phy_link(hw); + alx_enable_aspm(hw, true, true); + alx_start_mac(hw); + + if (old_speed == SPEED_UNKNOWN) + alx_netif_start(alx); + } else { + /* link is now down */ + alx_netif_stop(alx); + netif_info(alx, link, alx->dev, "Link 
Down\n"); + err = alx_reset_mac(hw); + if (err) + goto reset; + alx_irq_disable(alx); + + /* MAC reset causes all HW settings to be lost, restore all */ + err = alx_reinit_rings(alx); + if (err) + goto reset; + alx_configure(alx); + alx_enable_aspm(hw, false, true); + alx_post_phy_link(hw); + alx_irq_enable(alx); + } + + return; + +reset: + alx_schedule_reset(alx); +} + +static int alx_open(struct net_device *netdev) +{ + return __alx_open(netdev_priv(netdev), false); +} + +static int alx_stop(struct net_device *netdev) +{ + __alx_stop(netdev_priv(netdev)); + return 0; +} + +static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + struct alx_hw *hw = &alx->hw; + int err, speed; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __alx_stop(alx); + +#ifdef CONFIG_PM_SLEEP + err = pci_save_state(pdev); + if (err) + return err; +#endif + + err = alx_select_powersaving_speed(hw, &speed); + if (err) + return err; + err = alx_clear_phy_intr(hw); + if (err) + return err; + err = alx_pre_suspend(hw, speed); + if (err) + return err; + err = alx_config_wol(hw); + if (err) + return err; + + *wol_en = false; + if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { + netif_info(alx, wol, netdev, + "wol: ctrl=%X, speed=%X\n", + hw->sleep_ctrl, speed); + device_set_wakeup_enable(&pdev->dev, true); + *wol_en = true; + } + + pci_disable_device(pdev); + + return 0; +} + +static void alx_shutdown(struct pci_dev *pdev) +{ + int err; + bool wol_en; + + err = __alx_shutdown(pdev, &wol_en); + if (!err) { + pci_wake_from_d3(pdev, wol_en); + pci_set_power_state(pdev, PCI_D3hot); + } else { + dev_err(&pdev->dev, "shutdown fail %d\n", err); + } +} + +static void alx_link_check(struct work_struct *work) +{ + struct alx_priv *alx; + + alx = container_of(work, struct alx_priv, link_check_wk); + + rtnl_lock(); + alx_check_link(alx); + rtnl_unlock(); +} + +static void alx_reset(struct work_struct *work) +{ + struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); + + rtnl_lock(); + alx_reinit(alx); + rtnl_unlock(); +} + +static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) +{ + u8 cso, css; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + cso = skb_checksum_start_offset(skb); + if (cso & 1) + return -EINVAL; + + css = cso + skb->csum_offset; + first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); + first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); + first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); + + return 0; +} + +static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) +{ + struct alx_tx_queue *txq = &alx->txq; + struct alx_txd *tpd, *first_tpd; + dma_addr_t dma; + int maplen, f, first_idx = txq->write_idx; + + first_tpd = &txq->tpd[txq->write_idx]; + tpd = first_tpd; + + maplen = skb_headlen(skb); + dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, + DMA_TO_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) + goto err_dma; + + dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); + dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); + + tpd->adrl.addr = cpu_to_le64(dma); + tpd->len = cpu_to_le16(maplen); + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + tpd = &txq->tpd[txq->write_idx]; + + tpd->word1 = first_tpd->word1; + + maplen = skb_frag_size(frag); + dma = 
skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0, + maplen, DMA_TO_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) + goto err_dma; + dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); + dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); + + tpd->adrl.addr = cpu_to_le64(dma); + tpd->len = cpu_to_le16(maplen); + } + + /* last TPD, set EOP flag and store skb */ + tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); + txq->bufs[txq->write_idx].skb = skb; + + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + + return 0; + +err_dma: + f = first_idx; + while (f != txq->write_idx) { + alx_free_txbuf(alx, f); + if (++f == alx->tx_ringsz) + f = 0; + } + return -ENOMEM; +} + +static netdev_tx_t alx_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_tx_queue *txq = &alx->txq; + struct alx_txd *first; + int tpdreq = skb_shinfo(skb)->nr_frags + 1; + + if (alx_tpd_avail(alx) < tpdreq) { + netif_stop_queue(alx->dev); + goto drop; + } + + first = &txq->tpd[txq->write_idx]; + memset(first, 0, sizeof(*first)); + + if (alx_tx_csum(skb, first)) + goto drop; + + if (alx_map_tx_skb(alx, skb) < 0) + goto drop; + + netdev_sent_queue(alx->dev, skb->len); + + /* flush updates before updating hardware */ + wmb(); + alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx); + + if (alx_tpd_avail(alx) < alx->tx_ringsz/8) + netif_stop_queue(alx->dev); + + return NETDEV_TX_OK; + +drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void alx_tx_timeout(struct net_device *dev) +{ + struct alx_priv *alx = netdev_priv(dev); + + alx_schedule_reset(alx); +} + +static int alx_mdio_read(struct net_device *netdev, + int prtad, int devad, u16 addr) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + u16 val; + int err; + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad == MDIO_DEVAD_NONE) + err = alx_read_phy_reg(hw, addr, &val); + else + err = alx_read_phy_ext(hw, devad, addr, &val); + + if (err) + return err; + return val; +} + +static int alx_mdio_write(struct net_device *netdev, + int prtad, int devad, u16 addr, u16 val) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad == MDIO_DEVAD_NONE) + return alx_write_phy_reg(hw, addr, val); + + return alx_write_phy_ext(hw, devad, addr, val); +} + +static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void alx_poll_controller(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + + if (alx->msi) + alx_intr_msi(0, alx); + else + alx_intr_legacy(0, alx); +} +#endif + +static const struct net_device_ops alx_netdev_ops = { + .ndo_open = alx_open, + .ndo_stop = alx_stop, + .ndo_start_xmit = alx_start_xmit, + .ndo_set_rx_mode = alx_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = alx_set_mac_address, + .ndo_change_mtu = alx_change_mtu, + .ndo_do_ioctl = alx_ioctl, + .ndo_tx_timeout = alx_tx_timeout, + .ndo_fix_features = alx_fix_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = alx_poll_controller, +#endif +}; + +static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct alx_priv 
*alx; + struct alx_hw *hw; + bool phy_configured; + int bars, pm_cap, err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* The alx chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used for descriptors. + */ + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA config, aborting\n"); + goto out_pci_disable; + } + } + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions(pdev, bars, alx_drv_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed(bars:%d)\n", bars); + goto out_pci_disable; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Can't find power management capability, aborting\n"); + err = -EIO; + goto out_pci_release; + } + + err = pci_set_power_state(pdev, PCI_D0); + if (err) + goto out_pci_release; + + netdev = alloc_etherdev(sizeof(*alx)); + if (!netdev) { + err = -ENOMEM; + goto out_pci_release; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + alx = netdev_priv(netdev); + alx->dev = netdev; + alx->hw.pdev = pdev; + alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | + NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL; + hw = &alx->hw; + pci_set_drvdata(pdev, alx); + + hw->hw_addr = pci_ioremap_bar(pdev, 0); + if (!hw->hw_addr) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -EIO; + goto out_free_netdev; + } + + netdev->netdev_ops = &alx_netdev_ops; + SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops); + netdev->irq = pdev->irq; + netdev->watchdog_timeo = ALX_WATCHDOG_TIME; + + if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) + pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; + + err = alx_init_sw(alx); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto out_unmap; + } + + alx_reset_pcie(hw); + + phy_configured = alx_phy_configured(hw); + + if (!phy_configured) + alx_reset_phy(hw); + + err = alx_reset_mac(hw); + if (err) { + dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); + goto out_unmap; + } + + /* setup link to put it in a known good starting state */ + if (!phy_configured) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + if (err) { + dev_err(&pdev->dev, + "failed to configure PHY speed/duplex (err=%d)\n", + err); + goto out_unmap; + } + } + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + + if (alx_get_perm_macaddr(hw, hw->perm_addr)) { + dev_warn(&pdev->dev, + "Invalid permanent address programmed, using random one\n"); + eth_hw_addr_random(netdev); + memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); + } + + memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); + memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); + memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); + + hw->mdio.prtad = 0; + hw->mdio.mmds = 0; + hw->mdio.dev = netdev; + hw->mdio.mode_support = MDIO_SUPPORTS_C45 | + MDIO_SUPPORTS_C22 | + MDIO_EMULATE_C22; + hw->mdio.mdio_read = alx_mdio_read; + hw->mdio.mdio_write = alx_mdio_write; + + if (!alx_get_phy_info(hw)) { + dev_err(&pdev->dev, "failed 
to identify PHY\n"); + err = -EIO; + goto out_unmap; + } + + INIT_WORK(&alx->link_check_wk, alx_link_check); + INIT_WORK(&alx->reset_wk, alx_reset); + spin_lock_init(&alx->hw.mdio_lock); + spin_lock_init(&alx->irq_lock); + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto out_unmap; + } + + device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); + + netdev_info(netdev, + "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", + netdev->dev_addr); + + return 0; + +out_unmap: + iounmap(hw->hw_addr); +out_free_netdev: + free_netdev(netdev); +out_pci_release: + pci_release_selected_regions(pdev, bars); +out_pci_disable: + pci_disable_device(pdev); + return err; +} + +static void alx_remove(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + /* restore permanent mac address */ + alx_set_macaddr(hw, hw->perm_addr); + + unregister_netdev(alx->dev); + iounmap(hw->hw_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + free_netdev(alx->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int alx_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int err; + bool wol_en; + + err = __alx_shutdown(pdev, &wol_en); + if (err) { + dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); + return err; + } + + if (wol_en) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int alx_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + struct alx_hw *hw = &alx->hw; + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + hw->link_speed = SPEED_UNKNOWN; + alx->int_mask = ALX_ISR_MISC; + + alx_reset_pcie(hw); + alx_reset_phy(hw); + + err = alx_reset_mac(hw); + if (err) { + netif_err(alx, hw, alx->dev, + "resume:reset_mac fail %d\n", err); + return -EIO; + } + + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + if (err) { + netif_err(alx, hw, alx->dev, + "resume:setup_speed_duplex fail %d\n", err); + return -EIO; + } + + if (netif_running(netdev)) { + err = __alx_open(alx, true); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} +#endif + +static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; + + dev_info(&pdev->dev, "pci error detected\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + alx_halt(alx); + } + + if (state == pci_channel_io_perm_failure) + rc = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return rc; +} + +static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + dev_info(&pdev->dev, "pci error slot reset\n"); + + rtnl_lock(); + + if (pci_enable_device(pdev)) 
{ + dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); + goto out; + } + + pci_set_master(pdev); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + alx_reset_pcie(hw); + if (!alx_reset_mac(hw)) + rc = PCI_ERS_RESULT_RECOVERED; +out: + pci_cleanup_aer_uncorrect_error_status(pdev); + + rtnl_unlock(); + + return rc; +} + +static void alx_pci_error_resume(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + + dev_info(&pdev->dev, "pci error resume\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + alx_activate(alx); + netif_device_attach(netdev); + } + + rtnl_unlock(); +} + +static const struct pci_error_handlers alx_err_handlers = { + .error_detected = alx_pci_error_detected, + .slot_reset = alx_pci_error_slot_reset, + .resume = alx_pci_error_resume, +}; + +#ifdef CONFIG_PM_SLEEP +static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +#define ALX_PM_OPS (&alx_pm_ops) +#else +#define ALX_PM_OPS NULL +#endif + +static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) }, + {} +}; + +static struct pci_driver alx_driver = { + .name = alx_drv_name, + .id_table = alx_pci_tbl, + .probe = alx_probe, + .remove = alx_remove, + .shutdown = alx_shutdown, + .err_handler = &alx_err_handlers, + .driver.pm = ALX_PM_OPS, +}; + +module_pci_driver(alx_driver); +MODULE_DEVICE_TABLE(pci, alx_pci_tbl); +MODULE_AUTHOR("Johannes Berg "); +MODULE_AUTHOR("Qualcomm Corporation, "); +MODULE_DESCRIPTION( + "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h new file mode 100644 index 000000000000..e4358c98bc4e --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -0,0 +1,810 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ALX_REG_H +#define ALX_REG_H + +#define ALX_DEV_ID_AR8161 0x1091 +#define ALX_DEV_ID_E2200 0xe091 +#define ALX_DEV_ID_AR8162 0x1090 +#define ALX_DEV_ID_AR8171 0x10A1 +#define ALX_DEV_ID_AR8172 0x10A0 + +/* rev definition, + * bit(0): with xD support + * bit(1): with Card Reader function + * bit(7:2): real revision + */ +#define ALX_PCI_REVID_SHIFT 3 +#define ALX_REV_A0 0 +#define ALX_REV_A1 1 +#define ALX_REV_B0 2 +#define ALX_REV_C0 3 + +#define ALX_DEV_CTRL 0x0060 +#define ALX_DEV_CTRL_MAXRRS_MIN 2 + +#define ALX_MSIX_MASK 0x0090 + +#define ALX_UE_SVRT 0x010C +#define ALX_UE_SVRT_FCPROTERR BIT(13) +#define ALX_UE_SVRT_DLPROTERR BIT(4) + +/* eeprom & flash load register */ +#define ALX_EFLD 0x0204 +#define ALX_EFLD_F_EXIST BIT(10) +#define ALX_EFLD_E_EXIST BIT(9) +#define ALX_EFLD_STAT BIT(5) +#define ALX_EFLD_START BIT(0) + +/* eFuse load register */ +#define ALX_SLD 0x0218 +#define ALX_SLD_STAT BIT(12) +#define ALX_SLD_START BIT(11) +#define ALX_SLD_MAX_TO 100 + +#define ALX_PDLL_TRNS1 0x1104 +#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11) + +#define ALX_PMCTRL 0x12F8 +#define ALX_PMCTRL_HOTRST_WTEN BIT(31) +/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */ +#define ALX_PMCTRL_ASPM_FCEN BIT(30) +#define ALX_PMCTRL_SADLY_EN BIT(29) +#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF +#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24 +#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC +/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */ +#define ALX_PMCTRL_L1REQ_TO_MASK 0xF +#define ALX_PMCTRL_L1REQ_TO_SHIFT 20 +#define ALX_PMCTRL_L1REG_TO_DEF 0xF +#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19) +#define ALX_PMCTRL_L1_TIMER_MASK 0x7 +#define ALX_PMCTRL_L1_TIMER_SHIFT 16 +#define ALX_PMCTRL_L1_TIMER_16US 4 +#define ALX_PMCTRL_RCVR_WT_1US BIT(15) +/* bit13: enable pcie clk switch in L1 state */ +#define ALX_PMCTRL_L1_CLKSW_EN BIT(13) +#define ALX_PMCTRL_L0S_EN BIT(12) +#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11) +#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7) +/* bit6: power down serdes RX */ +#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6) +#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5) +#define ALX_PMCTRL_L1_SRDS_EN BIT(4) +#define ALX_PMCTRL_L1_EN BIT(3) + +/*******************************************************/ +/* following registers are mapped only to memory space */ +/*******************************************************/ + +#define ALX_MASTER 0x1400 +/* bit12: 1:alwys select pclk from serdes, not sw to 25M */ +#define ALX_MASTER_PCLKSEL_SRDS BIT(12) +/* bit11: irq moduration for rx */ +#define ALX_MASTER_IRQMOD2_EN BIT(11) +/* bit10: irq moduration for tx/rx */ +#define ALX_MASTER_IRQMOD1_EN BIT(10) +#define ALX_MASTER_SYSALVTIMER_EN BIT(7) +#define ALX_MASTER_OOB_DIS BIT(6) +/* bit5: wakeup without pcie clk */ +#define ALX_MASTER_WAKEN_25M BIT(5) +/* bit0: MAC & DMA reset */ +#define ALX_MASTER_DMA_MAC_RST BIT(0) +#define ALX_DMA_MAC_RST_TO 50 + +#define ALX_IRQ_MODU_TIMER 0x1408 +#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF +#define ALX_IRQ_MODU_TIMER1_SHIFT 0 + +#define ALX_PHY_CTRL 0x140C +#define ALX_PHY_CTRL_100AB_EN BIT(17) +/* bit14: affect MAC & PHY, go to low power sts */ +#define ALX_PHY_CTRL_POWER_DOWN BIT(14) +/* bit13: 1:pll always ON, 0:can switch in 
lpw */ +#define ALX_PHY_CTRL_PLL_ON BIT(13) +#define ALX_PHY_CTRL_RST_ANALOG BIT(12) +#define ALX_PHY_CTRL_HIB_PULSE BIT(11) +#define ALX_PHY_CTRL_HIB_EN BIT(10) +#define ALX_PHY_CTRL_IDDQ BIT(7) +#define ALX_PHY_CTRL_GATE_25M BIT(5) +#define ALX_PHY_CTRL_LED_MODE BIT(2) +/* bit0: out of dsp RST state */ +#define ALX_PHY_CTRL_DSPRST_OUT BIT(0) +#define ALX_PHY_CTRL_DSPRST_TO 80 +#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \ + ALX_PHY_CTRL_100AB_EN | \ + ALX_PHY_CTRL_PLL_ON) + +#define ALX_MAC_STS 0x1410 +#define ALX_MAC_STS_TXQ_BUSY BIT(3) +#define ALX_MAC_STS_RXQ_BUSY BIT(2) +#define ALX_MAC_STS_TXMAC_BUSY BIT(1) +#define ALX_MAC_STS_RXMAC_BUSY BIT(0) +#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \ + ALX_MAC_STS_RXQ_BUSY | \ + ALX_MAC_STS_TXMAC_BUSY | \ + ALX_MAC_STS_RXMAC_BUSY) + +#define ALX_MDIO 0x1414 +#define ALX_MDIO_MODE_EXT BIT(30) +#define ALX_MDIO_BUSY BIT(27) +#define ALX_MDIO_CLK_SEL_MASK 0x7 +#define ALX_MDIO_CLK_SEL_SHIFT 24 +#define ALX_MDIO_CLK_SEL_25MD4 0 +#define ALX_MDIO_CLK_SEL_25MD128 7 +#define ALX_MDIO_START BIT(23) +#define ALX_MDIO_SPRES_PRMBL BIT(22) +/* bit21: 1:read,0:write */ +#define ALX_MDIO_OP_READ BIT(21) +#define ALX_MDIO_REG_MASK 0x1F +#define ALX_MDIO_REG_SHIFT 16 +#define ALX_MDIO_DATA_MASK 0xFFFF +#define ALX_MDIO_DATA_SHIFT 0 +#define ALX_MDIO_MAX_AC_TO 120 + +#define ALX_MDIO_EXTN 0x1448 +#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F +#define ALX_MDIO_EXTN_DEVAD_SHIFT 16 +#define ALX_MDIO_EXTN_REG_MASK 0xFFFF +#define ALX_MDIO_EXTN_REG_SHIFT 0 + +#define ALX_SERDES 0x1424 +#define ALX_SERDES_PHYCLK_SLWDWN BIT(18) +#define ALX_SERDES_MACCLK_SLWDWN BIT(17) + +#define ALX_LPI_CTRL 0x1440 +#define ALX_LPI_CTRL_EN BIT(0) + +/* for B0+, bit[13..] for C0+ */ +#define ALX_HRTBT_EXT_CTRL 0x1AD0 +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24 +#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13) +#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12) +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4 +#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3) +#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2) +#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1) +#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0) + +#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4 +#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478 +#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8 +#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC +#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0 +#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4 + +/* 1B8C ~ 1B94 for C0+ */ +#define ALX_SWOI_ACER_CTRL 0x1B8C +#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20) +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12 +#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF +#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_2 0x1B90 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12 +#define 
ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_3 0x1B94 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0 + +/* for B0 */ +#define ALX_IDLE_DECISN_TIMER 0x1474 +/* 1ms */ +#define ALX_IDLE_DECISN_TIMER_DEF 0x400 + +#define ALX_MAC_CTRL 0x1480 +#define ALX_MAC_CTRL_FAST_PAUSE BIT(31) +#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30) +/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/ +#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29) +#define ALX_MAC_CTRL_BRD_EN BIT(26) +#define ALX_MAC_CTRL_MULTIALL_EN BIT(25) +#define ALX_MAC_CTRL_SPEED_MASK 0x3 +#define ALX_MAC_CTRL_SPEED_SHIFT 20 +#define ALX_MAC_CTRL_SPEED_10_100 1 +#define ALX_MAC_CTRL_SPEED_1000 2 +#define ALX_MAC_CTRL_PROMISC_EN BIT(15) +#define ALX_MAC_CTRL_VLANSTRIP BIT(14) +#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF +#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10 +#define ALX_MAC_CTRL_PCRCE BIT(7) +#define ALX_MAC_CTRL_CRCE BIT(6) +#define ALX_MAC_CTRL_FULLD BIT(5) +#define ALX_MAC_CTRL_RXFC_EN BIT(3) +#define ALX_MAC_CTRL_TXFC_EN BIT(2) +#define ALX_MAC_CTRL_RX_EN BIT(1) +#define ALX_MAC_CTRL_TX_EN BIT(0) + +#define ALX_STAD0 0x1488 +#define ALX_STAD1 0x148C + +#define ALX_HASH_TBL0 0x1490 +#define ALX_HASH_TBL1 0x1494 + +#define ALX_MTU 0x149C +#define ALX_MTU_JUMBO_TH 1514 +#define ALX_MTU_STD_ALGN 1536 + +#define ALX_SRAM5 0x1524 +#define ALX_SRAM_RXF_LEN_MASK 0xFFF +#define ALX_SRAM_RXF_LEN_SHIFT 0 +#define ALX_SRAM_RXF_LEN_8K (8*1024) + +#define ALX_SRAM9 0x1534 +#define ALX_SRAM_LOAD_PTR BIT(0) + +#define ALX_RX_BASE_ADDR_HI 0x1540 + +#define ALX_TX_BASE_ADDR_HI 0x1544 + +#define ALX_RFD_ADDR_LO 0x1550 +#define ALX_RFD_RING_SZ 0x1560 +#define ALX_RFD_BUF_SZ 0x1564 + +#define ALX_RRD_ADDR_LO 0x1568 +#define ALX_RRD_RING_SZ 0x1578 + +/* pri3: highest, pri0: lowest */ +#define ALX_TPD_PRI3_ADDR_LO 0x14E4 +#define ALX_TPD_PRI2_ADDR_LO 0x14E0 +#define ALX_TPD_PRI1_ADDR_LO 0x157C +#define ALX_TPD_PRI0_ADDR_LO 0x1580 + +/* producer index is 16bit */ +#define ALX_TPD_PRI3_PIDX 0x1618 +#define ALX_TPD_PRI2_PIDX 0x161A +#define ALX_TPD_PRI1_PIDX 0x15F0 +#define ALX_TPD_PRI0_PIDX 0x15F2 + +/* consumer index is 16bit */ +#define ALX_TPD_PRI3_CIDX 0x161C +#define ALX_TPD_PRI2_CIDX 0x161E +#define ALX_TPD_PRI1_CIDX 0x15F4 +#define ALX_TPD_PRI0_CIDX 0x15F6 + +#define ALX_TPD_RING_SZ 0x1584 + +#define ALX_TXQ0 0x1590 +#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF +#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16 +#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200 +#define ALX_TXQ0_LSO_8023_EN BIT(7) +#define ALX_TXQ0_MODE_ENHANCE BIT(6) +#define ALX_TXQ0_EN BIT(5) +#define ALX_TXQ0_SUPT_IPOPT BIT(4) +#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF +#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0 +#define ALX_TXQ_TPD_BURSTPREF_DEF 5 + +#define ALX_TXQ1 0x1594 +/* bit11: drop large packet, len > (rfd buf) */ +#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11) +#define ALX_TXQ1_JUMBO_TSO_TH (7*1024) + +#define ALX_RXQ0 0x15A0 +#define ALX_RXQ0_EN BIT(31) +#define ALX_RXQ0_RSS_HASH_EN BIT(29) +#define ALX_RXQ0_RSS_MODE_MASK 0x3 +#define ALX_RXQ0_RSS_MODE_SHIFT 26 +#define ALX_RXQ0_RSS_MODE_DIS 0 +#define ALX_RXQ0_RSS_MODE_MQMI 3 +#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F +#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20 +#define ALX_RXQ0_NUM_RFD_PREF_DEF 8 +#define 
ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF +#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8 +#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100 +#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128 +#define ALX_RXQ0_IPV6_PARSE_EN BIT(7) +#define ALX_RXQ0_RSS_HSTYP_MASK 0xF +#define ALX_RXQ0_RSS_HSTYP_SHIFT 2 +#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5) +#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4) +#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3) +#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2) +#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV6_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV4_EN) +#define ALX_RXQ0_ASPM_THRESH_MASK 0x3 +#define ALX_RXQ0_ASPM_THRESH_SHIFT 0 +#define ALX_RXQ0_ASPM_THRESH_100M 3 + +#define ALX_RXQ2 0x15A8 +#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF +#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16 +#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF +#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0 +/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) + + * rx-packet(1522) + delay-of-link(64) + * = 3212. + */ +#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212 + +#define ALX_DMA 0x15C0 +#define ALX_DMA_RCHNL_SEL_MASK 0x3 +#define ALX_DMA_RCHNL_SEL_SHIFT 26 +#define ALX_DMA_WDLY_CNT_MASK 0xF +#define ALX_DMA_WDLY_CNT_SHIFT 16 +#define ALX_DMA_WDLY_CNT_DEF 4 +#define ALX_DMA_RDLY_CNT_MASK 0x1F +#define ALX_DMA_RDLY_CNT_SHIFT 11 +#define ALX_DMA_RDLY_CNT_DEF 15 +/* bit10: 0:tpd with pri, 1: data */ +#define ALX_DMA_RREQ_PRI_DATA BIT(10) +#define ALX_DMA_RREQ_BLEN_MASK 0x7 +#define ALX_DMA_RREQ_BLEN_SHIFT 4 +#define ALX_DMA_RORDER_MODE_MASK 0x7 +#define ALX_DMA_RORDER_MODE_SHIFT 0 +#define ALX_DMA_RORDER_MODE_OUT 4 + +#define ALX_WOL0 0x14A0 +#define ALX_WOL0_PME_LINK BIT(5) +#define ALX_WOL0_LINK_EN BIT(4) +#define ALX_WOL0_PME_MAGIC_EN BIT(3) +#define ALX_WOL0_MAGIC_EN BIT(2) + +#define ALX_RFD_PIDX 0x15E0 + +#define ALX_RFD_CIDX 0x15F8 + +/* MIB */ +#define ALX_MIB_BASE 0x1700 +#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0) +#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92) +#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96) +#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192) + +#define ALX_RX_STATS_BIN ALX_MIB_RX_OK +#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR +#define ALX_TX_STATS_BIN ALX_MIB_TX_OK +#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT + +#define ALX_ISR 0x1600 +#define ALX_ISR_DIS BIT(31) +#define ALX_ISR_RX_Q7 BIT(30) +#define ALX_ISR_RX_Q6 BIT(29) +#define ALX_ISR_RX_Q5 BIT(28) +#define ALX_ISR_RX_Q4 BIT(27) +#define ALX_ISR_PCIE_LNKDOWN BIT(26) +#define ALX_ISR_RX_Q3 BIT(19) +#define ALX_ISR_RX_Q2 BIT(18) +#define ALX_ISR_RX_Q1 BIT(17) +#define ALX_ISR_RX_Q0 BIT(16) +#define ALX_ISR_TX_Q0 BIT(15) +#define ALX_ISR_PHY BIT(12) +#define ALX_ISR_DMAW BIT(10) +#define ALX_ISR_DMAR BIT(9) +#define ALX_ISR_TXF_UR BIT(8) +#define ALX_ISR_TX_Q3 BIT(7) +#define ALX_ISR_TX_Q2 BIT(6) +#define ALX_ISR_TX_Q1 BIT(5) +#define ALX_ISR_RFD_UR BIT(4) +#define ALX_ISR_RXF_OV BIT(3) +#define ALX_ISR_MANU BIT(2) +#define ALX_ISR_TIMER BIT(1) +#define ALX_ISR_SMB BIT(0) + +#define ALX_IMR 0x1604 + +/* re-send assert msg if SW no response */ +#define ALX_INT_RETRIG 0x1608 +/* 40ms */ +#define ALX_INT_RETRIG_TO 20000 + +#define ALX_SMB_TIMER 0x15C4 + +#define ALX_TINT_TPD_THRSHLD 0x15C8 + +#define ALX_TINT_TIMER 0x15CC + +#define ALX_CLK_GATE 0x1814 +#define ALX_CLK_GATE_RXMAC BIT(5) +#define ALX_CLK_GATE_TXMAC BIT(4) +#define ALX_CLK_GATE_RXQ BIT(3) +#define ALX_CLK_GATE_TXQ BIT(2) +#define ALX_CLK_GATE_DMAR BIT(1) +#define ALX_CLK_GATE_DMAW BIT(0) +#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | 
\ + ALX_CLK_GATE_TXMAC | \ + ALX_CLK_GATE_RXQ | \ + ALX_CLK_GATE_TXQ | \ + ALX_CLK_GATE_DMAR | \ + ALX_CLK_GATE_DMAW) + +/* interop between drivers */ +#define ALX_DRV 0x1804 +#define ALX_DRV_PHY_AUTO BIT(28) +#define ALX_DRV_PHY_1000 BIT(27) +#define ALX_DRV_PHY_100 BIT(26) +#define ALX_DRV_PHY_10 BIT(25) +#define ALX_DRV_PHY_DUPLEX BIT(24) +/* bit23: adv Pause */ +#define ALX_DRV_PHY_PAUSE BIT(23) +/* bit22: adv Asym Pause */ +#define ALX_DRV_PHY_MASK 0xFF +#define ALX_DRV_PHY_SHIFT 21 +#define ALX_DRV_PHY_UNKNOWN 0 + +/* flag of phy inited */ +#define ALX_PHY_INITED 0x003F + +/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */ +#define ALX_WOL_CTRL2 0x1830 +#define ALX_WOL_CTRL2_DATA_STORE BIT(3) +#define ALX_WOL_CTRL2_PTRN_EVT BIT(2) +#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1) +#define ALX_WOL_CTRL2_PTRN_EN BIT(0) + +#define ALX_WOL_CTRL3 0x1834 +#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF +#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0 + +#define ALX_WOL_CTRL4 0x1838 +#define ALX_WOL_CTRL4_PT15_MATCH BIT(31) +#define ALX_WOL_CTRL4_PT14_MATCH BIT(30) +#define ALX_WOL_CTRL4_PT13_MATCH BIT(29) +#define ALX_WOL_CTRL4_PT12_MATCH BIT(28) +#define ALX_WOL_CTRL4_PT11_MATCH BIT(27) +#define ALX_WOL_CTRL4_PT10_MATCH BIT(26) +#define ALX_WOL_CTRL4_PT9_MATCH BIT(25) +#define ALX_WOL_CTRL4_PT8_MATCH BIT(24) +#define ALX_WOL_CTRL4_PT7_MATCH BIT(23) +#define ALX_WOL_CTRL4_PT6_MATCH BIT(22) +#define ALX_WOL_CTRL4_PT5_MATCH BIT(21) +#define ALX_WOL_CTRL4_PT4_MATCH BIT(20) +#define ALX_WOL_CTRL4_PT3_MATCH BIT(19) +#define ALX_WOL_CTRL4_PT2_MATCH BIT(18) +#define ALX_WOL_CTRL4_PT1_MATCH BIT(17) +#define ALX_WOL_CTRL4_PT0_MATCH BIT(16) +#define ALX_WOL_CTRL4_PT15_EN BIT(15) +#define ALX_WOL_CTRL4_PT14_EN BIT(14) +#define ALX_WOL_CTRL4_PT13_EN BIT(13) +#define ALX_WOL_CTRL4_PT12_EN BIT(12) +#define ALX_WOL_CTRL4_PT11_EN BIT(11) +#define ALX_WOL_CTRL4_PT10_EN BIT(10) +#define ALX_WOL_CTRL4_PT9_EN BIT(9) +#define ALX_WOL_CTRL4_PT8_EN BIT(8) +#define ALX_WOL_CTRL4_PT7_EN BIT(7) +#define ALX_WOL_CTRL4_PT6_EN BIT(6) +#define ALX_WOL_CTRL4_PT5_EN BIT(5) +#define ALX_WOL_CTRL4_PT4_EN BIT(4) +#define ALX_WOL_CTRL4_PT3_EN BIT(3) +#define ALX_WOL_CTRL4_PT2_EN BIT(2) +#define ALX_WOL_CTRL4_PT1_EN BIT(1) +#define ALX_WOL_CTRL4_PT0_EN BIT(0) + +#define ALX_WOL_CTRL5 0x183C +#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0 + +#define ALX_WOL_CTRL6 0x1840 +#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0 + +#define ALX_WOL_CTRL7 0x1844 +#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0 + +#define ALX_WOL_CTRL8 0x1848 +#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16 +#define 
ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0 + +#define ALX_ACER_FIXED_PTN0 0x1850 +#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF +#define ALX_ACER_FIXED_PTN0_SHIFT 0 + +#define ALX_ACER_FIXED_PTN1 0x1854 +#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF +#define ALX_ACER_FIXED_PTN1_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM0 0x1858 +#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM0_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM1 0x185C +#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM1_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM2 0x1860 +#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM2_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM3 0x1864 +#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM3_SHIFT 0 + +#define ALX_ACER_MAGIC 0x1868 +#define ALX_ACER_MAGIC_EN BIT(31) +#define ALX_ACER_MAGIC_PME_EN BIT(30) +#define ALX_ACER_MAGIC_MATCH BIT(29) +#define ALX_ACER_MAGIC_FF_CHECK BIT(10) +#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F +#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5 +#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F +#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0 + +#define ALX_ACER_TIMER 0x186C +#define ALX_ACER_TIMER_EN BIT(31) +#define ALX_ACER_TIMER_PME_EN BIT(30) +#define ALX_ACER_TIMER_MATCH BIT(29) +#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF +#define ALX_ACER_TIMER_THRES_SHIFT 0 +#define ALX_ACER_TIMER_THRES_DEF 1 + +/* RSS definitions */ +#define ALX_RSS_KEY0 0x14B0 +#define ALX_RSS_KEY1 0x14B4 +#define ALX_RSS_KEY2 0x14B8 +#define ALX_RSS_KEY3 0x14BC +#define ALX_RSS_KEY4 0x14C0 +#define ALX_RSS_KEY5 0x14C4 +#define ALX_RSS_KEY6 0x14C8 +#define ALX_RSS_KEY7 0x14CC +#define ALX_RSS_KEY8 0x14D0 +#define ALX_RSS_KEY9 0x14D4 + +#define ALX_RSS_IDT_TBL0 0x1B00 + +#define ALX_MSI_MAP_TBL1 0x15D0 +#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20 +#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16 +#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12 +#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8 +#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4 +#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0 + +#define ALX_MSI_MAP_TBL2 0x15D8 +#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20 +#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16 +#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12 +#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8 +#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4 +#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0 + +#define ALX_MSI_ID_MAP 0x15D4 + +#define ALX_MSI_RETRANS_TIMER 0x1920 +/* bit16: 1:line,0:standard */ +#define ALX_MSI_MASK_SEL_LINE BIT(16) +#define ALX_MSI_RETRANS_TM_MASK 0xFFFF +#define ALX_MSI_RETRANS_TM_SHIFT 0 + +/* CR DMA ctrl */ + +/* TX QoS */ +#define ALX_WRR 0x1938 +#define ALX_WRR_PRI_MASK 0x3 +#define ALX_WRR_PRI_SHIFT 29 +#define ALX_WRR_PRI_RESTRICT_NONE 3 +#define ALX_WRR_PRI3_MASK 0x1F +#define ALX_WRR_PRI3_SHIFT 24 +#define ALX_WRR_PRI2_MASK 0x1F +#define ALX_WRR_PRI2_SHIFT 16 +#define ALX_WRR_PRI1_MASK 0x1F +#define ALX_WRR_PRI1_SHIFT 8 +#define ALX_WRR_PRI0_MASK 0x1F +#define ALX_WRR_PRI0_SHIFT 0 + +#define ALX_HQTPD 0x193C +#define ALX_HQTPD_BURST_EN BIT(31) +#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8 +#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4 +#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0 + +#define ALX_MISC 0x19C0 +#define ALX_MISC_PSW_OCP_MASK 0x7 +#define ALX_MISC_PSW_OCP_SHIFT 21 +#define ALX_MISC_PSW_OCP_DEF 0x7 +#define ALX_MISC_ISO_EN BIT(12) +#define ALX_MISC_INTNLOSC_OPEN BIT(3) + +#define ALX_MSIC2 0x19C8 +#define 
ALX_MSIC2_CALB_START BIT(0) + +#define ALX_MISC3 0x19CC +/* bit1: 1:Software control 25M */ +#define ALX_MISC3_25M_BY_SW BIT(1) +/* bit0: 25M switch to intnl OSC */ +#define ALX_MISC3_25M_NOTO_INTNL BIT(0) + +/* MSIX tbl in memory space */ +#define ALX_MSIX_ENTRY_BASE 0x2000 + +/********************* PHY regs definition ***************************/ + +/* PHY Specific Status Register */ +#define ALX_MII_GIGA_PSSR 0x11 +#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 +#define ALX_GIGA_PSSR_DPLX 0x2000 +#define ALX_GIGA_PSSR_SPEED 0xC000 +#define ALX_GIGA_PSSR_10MBS 0x0000 +#define ALX_GIGA_PSSR_100MBS 0x4000 +#define ALX_GIGA_PSSR_1000MBS 0x8000 + +/* PHY Interrupt Enable Register */ +#define ALX_MII_IER 0x12 +#define ALX_IER_LINK_UP 0x0400 +#define ALX_IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define ALX_MII_ISR 0x13 + +#define ALX_MII_DBG_ADDR 0x1D +#define ALX_MII_DBG_DATA 0x1E + +/***************************** debug port *************************************/ + +#define ALX_MIIDBG_ANACTRL 0x00 +#define ALX_ANACTRL_DEF 0x02EF + +#define ALX_MIIDBG_SYSMODCTRL 0x04 +/* en half bias */ +#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B + +#define ALX_MIIDBG_SRDSYSMOD 0x05 +#define ALX_SRDSYSMOD_DEEMP_EN 0x0040 +#define ALX_SRDSYSMOD_DEF 0x2C46 + +#define ALX_MIIDBG_HIBNEG 0x0B +#define ALX_HIBNEG_PSHIB_EN 0x8000 +#define ALX_HIBNEG_HIB_PSE 0x1000 +#define ALX_HIBNEG_DEF 0xBC40 +#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \ + ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE)) + +#define ALX_MIIDBG_TST10BTCFG 0x12 +#define ALX_TST10BTCFG_DEF 0x4C04 + +#define ALX_MIIDBG_AZ_ANADECT 0x15 +#define ALX_AZ_ANADECT_DEF 0x3220 +#define ALX_AZ_ANADECT_LONG 0x3210 + +#define ALX_MIIDBG_MSE16DB 0x18 +#define ALX_MSE16DB_UP 0x05EA +#define ALX_MSE16DB_DOWN 0x02EA + +#define ALX_MIIDBG_MSE20DB 0x1C +#define ALX_MSE20DB_TH_MASK 0x7F +#define ALX_MSE20DB_TH_SHIFT 2 +#define ALX_MSE20DB_TH_DEF 0x2E +#define ALX_MSE20DB_TH_HI 0x54 + +#define ALX_MIIDBG_AGC 0x23 +#define ALX_AGC_2_VGA_MASK 0x3FU +#define ALX_AGC_2_VGA_SHIFT 8 +#define ALX_AGC_LONG1G_LIMT 40 +#define ALX_AGC_LONG100M_LIMT 44 + +#define ALX_MIIDBG_LEGCYPS 0x29 +#define ALX_LEGCYPS_EN 0x8000 +#define ALX_LEGCYPS_DEF 0x129D + +#define ALX_MIIDBG_TST100BTCFG 0x36 +#define ALX_TST100BTCFG_DEF 0xE12C + +#define ALX_MIIDBG_GREENCFG 0x3B +#define ALX_GREENCFG_DEF 0x7078 + +#define ALX_MIIDBG_GREENCFG2 0x3D +#define ALX_GREENCFG2_BP_GREEN 0x8000 +#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080 + +/******* dev 3 *********/ +#define ALX_MIIEXT_PCS 3 + +#define ALX_MIIEXT_CLDCTRL3 0x8003 +#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 + +#define ALX_MIIEXT_CLDCTRL5 0x8005 +#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000 + +#define ALX_MIIEXT_CLDCTRL6 0x8006 +#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF +#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0 +#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116 +#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152 + +#define ALX_MIIEXT_VDRVBIAS 0x8062 +#define ALX_VDRVBIAS_DEF 0x3 + +/********* dev 7 **********/ +#define ALX_MIIEXT_ANEG 7 + +#define ALX_MIIEXT_LOCAL_EEEADV 0x3C +#define ALX_LOCAL_EEEADV_1000BT 0x0004 +#define ALX_LOCAL_EEEADV_100BT 0x0002 + +#define ALX_MIIEXT_AFE 0x801A +#define ALX_AFE_10BT_100M_TH 0x0040 + +#define ALX_MIIEXT_S3DIG10 0x8023 +/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */ +#define ALX_MIIEXT_S3DIG10_SL 0x0001 +#define ALX_MIIEXT_S3DIG10_DEF 0 + +#define ALX_MIIEXT_NLP78 0x8027 +#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05 + +#endif -- cgit v1.2.1 From ab8e99d276d4b72d1532bda4423b14c9e2ce053c Mon Sep 17 00:00:00 2001 
From: Sebastian Siewior Date: Mon, 17 Jun 2013 19:31:52 +0200 Subject: net: cpsw: check for cpts pointer after its allocation After priv->cpts is allocated, this pointer should be checked to determine whether the allocation succeeded. Cc: Mugunthan V N Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 21a5b291b4b3..2fd69db3c09f 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev) priv->rx_packet_max = max(rx_packet_max, 128); priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); priv->irq_enabled = true; - if (!ndev) { + if (!priv->cpts) { pr_err("error allocating cpts\n"); goto clean_ndev_ret; } -- cgit v1.2.1 From 4afe2156eb639e563d6ef0c2706b66ea400348b2 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 18 Jun 2013 14:33:58 +0200 Subject: can: usb_8dev: unregister netdev before free()ing The usb_8dev hardware has problems on some xhci USB hosts. The driver fails to read the firmware revision in the probe function. This leads to the following Oops: [ 3356.635912] kernel BUG at net/core/dev.c:5701! The driver tries to free the netdev, which has already been registered, without unregistering it. This patch fixes the problem by unregistering the netdev in the error path. Reported-by: Michael Olbrich Reviewed-by: Bernd Krumboeck Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/usb_8dev.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 6e15ef08f301..cbd388eea682 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf, err = usb_8dev_cmd_version(priv, &version); if (err) { netdev_err(netdev, "can't get firmware version\n"); - goto cleanup_cmd_msg_buffer; + goto cleanup_unregister_candev; } else { netdev_info(netdev, "firmware: %d.%d, hardware: %d.%d\n", @@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf, return 0; +cleanup_unregister_candev: + unregister_netdev(priv->netdev); + cleanup_cmd_msg_buffer: kfree(priv->cmd_msg_buffer); -- cgit v1.2.1 From eb064c3b49931dc73bba59887019c7f5cb97d322 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 18 Jun 2013 14:27:01 -0700 Subject: vxlan: fix check for migration of static entry The check introduced by: commit 26a41ae604381c5cc0caf1c3261ca6b298b5fe69 Author: stephen hemminger Date: Mon Jun 17 12:09:58 2013 -0700 vxlan: only migrate dynamic FDB entries was not correct because it checks a flag about the type of the FDB entry, rather than the state (dynamic versus static). The confusion arises because vxlan is reusing values from bridge, and bridge is reusing values from the neighbour table, so it is easy to get lost in translation. Signed-off-by: Stephen Hemminger Signed-off-by: David S.
Miller --- drivers/net/vxlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index dda997a0102c..57325f356d4f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -579,7 +579,7 @@ static bool vxlan_snoop(struct net_device *dev, return false; /* Don't migrate static entries, drop packets */ - if (!(f->flags & NTF_SELF)) + if (f->state & NUD_NOARP) return true; if (net_ratelimit()) -- cgit v1.2.1 From d13919301d9a34cd4d3cc5ac73b89012c48cad71 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 18 Jun 2013 10:04:59 -0700 Subject: net: fec: Fix build for MCF5272 Commits 4c09eed9 (net: fec: Enable imx6 enet checksum acceleration) and baa70a5c (net: fec: enable pause frame to improve rx prefomance for 1G network) introduced functionality into the FEC driver which is not supported on MCF5272. The registers used to implement this functionality do not exist on MCF5272. Since register defines for MCF5272 are separate from register defines for other chips, building images for MCF5272 fails as follows. fec_main.c: In function 'fec_restart': fec_main.c:520:8: error: 'FEC_RACC' undeclared (first use in this function) fec_main.c:585:3: error: 'FEC_R_FIFO_RSEM' undeclared (first use in this function) fec_main.c:586:3: error: 'FEC_R_FIFO_RSFL' undeclared (first use in this function) fec_main.c:587:3: error: 'FEC_R_FIFO_RAEM' undeclared (first use in this function) fec_main.c:588:3: error: 'FEC_R_FIFO_RAFL' undeclared (first use in this function) fec_main.c:591:3: error: 'FEC_OPD' undeclared (first use in this function) Adding the missing register defines is not an option, since the registers do not exist on MCF5272. Disable the added functionality for MCF5272 builds. Cc: Frank Li Cc: Jim Baxter Signed-off-by: Guenter Roeck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec_main.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a667015be22a..d48099f03b7f 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex) /* Set MII speed */ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); +#if !defined(CONFIG_M5272) /* set RX checksum */ val = readl(fep->hwp + FEC_RACC); if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) @@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex) else val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); +#endif /* * The phy interface and speed need to get configured @@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex) #endif } +#if !defined(CONFIG_M5272) /* enable pause frame*/ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && @@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex) } else { rcntl &= ~FEC_ENET_FCE; } +#endif /* !defined(CONFIG_M5272) */ writel(rcntl, fep->hwp + FEC_R_CNTRL); @@ -1205,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev) /* mask with MAC supported features */ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; +#if !defined(CONFIG_M5272) phy_dev->supported |= SUPPORTED_Pause; +#endif } else phy_dev->supported &= PHY_BASIC_FEATURES; @@ -1390,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev, } } +#if !defined(CONFIG_M5272) + static void fec_enet_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { @@ -1436,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, return 0; } +#endif /* !defined(CONFIG_M5272) */ + static const struct ethtool_ops fec_enet_ethtool_ops = { +#if !defined(CONFIG_M5272) .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, +#endif .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, @@ -1874,10 +1886,12 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); +#if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (pdev->id_entry && (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; +#endif fep->hwp = devm_request_and_ioremap(&pdev->dev, r); fep->pdev = pdev; -- cgit v1.2.1 From 6d3d76f877ca061911343d5d1650458906fdf0ea Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Tue, 18 Jun 2013 15:04:35 +0530 Subject: drivers: net: cpsw: fix cpsw clock gating issue across suspend/resume Due to some hardware integration issue, CPSW sliver modules requires a reset across suspend/resume cycle for a successful clock gating to CPGMAC (CPSW and Davinci MDIO) in AM335x PG1.0. This issue is fixed in PG2.x, though to support suspend/resume on PG1.0 this reset is required. Signed-off-by: Mugunthan V N Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 2fd69db3c09f..e66a20223abb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1976,6 +1976,8 @@ static int cpsw_suspend(struct device *dev) if (netif_running(ndev)) cpsw_ndo_stop(ndev); + soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset); + soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset); pm_runtime_put_sync(&pdev->dev); return 0; -- cgit v1.2.1 From f5351ef73e476d7019fcd7a1c4461a153f663b4b Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Tue, 18 Jun 2013 07:03:23 +0200 Subject: stmmac: fix EEE setup This patch fixes the EEE setup allowing to configure this support when the link changes. Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 4 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 66 +++++++++++------------ 2 files changed, 33 insertions(+), 37 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7788fbe44f0a..95176979b2d2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -297,8 +297,8 @@ struct dma_features { #define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ /* Default LPI timers */ -#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 -#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 +#define STMMAC_DEFAULT_LIT_LS 0x3E8 +#define STMMAC_DEFAULT_TWT_LS 0x0 #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ee919ca8b8a0..e9eab29db7be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; module_param(eee_timer, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); -#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) +#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) /* By default the driver will use the ring mode to manage tx and rx descriptors * but passing this value so user can force to use the chain instead of the ring @@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) struct stmmac_priv *priv = (struct stmmac_priv *)arg; stmmac_enable_eee_mode(priv); - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } /** @@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { bool ret = false; + /* Using PCS we cannot dial with the phy registers at this stage + * so we do not support extra feature like EEE. + */ + if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) || + (priv->pcs == STMMAC_PCS_RTBI)) + goto out; + /* MAC core supports the EEE feature. 
*/ if (priv->dma_cap.eee) { /* Check if the PHY supports EEE */ if (phy_init_eee(priv->phydev, 1)) goto out; - priv->eee_active = 1; - init_timer(&priv->eee_ctrl_timer); - priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; - priv->eee_ctrl_timer.data = (unsigned long)priv; - priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer); - add_timer(&priv->eee_ctrl_timer); - - priv->hw->mac->set_eee_timer(priv->ioaddr, - STMMAC_DEFAULT_LIT_LS_TIMER, - priv->tx_lpi_timer); + if (!priv->eee_active) { + priv->eee_active = 1; + init_timer(&priv->eee_ctrl_timer); + priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; + priv->eee_ctrl_timer.data = (unsigned long)priv; + priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); + add_timer(&priv->eee_ctrl_timer); + + priv->hw->mac->set_eee_timer(priv->ioaddr, + STMMAC_DEFAULT_LIT_LS, + priv->tx_lpi_timer); + } else + /* Set HW EEE according to the speed */ + priv->hw->mac->set_eee_pls(priv->ioaddr, + priv->phydev->link); pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); @@ -329,20 +341,6 @@ out: return ret; } -/** - * stmmac_eee_adjust: adjust HW EEE according to the speed - * @priv: driver private structure - * Description: - * When the EEE has been already initialised we have to - * modify the PLS bit in the LPI ctrl & status reg according - * to the PHY link status. For this reason. - */ -static void stmmac_eee_adjust(struct stmmac_priv *priv) -{ - if (priv->eee_enabled) - priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); -} - /* stmmac_get_tx_hwtstamp: get HW TX timestamps * @priv: driver private structure * @entry : descriptor index to be used. @@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - stmmac_eee_adjust(priv); + /* At this stage, it could be needed to setup the EEE or adjust some + * MAC related HW registers. + */ + priv->eee_enabled = stmmac_eee_init(priv); spin_unlock_irqrestore(&priv->lock, flags); @@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { stmmac_enable_eee_mode(priv); - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } spin_unlock(&priv->tx_lock); } @@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev) if (priv->phydev) phy_start(priv->phydev); - priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; + priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; - /* Using PCS we cannot dial with the phy registers at this stage - * so we do not support extra feature like EEE. - */ - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) - priv->eee_enabled = stmmac_eee_init(priv); + priv->eee_enabled = stmmac_eee_init(priv); stmmac_init_tx_coalesce(priv); -- cgit v1.2.1 From 906996d6eb8fbd5ee4241a0e8e0fb013423c934d Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 19 Jun 2013 17:40:19 -0700 Subject: netdev: octeon_mgmt: Correct tx IFG workaround. The previous fix was still too agressive to meet ieee specs. Increase to (14, 10). Signed-off-by: David Daney Signed-off-by: David S. 
Miller --- drivers/net/ethernet/octeon/octeon_mgmt.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 921729f9c85c..a603faf64a9f 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -1141,10 +1141,13 @@ static int octeon_mgmt_open(struct net_device *netdev) /* For compensation state to lock. */ ndelay(1040 * NS_PER_PHY_CLK); - /* Some Ethernet switches cannot handle standard - * Interframe Gap, increase to 16 bytes. + /* Default Interframe Gaps are too small. Recommended + * workaround is. + * + * AGL_GMX_TX_IFG[IFG1]=14 + * AGL_GMX_TX_IFG[IFG2]=10 */ - cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88); + cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); } octeon_mgmt_rx_fill_ring(netdev); -- cgit v1.2.1 From 3ac19c900cf4a46748d789c49d84a58c37b93841 Mon Sep 17 00:00:00 2001 From: David Daney Date: Wed, 19 Jun 2013 17:40:20 -0700 Subject: netdev: octeon_mgmt: Fix structure layout for little-endian. The C ABI reverses the bitfield fill order when compiled as little-endian. Signed-off-by: David Daney Signed-off-by: David S. Miller --- drivers/net/ethernet/octeon/octeon_mgmt.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index a603faf64a9f..91a8a5d28037 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -46,17 +46,25 @@ union mgmt_port_ring_entry { u64 d64; struct { - u64 reserved_62_63:2; +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved_62_63:2; /* Length of the buffer/packet in bytes */ - u64 len:14; + u64 len:14; /* For TX, signals that the packet should be timestamped */ - u64 tstamp:1; + u64 tstamp:1; /* The RX error code */ - u64 code:7; -#define RING_ENTRY_CODE_DONE 0xf -#define RING_ENTRY_CODE_MORE 0x10 + u64 code:7; /* Physical address of the buffer */ - u64 addr:40; + u64 addr:40; +#else + u64 addr:40; + u64 code:7; + u64 tstamp:1; + u64 len:14; + u64 reserved_62_63:2; +#endif } s; }; -- cgit v1.2.1 From 776fbcc9cbece10756bdb67b48a0798e6446f16b Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 18 Jun 2013 17:45:40 +0100 Subject: sfc: Remove write permission from phy_type attribute Driver probe currently results in: WARNING: at drivers/base/core.c:576 device_create_file+0x57/0x7e() Attribute phy_type: write permission without 'store' Signed-off-by: Ben Hutchings Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 39e4cb39de29..4a14a940c65e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", efx->phy_type); } -static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); +static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); static int efx_register_netdev(struct efx_nic *efx) { -- cgit v1.2.1 From 451bff2932a69e904f41a180145de1d2358bb8f5 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Tue, 18 Jun 2013 19:30:48 +0200 Subject: pxa168_eth: Allocate receive queue initialized to zero A zero pointer in rx_skb or tx_skb is how the respective *_deinit() functions find out that a skb slot is unallocated. If the *_init() functions return unsuccessfully after the allocation (e.g. when a subsequent dma_alloc_coherent() is not successful), this would result in an attempt to kfree() invalid pointers. Signed-off-by: Lubomir Rintel Cc: Kosta Zertsekel Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 339bb323cb0c..1c8af8ba08d9 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev) int rx_desc_num = pep->rx_ring_size; /* Allocate RX skb rings */ - pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, + pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, GFP_KERNEL); if (!pep->rx_skb) return -ENOMEM; @@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev) int size = 0, i = 0; int tx_desc_num = pep->tx_ring_size; - pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, + pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, GFP_KERNEL); if (!pep->tx_skb) return -ENOMEM; -- cgit v1.2.1 From 9fa8e980bb0a56e46ddd97731f83f608c39aa7a5 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Tue, 18 Jun 2013 19:32:38 +0200 Subject: mv643xx_eth: Allocate receive queue initialized to zero A zero pointer in rx_skb is how the respective rxq_deinit() finds out that a skb slot is unallocated. If rxq_refill() fails (e.g. on an OOM condition), the subsequent teardown would result in an attempt to kfree() invalid pointers. Signed-off-by: Lubomir Rintel Cc: Lennert Buytenhek Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S.
Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2ad1494efbb3..d1cbfb12c1ca 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) memset(rxq->rx_desc_area, 0, size); rxq->rx_desc_area_size = size; - rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb), + rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) goto out_free; -- cgit v1.2.1 From f9bd2d7f6d18d96fa31a657f35290741117c0dd3 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 20 Jun 2013 14:58:10 +0300 Subject: net/mlx_en: Timestamping is not supported in slave mode Old hypervisors don't mask out timestamp capability for slave. Till slave support will be added, need to disable capability by slave. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2f4a26039e80..8a434997a0df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.cqe_size = 32; } + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); + slave_adjust_steering_mode(dev, &dev_cap, &hca_param); return 0; -- cgit v1.2.1 From db4e9b2b98bac7adc7657ef94bb6d1a419a35571 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 20 Jun 2013 14:34:13 +0200 Subject: bonding: fix slave speed reporting in bond_miimon_commit When we have BOND_LINK_UP the speed is reported unconditionally with %u format although it can be SPEED_UNKNOWN (-1). After this patch it returns 0 in that case in an attempt to keep the existing scripts happy. One line is intenionally left 81 chars because it gets ugly if broken. Signed-off-by: Nikolay Aleksandrov Acked-by: Veaceslav Falico Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 02d9ae7d527e..f97569613526 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2413,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond) pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", bond->dev->name, slave->dev->name, - slave->speed, slave->duplex ? "full" : "half"); + slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, + slave->duplex ? "full" : "half"); /* notify ad that the link status has changed */ if (bond->params.mode == BOND_MODE_8023AD) -- cgit v1.2.1 From 14bd076955f4e9cb911326d1b7291e3a2b40bdec Mon Sep 17 00:00:00 2001 From: Sebastian Siewior Date: Thu, 20 Jun 2013 16:58:45 +0200 Subject: net: eth: davicnci_cpdma: check dma map error Since the DMA mapping may fail the caller should check the return value. Cc: Mugunthan V N Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/davinci_cpdma.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 49dfd592ac1e..053c84fd0853 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, } buffer = dma_map_single(ctlr->dev, data, len, chan->dir); + ret = dma_mapping_error(ctlr->dev, buffer); + if (ret) { + cpdma_desc_free(ctlr->pool, desc, 1); + ret = -EINVAL; + goto unlock_ret; + } + mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; cpdma_desc_to_port(chan, mode, directed); -- cgit v1.2.1 From 8afe3dc891c4dec2ebddbba6f3767684f95ac2f9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 20 Jun 2013 16:10:59 -0500 Subject: qmi_wwan: add various Novatel Gobi1K IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Found in the Windows INF files while investigating the Novatel/Verizon USB-1000 device. The USB-1000 is verified as a Gobi1K device and works with QMI after loading appropriate firmware. Signed-off-by: Dan Williams Acked-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index d095d0d3056b..56459215a22b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -590,7 +590,13 @@ static const struct usb_device_id products[] = { {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ - {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */ + {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ -- cgit v1.2.1 From ca8c35852138ee0585eaffe6b9f10a5261ea7771 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 21 Jun 2013 01:12:21 +0400 Subject: sh_eth: fix unhandled RFE interrupt EESR.RFE (receive FIFO overflow) interrupt is enabled by the driver on all SoCs and sh_eth_error() handles it but it's not present in any initializer/assignment of the 'eesr_err_check' field of 'struct sh_eth_cpu_data'. This leads to that interrupt not being handled and cleared, and finally to disabling IRQ and the driver being non-functional. Modify DEFAULT_EESR_ERR_CHECK macro and all explicit initializers of the above mentioned field to contain the EESR.RFE bit. Remove useless backslashes from the initializers, while at it. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 33 ++++++++++++++++++--------------- drivers/net/ethernet/renesas/sh_eth.h | 2 +- 2 files changed, 19 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5e3982fc5398..d9c21dd596d7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rmcr_value = 0x00000001, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, .fdr_value = 0x0000072f, @@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, @@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 
1ddc9f235bcb..62689a5823be 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -253,7 +253,7 @@ enum EESR_BIT { #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ EESR_RTO) -#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ +#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE | EESR_ECI) #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ -- cgit v1.2.1 From 4eb313a7a939a983c525648227b5fe0a9d41679f Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 21 Jun 2013 01:13:42 +0400 Subject: sh_eth: fix misreporting of transmit abort Due to obviously missing braces, EESR.TABT (transmit abort) interrupt may be reported even if it hasn't happened, just when EESR.TWB (transmit descriptor write-back) interrupt happens. Luckily (?), EESR.TWB is disabled by the driver via the TRIMD register and all the interrupt masks, so that transmit abort is never actually logged... Put the braces where they should be and fix the incoherent comment, while at it. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d9c21dd596d7..e29fe8dbd226 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1552,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) ignore_link: if (intr_status & EESR_TWB) { - /* Write buck end. unused write back interrupt */ - if (intr_status & EESR_TABT) /* Transmit Abort int */ + /* Unused write back interrupt */ + if (intr_status & EESR_TABT) { /* Transmit Abort int */ ndev->stats.tx_aborted_errors++; if (netif_msg_tx_err(mdp)) dev_err(&ndev->dev, "Transmit Abort\n"); + } } if (intr_status & EESR_RABT) { -- cgit v1.2.1 From b90fc27a64a57b273351ceef7ae298558ff8a6ac Mon Sep 17 00:00:00 2001 From: Mugunthan V N Date: Fri, 21 Jun 2013 19:15:09 +0530 Subject: drivers: net: cpsw: fix compilation error with cpsw driver drivers/net/ethernet/ti/cpsw.c: In function 'cpsw_suspend': drivers/net/ethernet/ti/cpsw.c:1979:26: error: 'priv' undeclared (first use in this function) drivers/net/ethernet/ti/cpsw.c:1979:26: note: each undeclared identifier is reported only once for each function it appears in make[4]: *** [drivers/net/ethernet/ti/cpsw.o] Error 1 The compilation error was introduced by the following commit 6d3d76f (drivers: net: cpsw: fix cpsw clock gating issue across suspend/resume) Signed-off-by: Mugunthan V N Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e66a20223abb..d1a769f35f9d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1973,6 +1973,7 @@ static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) cpsw_ndo_stop(ndev); -- cgit v1.2.1 From f57da7a65b386dd4535daa4f7a3773a025fbb022 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Fri, 21 Jun 2013 12:09:42 -0400 Subject: qlcnic: Do not sleep while holding spinlock This patch reverts commit a4791254b6625b06aa33e36304f6e8a1a4a1fdea ("qlcnic: change mdelay to msleep") which overwrote a commit 68b3f28c11bf896be32ad2a2e151aaa5f4a7c98e ("qlcnic: Fix scheduling while atomic bug") Signed-off-by: Shahed Shaikh Signed-off-by: Jitendra Kalsaria Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index 43562c256379..6acf82b9f018 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) qlcnic_83xx_config_intrpt(adapter, 0); } /* Allow dma queues to drain after context reset */ - msleep(20); + mdelay(20); } } -- cgit v1.2.1 From 23bc2021c54b048302f6f4844c030660ce0fcfe9 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 22 Jun 2013 12:39:26 +0200 Subject: ath9k: fix an RCU issue in calling ieee80211_get_tx_rates ath_txq_schedule is called outside of the drv_tx call, so it needs RCU protection. Signed-off-by: Felix Fietkau Signed-off-by: John W. Linville --- drivers/net/wireless/ath/ath9k/xmit.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 1c9b1bac8b0d..83ab6be3fe6d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1570,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) return; + rcu_read_lock(); + ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); @@ -1608,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (ac == last_ac || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) - return; + break; } + + rcu_read_unlock(); } /***********/ -- cgit v1.2.1 From 075163bbb0f51174359947e1bce84f5edb23f21e Mon Sep 17 00:00:00 2001 From: Sujith Manoharan Date: Thu, 20 Jun 2013 13:57:07 +0530 Subject: ath9k_htc: Handle IDLE state transition properly Make sure that a chip reset is done when IDLE is turned off - this fixes authentication timeouts. Cc: stable@vger.kernel.org Reported-by: Ignacy Gawedzki Signed-off-by: Sujith Manoharan Signed-off-by: John W. 
Linville --- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 0743a47cef8f..62f1b7636c92 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&priv->htc_pm_lock); priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); - if (priv->ps_idle) + if (!priv->ps_idle) chip_reset = true; mutex_unlock(&priv->htc_pm_lock); -- cgit v1.2.1 From 7e24bfbe43b545b1689a5f134ed83645b9e34b86 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 23 Jun 2013 17:19:03 +0300 Subject: tun: fix recovery from gup errors get user pages might fail partially in tun zero copy mode. To recover we need to put all pages that we got, but code used a wrong index resulting in double-free errors. Reported-by: Brad Hubbard Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang Acked-by: Neil Horman Signed-off-by: David S. Miller --- drivers/net/tun.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index bfa9bb48e42d..9c61f8734a40 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - for (i = 0; i < num_pages; i++) - put_page(page[i]); + int j; + + for (j = 0; j < num_pages; j++) + put_page(page[i + j]); return -EFAULT; } truesize = size * PAGE_SIZE; -- cgit v1.2.1 From 4c7ab054ab4f5d63625508ed6f8a607184cae7c2 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 23 Jun 2013 17:26:58 +0300 Subject: macvtap: fix recovery from gup errors get user pages might fail partially in macvtap zero copy mode. To recover we need to put all pages that we got, but code used a wrong index resulting in double-free errors. Reported-by: Brad Hubbard Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/macvtap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 59e9605de316..b6dd6a75919a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - for (i = 0; i < num_pages; i++) - put_page(page[i]); + int j; + + for (j = 0; j < num_pages; j++) + put_page(page[i + j]); return -EFAULT; } truesize = size * PAGE_SIZE; -- cgit v1.2.1 From 6d446ec32f169c6a5d9bc90684a8082a6cbe90f6 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Tue, 25 Jun 2013 15:24:32 +0800 Subject: net/tg3: Avoid delay during MMIO access When the EEH error is the result of a fenced host bridge, MMIO accesses can be very slow (milliseconds) to timeout and return all 1's, thus causing the driver various timeout loops to take way too long and trigger soft-lockup warnings (in addition to taking minutes to recover). It might be worthwhile to check if for any of these cases, ffffffff is a valid possible value, and if not, bail early since that means the HW is either gone or isolated. 
In the meantime, checking that the PCI channel is offline would be a workaround for the problem. Cc: # v3.0+ Signed-off-by: Gavin Shan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index c777b9013164..a13463e8a2c3 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) status = tg3_ape_read32(tp, gnt + off); if (status == bit) break; + if (pci_channel_offline(tp->pdev)) + break; + udelay(10); } @@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp) for (i = 0; i < delay_cnt; i++) { if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) break; + if (pci_channel_offline(tp->pdev)) + break; + udelay(8); } } @@ -1813,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp) for (i = 0; i < 200; i++) { if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) return 0; + if (pci_channel_offline(tp->pdev)) + return -ENODEV; + udelay(100); } return -ENODEV; @@ -1823,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp) tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) break; + if (pci_channel_offline(tp->pdev)) { + if (!tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + netdev_info(tp->dev, "No firmware running\n"); + } + + break; + } + udelay(10); } @@ -3520,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) break; + if (pci_channel_offline(tp->pdev)) + return -EBUSY; } return (i == iters) ? -EBUSY : 0; @@ -8589,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo tw32_f(ofs, val); for (i = 0; i < MAX_WAIT_CNT; i++) { + if (pci_channel_offline(tp->pdev)) { + dev_err(&tp->pdev->dev, + "tg3_stop_block device offline, " + "ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + udelay(100); val = tr32(ofs); if ((val & enable_bit) == 0) @@ -8612,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent) tg3_disable_ints(tp); + if (pci_channel_offline(tp->pdev)) { + tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + err = -ENODEV; + goto err_no_dev; + } + tp->rx_mode &= ~RX_MODE_ENABLE; tw32_f(MAC_RX_MODE, tp->rx_mode); udelay(10); @@ -8660,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent) err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); +err_no_dev: for (i = 0; i < tp->irq_cnt; i++) { struct tg3_napi *tnapi = &tp->napi[i]; if (tnapi->hw_status) -- cgit v1.2.1 From 11eb2645cbf38a08ae491bf6c602eea900ec0bb5 Mon Sep 17 00:00:00 2001 From: Zefan Li Date: Wed, 26 Jun 2013 15:29:54 +0800 Subject: dlci: acquire rtnl_lock before calling __dev_get_by_name() Otherwise the net device returned can be freed at any time. Signed-off-by: Li Zefan Cc: stable@vger.kernel.org Signed-off-by: David S.
Miller --- drivers/net/wan/dlci.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 147614ed86aa..1f6e0538c716 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -385,20 +385,24 @@ static int dlci_del(struct dlci_add *dlci) struct net_device *master, *slave; int err; + rtnl_lock(); + /* validate slave device */ master = __dev_get_by_name(&init_net, dlci->devname); - if (!master) - return -ENODEV; + if (!master) { + err = -ENODEV; + goto out; + } if (netif_running(master)) { - return -EBUSY; + err = -EBUSY; + goto out; } dlp = netdev_priv(master); slave = dlp->slave; flp = netdev_priv(slave); - rtnl_lock(); err = (*flp->deassoc)(slave, master); if (!err) { list_del(&dlp->list); @@ -407,8 +411,8 @@ static int dlci_del(struct dlci_add *dlci) dev_put(slave); } +out: rtnl_unlock(); - return err; } -- cgit v1.2.1 From 578a1310f2592ba90c5674bca21c1dbd1adf3f0a Mon Sep 17 00:00:00 2001 From: Zefan Li Date: Wed, 26 Jun 2013 15:31:58 +0800 Subject: dlci: validate the net device in dlci_del() We triggered an oops while running trinity with 3.4 kernel: BUG: unable to handle kernel paging request at 0000000100000d07 IP: [] dlci_ioctl+0xd8/0x2d4 [dlci] PGD 640c0d067 PUD 0 Oops: 0000 [#1] PREEMPT SMP CPU 3 ... Pid: 7302, comm: trinity-child3 Not tainted 3.4.24.09+ 40 Huawei Technologies Co., Ltd. Tecal RH2285 /BC11BTSA RIP: 0010:[] [] dlci_ioctl+0xd8/0x2d4 [dlci] ... Call Trace: [] sock_ioctl+0x153/0x280 [] do_vfs_ioctl+0xa4/0x5e0 [] ? fget_light+0x3ea/0x490 [] sys_ioctl+0x4f/0x80 [] system_call_fastpath+0x16/0x1b ... It's because the net device is not a dlci device. Reported-by: Li Jinyue Signed-off-by: Li Zefan Cc: stable@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/wan/dlci.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 1f6e0538c716..6a8a382c5f4c 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -384,6 +384,7 @@ static int dlci_del(struct dlci_add *dlci) struct frad_local *flp; struct net_device *master, *slave; int err; + bool found = false; rtnl_lock(); @@ -394,6 +395,17 @@ static int dlci_del(struct dlci_add *dlci) goto out; } + list_for_each_entry(dlp, &dlci_devs, list) { + if (dlp->master == master) { + found = true; + break; + } + } + if (!found) { + err = -ENODEV; + goto out; + } + if (netif_running(master)) { err = -EBUSY; goto out; -- cgit v1.2.1
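Taken together, the two dlci patches above converge on a pattern that any ioctl handler accepting an interface name from userspace has to follow: hold rtnl_lock() across both the __dev_get_by_name() lookup and every later use of the device, and verify that the device actually belongs to the driver before interpreting netdev_priv() as a driver-private structure. The sketch below shows that shape in isolation; it is not the dlci code itself, and the my_devs list, my_priv structure, and my_del_by_name() helper are hypothetical names used only for illustration.

/*
 * Minimal sketch of the "delete by name" pattern, under the assumptions
 * stated above (my_devs/my_priv/my_del_by_name are hypothetical):
 *  1) rtnl_lock() pins the looked-up device for the whole call, and
 *  2) a membership check guards the netdev_priv() cast, since userspace
 *     can pass the name of any interface in the system.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct my_priv {
	struct net_device *dev;
	struct list_head   list;
};

static LIST_HEAD(my_devs);		/* devices created by this driver */

static int my_del_by_name(const char *name)
{
	struct net_device *dev;
	struct my_priv *p;
	bool found = false;
	int err;

	rtnl_lock();			/* device cannot go away while held */

	dev = __dev_get_by_name(&init_net, name);
	if (!dev) {
		err = -ENODEV;
		goto out;
	}

	/* Only trust netdev_priv() if this driver created the device. */
	list_for_each_entry(p, &my_devs, list) {
		if (p->dev == dev) {
			found = true;
			break;
		}
	}
	if (!found) {
		err = -ENODEV;
		goto out;
	}

	if (netif_running(dev)) {
		err = -EBUSY;
		goto out;
	}

	list_del(&p->list);
	/* ... driver-specific teardown of 'p' and 'dev' would go here ... */
	err = 0;
out:
	rtnl_unlock();
	return err;
}

The membership check is what the second dlci patch adds: without it, a name such as "eth0" passed through the ioctl makes netdev_priv() return some other driver's private data, and the subsequent pointer chasing oopses exactly as in the trace quoted in that commit.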