Diffstat (limited to 'drivers/net')
207 files changed, 9790 insertions, 3239 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0c2bd806950e..707ab7bd4ea5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -107,8 +107,6 @@ config MII or internal device. It is safe to say Y or M here even if your ethernet card lacks MII. -source "drivers/ieee802154/Kconfig" - config IFB tristate "Intermediate Functional Block support" depends on NET_CLS_ACT @@ -290,6 +288,8 @@ source "drivers/net/wimax/Kconfig" source "drivers/net/wan/Kconfig" +source "drivers/net/ieee802154/Kconfig" + config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3d375ca128a6..b682a1de7be8 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_WIMAX) += wimax/ +obj-$(CONFIG_IEEE802154) += ieee802154/ obj-$(CONFIG_VMXNET3) += vmxnet3/ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d688a8af432c..7858c58df4a3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); - netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) - netdev_bonding_change(bond->dev, - NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, + bond->dev); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); @@ -1558,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->name, bond_dev->type, slave_dev->type); - res = netdev_bonding_change(bond_dev, - NETDEV_PRE_TYPE_CHANGE); + res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, + bond_dev); res = notifier_to_errno(res); if (res) { pr_err("%s: refused to change device type\n", @@ -1579,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; } - netdev_bonding_change(bond_dev, - NETDEV_POST_TYPE_CHANGE); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, + bond_dev); } } else if (bond_dev->type != slave_dev->type) { pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", @@ -1941,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) } block_netpoll_tx(); - netdev_bonding_change(bond_dev, NETDEV_RELEASE); + call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); @@ -2584,7 +2584,7 @@ re_arm: read_unlock(&bond->lock); return; } - netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } } @@ -2811,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work) arp_work.work); struct slave *slave, *oldcurrent; int do_failover = 0; - int delta_in_ticks; + int delta_in_ticks, extra_ticks; int i; read_lock(&bond->lock); delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); + extra_ticks = delta_in_ticks / 2; if (bond->slave_cnt == 0) goto re_arm; @@ -2839,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work) if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, trans_start - 
delta_in_ticks, - trans_start + delta_in_ticks) && + trans_start + delta_in_ticks + extra_ticks) && time_in_range(jiffies, slave->dev->last_rx - delta_in_ticks, - slave->dev->last_rx + delta_in_ticks)) { + slave->dev->last_rx + delta_in_ticks + extra_ticks)) { slave->link = BOND_LINK_UP; bond_set_active_slave(slave); @@ -2872,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work) */ if (!time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + 2 * delta_in_ticks) || + trans_start + 2 * delta_in_ticks + extra_ticks) || !time_in_range(jiffies, slave->dev->last_rx - delta_in_ticks, - slave->dev->last_rx + 2 * delta_in_ticks)) { + slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) { slave->link = BOND_LINK_DOWN; bond_set_backup_slave(slave); @@ -2933,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) struct slave *slave; int i, commit = 0; unsigned long trans_start; + int extra_ticks; + + /* All the time comparisons below need some extra time. Otherwise, on + * fast networks the ARP probe/reply may arrive within the same jiffy + * as it was sent. Then, the next time the ARP monitor is run, one + * arp_interval will already have passed in the comparisons. + */ + extra_ticks = delta_in_ticks / 2; bond_for_each_slave(bond, slave, i) { slave->new_link = BOND_LINK_NOCHANGE; @@ -2940,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + delta_in_ticks)) { + slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) { slave->new_link = BOND_LINK_UP; commit++; @@ -2956,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ if (time_in_range(jiffies, slave->jiffies - delta_in_ticks, - slave->jiffies + 2 * delta_in_ticks)) + slave->jiffies + 2 * delta_in_ticks + extra_ticks)) continue; /* @@ -2976,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) !bond->current_arp_slave && !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { + slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) { slave->new_link = BOND_LINK_DOWN; commit++; @@ -2992,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) if (bond_is_active_slave(slave) && (!time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + 2 * delta_in_ticks) || + trans_start + 2 * delta_in_ticks + extra_ticks) || !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { + slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) { slave->new_link = BOND_LINK_DOWN; commit++; @@ -3027,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) if ((!bond->curr_active_slave && time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + delta_in_ticks)) || + trans_start + delta_in_ticks + delta_in_ticks / 2)) || bond->curr_active_slave != slave) { slave->link = BOND_LINK_UP; if (bond->current_arp_slave) { @@ -3203,7 +3212,7 @@ re_arm: read_unlock(&bond->lock); return; } - netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } } @@ -3352,56 +3361,93 @@ static struct notifier_block bond_netdev_notifier = { /*---------------------------- Hashing Policies 
-----------------------------*/ /* + * Hash for the output device based upon layer 2 data + */ +static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) +{ + struct ethhdr *data = (struct ethhdr *)skb->data; + + if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto)) + return (data->h_dest[5] ^ data->h_source[5]) % count; + + return 0; +} + +/* * Hash for the output device based upon layer 2 and layer 3 data. If - * the packet is not IP mimic bond_xmit_hash_policy_l2() + * the packet is not IP, fall back on bond_xmit_hash_policy_l2() */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph = ip_hdr(skb); - - if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph; + struct ipv6hdr *ipv6h; + u32 v6hash; + __be32 *s, *d; + + if (skb->protocol == htons(ETH_P_IP) && + skb_network_header_len(skb) >= sizeof(*iph)) { + iph = ip_hdr(skb); return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; + } else if (skb->protocol == htons(ETH_P_IPV6) && + skb_network_header_len(skb) >= sizeof(*ipv6h)) { + ipv6h = ipv6_hdr(skb); + s = &ipv6h->saddr.s6_addr32[0]; + d = &ipv6h->daddr.s6_addr32[0]; + v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); + v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8); + return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count; } - return (data->h_dest[5] ^ data->h_source[5]) % count; + return bond_xmit_hash_policy_l2(skb, count); } /* * Hash for the output device based upon layer 3 and layer 4 data. If * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is - * altogether not IP, mimic bond_xmit_hash_policy_l2() + * altogether not IP, fall back on bond_xmit_hash_policy_l2() */ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { - struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph = ip_hdr(skb); - __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); - int layer4_xor = 0; - - if (skb->protocol == htons(ETH_P_IP)) { + u32 layer4_xor = 0; + struct iphdr *iph; + struct ipv6hdr *ipv6h; + __be32 *s, *d; + __be16 *layer4hdr; + + if (skb->protocol == htons(ETH_P_IP) && + skb_network_header_len(skb) >= sizeof(*iph)) { + iph = ip_hdr(skb); if (!ip_is_fragment(iph) && (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP)) { - layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); + iph->protocol == IPPROTO_UDP) && + (skb_headlen(skb) - skb_network_offset(skb) >= + iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { + layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); + layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; - + } else if (skb->protocol == htons(ETH_P_IPV6) && + skb_network_header_len(skb) >= sizeof(*ipv6h)) { + ipv6h = ipv6_hdr(skb); + if ((ipv6h->nexthdr == IPPROTO_TCP || + ipv6h->nexthdr == IPPROTO_UDP) && + (skb_headlen(skb) - skb_network_offset(skb) >= + sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { + layer4hdr = (__be16 *)(ipv6h + 1); + layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + } + s = &ipv6h->saddr.s6_addr32[0]; + d = &ipv6h->daddr.s6_addr32[0]; + layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); + layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^ + (layer4_xor >> 8); + return layer4_xor % count; } - return (data->h_dest[5] ^ data->h_source[5]) % count; -} - -/* - * Hash for the output device based upon layer 2 data - */ -static int 
bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) -{ - struct ethhdr *data = (struct ethhdr *)skb->data; - - return (data->h_dest[5] ^ data->h_source[5]) % count; + return bond_xmit_hash_policy_l2(skb, count); } /*-------------------------- Device entry points ----------------------------*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index af20c6ee2cd9..ca8048757c84 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2283,7 +2283,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Wait for all pending SP commands to complete */ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { BNX2X_ERR("Timeout waiting for SP elements to complete\n"); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); return -EBUSY; } @@ -2331,7 +2331,7 @@ load_error0: } /* must be called with rtnl_lock */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) { int i; bool global = false; @@ -2393,7 +2393,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Cleanup the chip if needed */ if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); + bnx2x_chip_cleanup(bp, unload_mode, keep_link); else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); @@ -2417,7 +2417,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) bnx2x_free_irq(bp); /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, false); } /* @@ -3768,7 +3768,7 @@ int bnx2x_reload_if_running(struct net_device *dev) if (unlikely(!netif_running(dev))) return 0; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); return bnx2x_nic_load(bp, LOAD_NORMAL); } @@ -3965,7 +3965,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index dfd86a55f1dc..9c5ea6c5b4c7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp); +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); /** * bnx2x_config_rss_pf - configure RSS parameters in a PF. @@ -153,6 +154,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); void bnx2x_link_set(struct bnx2x *bp); /** + * bnx2x_force_link_reset - Forces link reset, and put the PHY + * in reset as well. + * + * @bp: driver handle + */ +void bnx2x_force_link_reset(struct bnx2x *bp); + +/** * bnx2x_link_test - query link status. * * @bp: driver handle @@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp); * * @bp: driver handle * @unload_mode: COMMON, PORT, FUNCTION + * @keep_link: true iff link should be kept up. * * - Cleanup MAC configuration. * - Closes clients. * - etc. 
*/ -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link); /** * bnx2x_acquire_hw_lock - acquire HW lock. @@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); /* dev_close main block */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); /* dev_open main block */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ebf40cd7aa10..f923125e1c20 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev) if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } @@ -1653,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); + eee_cfg = bp->link_vars.eee_status; edata->supported = bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> @@ -1690,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); + eee_cfg = bp->link_vars.eee_status; if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) { DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n"); @@ -1739,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) /* Restart link to propogate changes */ if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } @@ -2263,7 +2265,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp) if (!netif_running(bp->dev)) return BNX2X_EXT_LOOPBACK_FAILED; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); if (rc) { DP(BNX2X_MSG_ETHTOOL, @@ -2414,7 +2416,7 @@ static void bnx2x_self_test(struct net_device *dev, link_up = bp->link_vars.link_up; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); rc = bnx2x_nic_load(bp, LOAD_DIAG); if (rc) { etest->flags |= ETH_TEST_FL_FAILED; @@ -2446,7 +2448,7 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); /* restore input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); @@ -2940,7 +2942,7 @@ static int bnx2x_set_channels(struct net_device *dev, bnx2x_change_num_queues(bp, channels->combined_count); return 0; } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_change_num_queues(bp, channels->combined_count); return bnx2x_nic_load(bp, LOAD_NORMAL); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 76b6e65790f8..c795cfc5a545 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1286,6 +1286,9 @@ struct drv_func_mb { #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 + #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 + + 
#define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 @@ -1909,6 +1912,54 @@ struct lldp_local_mib { }; /***END OF DCBX STRUCTURES DECLARATIONS***/ +/***********************************************************/ +/* Elink section */ +/***********************************************************/ +#define SHMEM_LINK_CONFIG_SIZE 2 +struct shmem_lfa { + u32 req_duplex; + #define REQ_DUPLEX_PHY0_MASK 0x0000ffff + #define REQ_DUPLEX_PHY0_SHIFT 0 + #define REQ_DUPLEX_PHY1_MASK 0xffff0000 + #define REQ_DUPLEX_PHY1_SHIFT 16 + u32 req_flow_ctrl; + #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff + #define REQ_FLOW_CTRL_PHY0_SHIFT 0 + #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000 + #define REQ_FLOW_CTRL_PHY1_SHIFT 16 + u32 req_line_speed; /* Also determine AutoNeg */ + #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff + #define REQ_LINE_SPD_PHY0_SHIFT 0 + #define REQ_LINE_SPD_PHY1_MASK 0xffff0000 + #define REQ_LINE_SPD_PHY1_SHIFT 16 + u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE]; + u32 additional_config; + #define REQ_FC_AUTO_ADV_MASK 0x0000ffff + #define REQ_FC_AUTO_ADV0_SHIFT 0 + #define NO_LFA_DUE_TO_DCC_MASK 0x00010000 + u32 lfa_sts; + #define LFA_LINK_FLAP_REASON_OFFSET 0 + #define LFA_LINK_FLAP_REASON_MASK 0x000000ff + #define LFA_LINK_DOWN 0x1 + #define LFA_LOOPBACK_ENABLED 0x2 + #define LFA_DUPLEX_MISMATCH 0x3 + #define LFA_MFW_IS_TOO_OLD 0x4 + #define LFA_LINK_SPEED_MISMATCH 0x5 + #define LFA_FLOW_CTRL_MISMATCH 0x6 + #define LFA_SPEED_CAP_MISMATCH 0x7 + #define LFA_DCC_LFA_DISABLED 0x8 + #define LFA_EEE_MISMATCH 0x9 + + #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 + #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 + + #define LINK_FLAP_COUNT_OFFSET 16 + #define LINK_FLAP_COUNT_MASK 0x00ff0000 + + #define LFA_FLAGS_MASK 0xff000000 + #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) +}; + struct ncsi_oem_fcoe_features { u32 fcoe_features1; #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index b046beb435b2..bcc112b82831 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -321,6 +321,127 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) return val; } +/* + * bnx2x_check_lfa - This function checks if link reinitialization is required, + * or link flap can be avoided. + * + * @params: link parameters + * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed + * condition code. + */ +static int bnx2x_check_lfa(struct link_params *params) +{ + u32 link_status, cfg_idx, lfa_mask, cfg_size; + u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config; + u32 saved_val, req_val, eee_status; + struct bnx2x *bp = params->bp; + + additional_config = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + + /* NOTE: must be first condition checked - + * to verify DCC bit is cleared in any case! 
+ */ + if (additional_config & NO_LFA_DUE_TO_DCC_MASK) { + DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n"); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), + additional_config & ~NO_LFA_DUE_TO_DCC_MASK); + return LFA_DCC_LFA_DISABLED; + } + + /* Verify that link is up */ + link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status)); + if (!(link_status & LINK_STATUS_LINK_UP)) + return LFA_LINK_DOWN; + + /* Verify that loopback mode is not set */ + if (params->loopback_mode) + return LFA_LOOPBACK_ENABLED; + + /* Verify that MFW supports LFA */ + if (!params->lfa_base) + return LFA_MFW_IS_TOO_OLD; + + if (params->num_phys == 3) { + cfg_size = 2; + lfa_mask = 0xffffffff; + } else { + cfg_size = 1; + lfa_mask = 0xffff; + } + + /* Compare Duplex */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex)); + req_val = params->req_duplex[0] | (params->req_duplex[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_DUPLEX_MISMATCH; + } + /* Compare Flow Control */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl)); + req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_FLOW_CTRL_MISMATCH; + } + /* Compare Link Speed */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed)); + req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_LINK_SPEED_MISMATCH; + } + + for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) { + cur_speed_cap_mask = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx])); + + if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) { + DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n", + cur_speed_cap_mask, + params->speed_cap_mask[cfg_idx]); + return LFA_SPEED_CAP_MISMATCH; + } + } + + cur_req_fc_auto_adv = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)) & + REQ_FC_AUTO_ADV_MASK; + + if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) { + DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n", + cur_req_fc_auto_adv, params->req_fc_auto_adv); + return LFA_FLOW_CTRL_MISMATCH; + } + + eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ENABLE_LPI)) || + ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ADV_LPI))) { + DP(NETIF_MSG_LINK, "EEE mismatch %x vs. 
%x\n", params->eee_mode, + eee_status); + return LFA_EEE_MISMATCH; + } + + /* LFA conditions are met */ + return 0; +} /******************************************************************/ /* EPIO/GPIO section */ /******************************************************************/ @@ -1307,93 +1428,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) } /******************************************************************/ -/* EEE section */ -/******************************************************************/ -static u8 bnx2x_eee_has_cap(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - - if (REG_RD(bp, params->shmem2_base) <= - offsetof(struct shmem2_region, eee_status[params->port])) - return 0; - - return 1; -} - -static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) -{ - switch (nvram_mode) { - case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: - *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; - break; - case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: - *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; - break; - case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: - *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; - break; - default: - *idle_timer = 0; - break; - } - - return 0; -} - -static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) -{ - switch (idle_timer) { - case EEE_MODE_NVRAM_BALANCED_TIME: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; - break; - case EEE_MODE_NVRAM_AGGRESSIVE_TIME: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; - break; - case EEE_MODE_NVRAM_LATENCY_TIME: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; - break; - default: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; - break; - } - - return 0; -} - -static u32 bnx2x_eee_calc_timer(struct link_params *params) -{ - u32 eee_mode, eee_idle; - struct bnx2x *bp = params->bp; - - if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { - if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { - /* time value in eee_mode --> used directly*/ - eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; - } else { - /* hsi value in eee_mode --> time */ - if (bnx2x_eee_nvram_to_time(params->eee_mode & - EEE_MODE_NVRAM_MASK, - &eee_idle)) - return 0; - } - } else { - /* hsi values in nvram --> time*/ - eee_mode = ((REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - eee_power_mode)) & - PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> - PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); - - if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) - return 0; - } - - return eee_idle; -} - - -/******************************************************************/ /* PFC section */ /******************************************************************/ static void bnx2x_update_pfc_xmac(struct link_params *params, @@ -1606,16 +1640,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params, NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); } -static void bnx2x_umac_disable(struct link_params *params) +static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en) { u32 umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + u32 val; struct bnx2x *bp = params->bp; if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) return; - + val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG); + if (en) + val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + else + val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); /* Disable RX and TX */ - REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0); + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); } static void bnx2x_umac_enable(struct link_params *params, @@ -1671,6 +1712,16 @@ static void bnx2x_umac_enable(struct link_params *params, REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); udelay(50); + /* Configure UMAC for EEE */ + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "configured UMAC for EEE\n"); + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, + UMAC_UMAC_EEE_CTRL_REG_EEE_EN); + REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); + } else { + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); + } + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, ((params->mac_addr[2] << 24) | @@ -1766,11 +1817,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) } -static void bnx2x_xmac_disable(struct link_params *params) +static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en) { u8 port = params->port; struct bnx2x *bp = params->bp; u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + u32 val; if (REG_RD(bp, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) { @@ -1784,7 +1836,12 @@ static void bnx2x_xmac_disable(struct link_params *params) REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, (pfc_ctrl | (1<<1))); DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); - REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); + val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); + if (en) + val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + else + val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); } } @@ -2529,16 +2586,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status) port_mb[params->port].link_status), link_status); } -static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) -{ - struct bnx2x *bp = params->bp; - - if (bnx2x_eee_has_cap(params)) - REG_WR(bp, params->shmem2_base + - offsetof(struct shmem2_region, - eee_status[params->port]), eee_status); -} - static void bnx2x_update_pfc_nig(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *nig_params) @@ -2827,16 +2874,18 @@ static int bnx2x_bmac2_enable(struct link_params *params, static int bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars, - u8 is_lb) + u8 is_lb, u8 reset_bmac) { int rc = 0; u8 port = params->port; struct bnx2x *bp = params->bp; u32 val; /* Reset and unreset the BigMac */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - usleep_range(1000, 2000); + if (reset_bmac) { + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + usleep_range(1000, 2000); + } REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); @@ -2868,37 +2917,28 @@ static int bnx2x_bmac_enable(struct link_params *params, return rc; } -static void 
bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) +static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) { u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; u32 wb_data[2]; u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); + if (CHIP_IS_E2(bp)) + bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; + else + bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; /* Only if the bmac is out of reset */ if (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && nig_bmac_enable) { - - if (CHIP_IS_E2(bp)) { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - } else { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); + /* Clear Rx Enable bit in BMAC_CONTROL register */ + REG_RD_DMAE(bp, bmac_addr, wb_data, 2); + if (en) + wb_data[0] |= BMAC_CONTROL_RX_ENABLE; + else wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); - } + REG_WR_DMAE(bp, bmac_addr, wb_data, 2); usleep_range(1000, 2000); } } @@ -3233,6 +3273,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, EMAC_MDIO_STATUS_10MB); return rc; } + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static u8 bnx2x_eee_has_cap(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + if (REG_RD(bp, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return 0; +} + +static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) +{ + switch (idle_timer) { + case EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return 0; +} + +static u32 bnx2x_eee_calc_timer(struct link_params *params) +{ + u32 eee_mode, eee_idle; + struct bnx2x *bp = params->bp; + + if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly*/ + eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; + } else { + /* hsi value in eee_mode --> time */ + if (bnx2x_eee_nvram_to_time(params->eee_mode & + EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time*/ + eee_mode = ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. 
+ eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + +static int bnx2x_eee_set_timers(struct link_params *params, + struct link_vars *vars) +{ + u32 eee_idle = 0, eee_mode; + struct bnx2x *bp = params->bp; + + eee_idle = bnx2x_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && + (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { + DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); + return -EINVAL; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1u --> eee_status in 16u */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) + return -EINVAL; + vars->eee_status |= eee_mode; + } + + return 0; +} + +static int bnx2x_eee_initial_config(struct link_params *params, + struct link_vars *vars, u8 mode) +{ + vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; + + /* Propogate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + return bnx2x_eee_set_timers(params, vars); +} + +static int bnx2x_eee_disable(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + + /* Make Certain LPI is disabled */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return 0; +} + +static int bnx2x_eee_advertise(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, u8 modes) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + + /* Mask events preventing LPI generation */ + REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); + + if (modes & SHMEM_EEE_10G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); + val |= 0x8; + } + if (modes & SHMEM_EEE_1G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n"); + val |= 0x4; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); + + return 0; +} + +static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) +{ + struct bnx2x *bp = params->bp; + + if (bnx2x_eee_has_cap(params)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + +static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 adv = 0, lp = 0; + u32 lp_adv = 0; + u8 neg = 0; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); + + if (lp & 0x2) { + lp_adv |= SHMEM_EEE_100M_ADV; + if (adv & 0x2) { + if (vars->line_speed == 
SPEED_100) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n"); + } + } + if (lp & 0x14) { + lp_adv |= SHMEM_EEE_1G_ADV; + if (adv & 0x14) { + if (vars->line_speed == SPEED_1000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n"); + } + } + if (lp & 0x68) { + lp_adv |= SHMEM_EEE_10G_ADV; + if (adv & 0x68) { + if (vars->line_speed == SPEED_10000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n"); + } + } + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + if (neg) { + DP(NETIF_MSG_LINK, "EEE is active\n"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } + +} + /******************************************************************/ /* BSC access functions from E3 */ /******************************************************************/ @@ -3754,6 +4033,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, * init configuration, and set/clear SGMII flag. Internal * phy init is done purely in phy_init stage. */ + +static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); +} + static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -4013,13 +4305,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); - /* Enable LPI pass through */ - DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_EEE_COMBO_CONTROL0, - 0x7c); - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); + bnx2x_warpcore_set_lpi_passthrough(phy, params); /* 10G XFI Full Duplex */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@ -4116,6 +4402,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); + bnx2x_warpcore_set_lpi_passthrough(phy, params); + if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { /* SGMII Autoneg */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4409,7 +4697,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, "serdes_net_if = 0x%x\n", vars->line_speed, serdes_net_if); bnx2x_set_aer_mmd(params, phy); - + bnx2x_warpcore_reset_lane(bp, phy, 1); vars->phy_flags |= PHY_XGXS_FLAG; if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || (phy->req_line_speed && @@ -4718,6 +5006,10 @@ void bnx2x_link_status_update(struct link_params *params, vars->link_status = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, port_mb[port].link_status)); + if (bnx2x_eee_has_cap(params)) + vars->eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); vars->phy_flags = PHY_XGXS_FLAG; bnx2x_sync_link(params, vars); @@ -6530,25 +6822,21 @@ static int bnx2x_update_link_down(struct link_params *params, usleep_range(10000, 20000); /* Reset BigMac/Xmac */ if (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp)) { - bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - } + CHIP_IS_E2(bp)) + bnx2x_set_bmac_rx(bp, 
params->chip_id, params->port, 0); + if (CHIP_IS_E3(bp)) { /* Prevent LPI Generation by chip */ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); - REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), 0); vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | SHMEM_EEE_ACTIVE_BIT); bnx2x_update_mng_eee(params, vars->eee_status); - bnx2x_xmac_disable(params); - bnx2x_umac_disable(params); + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); } return 0; @@ -6600,7 +6888,7 @@ static int bnx2x_update_link_up(struct link_params *params, if ((CHIP_IS_E1x(bp) || CHIP_IS_E2(bp))) { if (link_10g) { - if (bnx2x_bmac_enable(params, vars, 0) == + if (bnx2x_bmac_enable(params, vars, 0, 1) == -ESRCH) { DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); vars->link_up = 0; @@ -7207,6 +7495,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params, msleep(500); } +static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Enable LASI */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + break; + } +} + static int bnx2x_8073_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -7227,12 +7531,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); - /* Enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); - + bnx2x_8073_specific_func(phy, params, PHY_INIT); bnx2x_8073_set_pause_cl37(params, phy, vars); bnx2x_cl45_read(bp, phy, @@ -8267,7 +8566,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, u32 action) { struct bnx2x *bp = params->bp; - + u16 val; switch (action) { case DISABLE_TX: bnx2x_sfp_set_transmitter(params, phy, 0); @@ -8276,6 +8575,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) bnx2x_sfp_set_transmitter(params, phy, 1); break; + case PHY_INIT: + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1<<2) | (1<<5)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); + /* Make MOD_ABS give interrupt on change */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val |= (1<<12); + if (phy->flags & FLAGS_NOC) + val |= (3<<5); + /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + * status which reflect SFP+ module over-current + */ + if (!(phy->flags & FLAGS_NOC)) + val &= 0xff8f; /* Reset bits 4-6 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100Khz since some DACs(direct attached cables) do + * not work at 400Khz. 
+ */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + 0xa001); + break; default: DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", action); @@ -9058,28 +9391,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { u32 tx_en_mode; - u16 tmp1, val, mod_abs, tmp2; - u16 rx_alarm_ctrl_val; - u16 lasi_ctrl_val; + u16 tmp1, mod_abs, tmp2; struct bnx2x *bp = params->bp; /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ bnx2x_wait_reset_complete(bp, phy, params); - rx_alarm_ctrl_val = (1<<2) | (1<<5) ; - /* Should be 0x6 to enable XS on Tx side. */ - lasi_ctrl_val = 0x0006; DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); - /* Enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - rx_alarm_ctrl_val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, - 0); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); + bnx2x_8727_specific_func(phy, params, PHY_INIT); /* Initially configure MOD_ABS to interrupt when module is * presence( bit 8) */ @@ -9095,25 +9415,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* Enable/Disable PHY transmitter output */ bnx2x_set_disable_pmd_transmit(params, phy, 0); - /* Make MOD_ABS give interrupt on change */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, - &val); - val |= (1<<12); - if (phy->flags & FLAGS_NOC) - val |= (3<<5); - - /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 - * status which reflect SFP+ module over-current - */ - if (!(phy->flags & FLAGS_NOC)) - val &= 0xff8f; /* Reset bits 4-6 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val); - bnx2x_8727_power_module(bp, phy, 1); bnx2x_cl45_read(bp, phy, @@ -9123,13 +9427,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); bnx2x_8727_config_speed(phy, params); - /* Set 2-wire transfer rate of SFP+ module EEPROM - * to 100Khz since some DACs(direct attached cables) do - * not work at 400Khz. 
- */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, - 0xa001); + /* Set TX PreEmphasis if needed */ if ((params->feature_config_flags & @@ -9558,6 +9856,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, 0xFFFB, 0xFFFD); } +static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + /* Save spirom version */ + bnx2x_save_848xx_spirom_version(phy, bp, params->port); + } + /* This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + + bnx2x_848xx_set_led(bp, phy); + break; + } +} + static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -9565,22 +9886,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; - if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, bp, params->port); - } - /* This phy uses the NIG latch mechanism since link indication - * arrives through its LED4 and not via its LASI signal, so we - * get steady signal instead of clear on read - */ - bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, - 1 << NIG_LATCH_BC_ENABLE_MI_INT); - + bnx2x_848xx_specific_func(phy, params, PHY_INIT); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); - bnx2x_848xx_set_led(bp, phy); - /* set 1000 speed advertisement */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, @@ -9887,39 +10196,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, return 0; } -static int bnx2x_8483x_eee_timers(struct link_params *params, - struct link_vars *vars) -{ - u32 eee_idle = 0, eee_mode; - struct bnx2x *bp = params->bp; - - eee_idle = bnx2x_eee_calc_timer(params); - - if (eee_idle) { - REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), - eee_idle); - } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && - (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && - (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { - DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); - return -EINVAL; - } - - vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); - if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { - /* eee_idle in 1u --> eee_status in 16u */ - eee_idle >>= 4; - vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | - SHMEM_EEE_TIME_OUTPUT_BIT; - } else { - if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) - return -EINVAL; - vars->eee_status |= eee_mode; - } - - return 0; -} - static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -9930,10 +10206,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); - /* Make Certain LPI is disabled */ - REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); - REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); - /* Prevent Phy from working in EEE and advertising it */ rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); @@ -9942,10 +10214,7 @@ static int bnx2x_8483x_disable_eee(struct 
bnx2x_phy *phy, return rc; } - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0); - vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; - - return 0; + return bnx2x_eee_disable(phy, params, vars); } static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, @@ -9956,8 +10225,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); - rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { @@ -9965,15 +10232,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, return rc; } - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8); - - /* Mask events preventing LPI generation */ - REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); - - vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; - vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT); - - return 0; + return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); } #define PHY84833_CONSTANT_LATENCY 1193 @@ -10105,22 +10364,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, MDIO_84833_TOP_CFG_FW_REV, &val); /* Configure EEE support */ - if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) { - phy->flags |= FLAGS_EEE_10GBT; - vars->eee_status |= SHMEM_EEE_10G_ADV << - SHMEM_EEE_SUPPORTED_SHIFT; - /* Propogate params' bits --> vars (for migration exposure) */ - if (params->eee_mode & EEE_MODE_ENABLE_LPI) - vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; - else - vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; - - if (params->eee_mode & EEE_MODE_ADV_LPI) - vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; - else - vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; - - rc = bnx2x_8483x_eee_timers(params, vars); + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && + (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && + bnx2x_eee_has_cap(params)) { + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); if (rc) { DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); bnx2x_8483x_disable_eee(phy, params, vars); @@ -10139,7 +10386,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, return rc; } } else { - phy->flags &= ~FLAGS_EEE_10GBT; vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } @@ -10278,29 +10524,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; /* Determine if EEE was negotiated */ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - u32 eee_shmem = 0; - - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, - MDIO_AN_REG_EEE_ADV, &val1); - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, - MDIO_AN_REG_LP_EEE_ADV, &val2); - if ((val1 & val2) & 0x8) { - DP(NETIF_MSG_LINK, "EEE negotiated\n"); - vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; - } - - if (val2 & 0x12) - eee_shmem |= SHMEM_EEE_100M_ADV; - if (val2 & 0x4) - eee_shmem |= SHMEM_EEE_1G_ADV; - if (val2 & 0x68) - eee_shmem |= SHMEM_EEE_10G_ADV; - - vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; - vars->eee_status |= (eee_shmem << - SHMEM_EEE_LP_ADV_STATUS_SHIFT); - } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bnx2x_eee_an_resolve(phy, params, vars); } return link_up; @@ -10569,6 +10794,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /******************************************************************/ /* 54618SE PHY SECTION */ /******************************************************************/ +static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, + struct link_params 
*params, + u32 action) +{ + struct bnx2x *bp = params->bp; + u16 temp; + switch (action) { + case PHY_INIT: + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + break; + } +} + static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -10606,24 +10860,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to reset */ msleep(50); - /* Configure LED4: set to INTR (0x6). */ - /* Accessing shadow register 0xe. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_LED_SEL2); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_SHADOW, - &temp); - temp &= ~(0xf << 4); - temp |= (0x6 << 4); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_WR_ENA | temp); - /* Configure INTR based on link status change. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_INTR_MASK, - ~MDIO_REG_INTR_MASK_LINK_STATUS); + bnx2x_54618se_specific_func(phy, params, PHY_INIT); /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_SHADOW, @@ -10728,28 +10966,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Setting 10M force\n"); } - /* Check if we should turn on Auto-GrEEEn */ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); - if (temp == MDIO_REG_GPHY_ID_54618SE) { - if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) { - temp = 6; - DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) { + int rc; + + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, + MDIO_REG_GPHY_EXP_ACCESS_TOP | + MDIO_REG_GPHY_EXP_TOP_2K_BUF); + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); + temp &= 0xfffe; + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); + + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_eee_disable(phy, params, vars); + } else if ((params->eee_mode & EEE_MODE_ADV_LPI) && + (phy->req_duplex == DUPLEX_FULL) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) { + /* Need to advertise EEE only when requested, + * and either no LPI assertion was requested, + * or it was requested and a valid timer was set. + * Also notice full duplex is required for EEE. + */ + bnx2x_eee_advertise(phy, params, vars, + SHMEM_EEE_1G_ADV); } else { - temp = 0; - DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); + DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n"); + bnx2x_eee_disable(phy, params, vars); + } + } else { + vars->eee_status &= ~SHMEM_EEE_1G_ADV << + SHMEM_EEE_SUPPORTED_SHIFT; + + if (phy->flags & FLAGS_EEE) { + /* Handle legacy auto-grEEEn */ + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + } else { + temp = 0; + DP(NETIF_MSG_LINK, "Don't Adv. 
EEE\n"); + } + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, temp); } - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_ADV); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - temp); } bnx2x_cl22_write(bp, phy, @@ -10896,29 +11158,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", vars->line_speed); - /* Report whether EEE is resolved. */ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val); - if (val == MDIO_REG_GPHY_ID_54618SE) { - if (vars->link_status & - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) - val = 0; - else { - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_RESOLVED); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - &val); - } - DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val); - } - bnx2x_ext_phy_resolve_fc(phy, params, vars); if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { @@ -10948,6 +11187,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, if (val & (1<<11)) vars->link_status |= LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + if ((phy->flags & FLAGS_EEE) && + bnx2x_eee_has_cap(params)) + bnx2x_eee_an_resolve(phy, params, vars); } } return link_up; @@ -11353,7 +11596,7 @@ static struct bnx2x_phy phy_8073 = { .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func }; static struct bnx2x_phy phy_8705 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, @@ -11546,7 +11789,7 @@ static struct bnx2x_phy phy_84823 = { .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; static struct bnx2x_phy phy_84833 = { @@ -11555,8 +11798,7 @@ static struct bnx2x_phy phy_84833 = { .def_md_devad = 0, .flags = (FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL | - FLAGS_TX_ERROR_CHECK | - FLAGS_EEE_10GBT), + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11582,7 +11824,7 @@ static struct bnx2x_phy phy_84833 = { .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; static struct bnx2x_phy phy_54618se = { @@ -11616,7 +11858,7 @@ static struct bnx2x_phy phy_54618se = { .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func }; /*****************************************************************/ /* */ @@ -11862,6 +12104,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, 
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: *phy = phy_54618se; + if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + phy->flags |= FLAGS_EEE; break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: *phy = phy_7101; @@ -12141,7 +12385,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params, bnx2x_xgxs_deassert(params); /* set bmac loopback */ - bnx2x_bmac_enable(params, vars, 1); + bnx2x_bmac_enable(params, vars, 1, 1); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } @@ -12233,7 +12477,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params, if (USES_WARPCORE(bp)) bnx2x_xmac_enable(params, vars, 0); else - bnx2x_bmac_enable(params, vars, 0); + bnx2x_bmac_enable(params, vars, 0, 1); } if (params->loopback_mode == LOOPBACK_XGXS) { @@ -12258,8 +12502,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params, bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); } +static void bnx2x_set_rx_filter(struct link_params *params, u8 en) +{ + struct bnx2x *bp = params->bp; + u8 val = en * 0x1F; + + /* Open the gate between the NIG to the BRB */ + if (!CHIP_IS_E1x(bp)) + val |= en * 0x20; + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); + + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4, + en*0x3); + } + + REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), en); +} +static int bnx2x_avoid_link_flap(struct link_params *params, + struct link_vars *vars) +{ + u32 phy_idx; + u32 dont_clear_stat, lfa_sts; + struct bnx2x *bp = params->bp; + + /* Sync the link parameters */ + bnx2x_link_status_update(params, vars); + + /* + * The module verification was already done by previous link owner, + * so this call is meant only to get warning message + */ + + for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) { + struct bnx2x_phy *phy = ¶ms->phy[phy_idx]; + if (phy->phy_specific_func) { + DP(NETIF_MSG_LINK, "Calling PHY specific func\n"); + phy->phy_specific_func(phy, params, PHY_INIT); + } + if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER) || + (phy->media_type == ETH_PHY_DA_TWINAX)) + bnx2x_verify_sfp_module(phy, params); + } + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + lfa_sts)); + + dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT; + + /* Re-enable the NIG/MAC */ + if (CHIP_IS_E3(bp)) { + if (!dont_clear_stat) { + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + } + if (vars->line_speed < SPEED_10000) + bnx2x_umac_enable(params, vars, 0); + else + bnx2x_xmac_enable(params, vars, 0); + } else { + if (vars->line_speed < SPEED_10000) + bnx2x_emac_enable(params, vars, 0); + else + bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat); + } + + /* Increment LFA count */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >> + LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_AVOIDANCE_COUNT_OFFSET)); + /* Clear link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + + /* Disable NIG DRAIN */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + /* Enable interrupts */ + bnx2x_link_int_enable(params); + 
return 0; +} + +static void bnx2x_cannot_avoid_link_flap(struct link_params *params, + struct link_vars *vars, + int lfa_status) +{ + u32 lfa_sts, cfg_idx, tmp_val; + struct bnx2x *bp = params->bp; + + bnx2x_link_reset(params, vars, 1); + + if (!params->lfa_base) + return; + /* Store the new link parameters */ + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex), + params->req_duplex[0] | (params->req_duplex[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl), + params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed), + params->req_line_speed[0] | (params->req_line_speed[1] << 16)); + + for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) { + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx]), + params->speed_cap_mask[cfg_idx]); + } + + tmp_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + tmp_val &= ~REQ_FC_AUTO_ADV_MASK; + tmp_val |= params->req_fc_auto_adv; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), tmp_val); + + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + /* Clear the "Don't Clear Statistics" bit, and set reason */ + lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT; + + /* Set link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) << + LFA_LINK_FLAP_REASON_OFFSET); + + /* Increment link flap counter */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_COUNT_MASK) >> + LINK_FLAP_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_COUNT_OFFSET)); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + /* Proceed with regular link initialization */ +} + int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) { + int lfa_status; struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Phy Initialization started\n"); DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", @@ -12274,6 +12671,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; + /* Driver opens NIG-BRB filters */ + bnx2x_set_rx_filter(params, 1); + /* Check if link flap can be avoided */ + lfa_status = bnx2x_check_lfa(params); + + if (lfa_status == 0) { + DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n"); + return bnx2x_avoid_link_flap(params, vars); + } + + DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n", + lfa_status); + bnx2x_cannot_avoid_link_flap(params, vars, lfa_status); /* Disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, @@ -12356,13 +12766,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); } - /* Stop BigMac rx */ - if (!CHIP_IS_E3(bp)) - bnx2x_bmac_rx_disable(bp, port); - else { - bnx2x_xmac_disable(params); - bnx2x_umac_disable(params); - } + if (!CHIP_IS_E3(bp)) { + bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); + } else { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); @@ -12420,6 +12829,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, vars->phy_flags = 0; return 0; } +int bnx2x_lfa_reset(struct link_params *params, + 
struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 0; + vars->phy_flags = 0; + if (!params->lfa_base) + return bnx2x_link_reset(params, vars, 1); + /* + * Activate NIG drain so that during this time the device won't send + * anything while it is unable to response. + */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + + /* + * Close gracefully the gate from BMAC to NIG such that no half packets + * are passed. + */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + /* Wait 10ms for the pipe to clean up*/ + usleep_range(10000, 20000); + + /* Clean the NIG-BRB using the network filters in a way that will + * not cut a packet in the middle. + */ + bnx2x_set_rx_filter(params, 0); + + /* + * Re-open the gate between the BMAC and the NIG, after verifying the + * gate to the BRB is closed, otherwise packets may arrive to the + * firmware before driver had initialized it. The target is to achieve + * minimum management protocol down time. + */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 1); + bnx2x_set_umac_rxtx(params, 1); + } + /* Disable NIG drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + return 0; +} /****************************************************************************/ /* Common function */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 51cac8130051..9165b89a4b19 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -155,7 +155,7 @@ struct bnx2x_phy { #define FLAGS_DUMMY_READ (1<<9) #define FLAGS_MDC_MDIO_WA_B0 (1<<10) #define FLAGS_TX_ERROR_CHECK (1<<12) -#define FLAGS_EEE_10GBT (1<<13) +#define FLAGS_EEE (1<<13) /* preemphasis values for the rx side */ u16 rx_preemphasis[4]; @@ -216,6 +216,7 @@ struct bnx2x_phy { phy_specific_func_t phy_specific_func; #define DISABLE_TX 1 #define ENABLE_TX 2 +#define PHY_INIT 3 }; /* Inputs parameters to the CLC */ @@ -304,6 +305,8 @@ struct link_params { struct bnx2x *bp; u16 req_fc_auto_adv; /* Should be set to TX / BOTH when req_flow_ctrl is set to AUTO */ + u16 rsrv1; + u32 lfa_base; }; /* Output parameters */ @@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); to 0 */ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, u8 reset_ext_phy); - +int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars); /* bnx2x_link_update should be called upon link interrupt */ int bnx2x_link_update(struct link_params *params, struct link_vars *vars); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 211753e01f81..7a9157052c7c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2171,7 +2171,6 @@ void bnx2x_link_set(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); @@ -2184,12 +2183,19 @@ static void bnx2x__link_reset(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_lfa_reset(&bp->link_params, 
&bp->link_vars); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not reset link\n"); } +void bnx2x_force_link_reset(struct bnx2x *bp) +{ + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); +} + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) { u8 rc = 0; @@ -6757,7 +6763,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) u32 low, high; u32 val; - bnx2x__link_reset(bp); DP(NETIF_MSG_HW, "starting port init port %d\n", port); @@ -8250,12 +8255,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp) +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) { + u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + /* Report UNLOAD_DONE to MCP */ if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } static int bnx2x_func_wait_started(struct bnx2x *bp) @@ -8324,7 +8332,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) return 0; } -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); int i, rc = 0; @@ -8446,7 +8454,7 @@ unload_error: /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, keep_link); } void bnx2x_disable_close_the_gate(struct bnx2x *bp) @@ -8858,7 +8866,8 @@ int bnx2x_leader_reset(struct bnx2x *bp) * driver is owner of the HW */ if (!global && !BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EAGAIN; @@ -8964,7 +8973,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) /* Stop the driver */ /* If interface has been removed - break */ - if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) return; bp->recovery_state = BNX2X_RECOVERY_WAIT; @@ -9130,7 +9139,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL); goto sp_rtnl_exit; @@ -9316,7 +9325,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) { - u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BNX2X_ERR("MCP response failure, aborting\n"); return -EBUSY; @@ -11009,7 +11019,7 @@ static int bnx2x_close(struct net_device *dev) struct bnx2x *bp = netdev_priv(dev); /* Unload the driver, release IRQs */ - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); /* Power off */ bnx2x_set_power_state(bp, PCI_D3hot); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 28a0bcfe61ff..1b1999d34c71 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -4949,6 +4949,10 @@ #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) #define UMAC_COMMAND_CONFIG_REG_TX_ENA 
(0x1<<0) #define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE + * state from LPI state when it receives packet for transmission. The + * decrement unit is 1 micro-second. */ +#define UMAC_REG_EEE_WAKE_TIMER 0x6c /* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers * to bit 17 of the MAC address etc. */ #define UMAC_REG_MAC_ADDR0 0xc @@ -4958,6 +4962,8 @@ /* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive * logic to check frames. */ #define UMAC_REG_MAXFR 0x14 +#define UMAC_REG_UMAC_EEE_CTRL 0x64 +#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3) /* [RW 8] The event id for aggregated interrupt 0 */ #define USDM_REG_AGG_INT_EVENT_0 0xc4038 #define USDM_REG_AGG_INT_EVENT_1 0xc403c @@ -6992,6 +6998,7 @@ Theotherbitsarereservedandshouldbezero*/ /* BCM84833 only */ #define MDIO_84833_TOP_CFG_FW_REV 0x400f #define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 +#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a #define MDIO_84833_SUPER_ISOLATE 0x8000 /* These are mailbox register set used by 84833. */ @@ -7160,10 +7167,11 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_REG_GPHY_ID_54618SE 0x5cd5 #define MDIO_REG_GPHY_CL45_ADDR_REG 0xd #define MDIO_REG_GPHY_CL45_DATA_REG 0xe -#define MDIO_REG_GPHY_EEE_ADV 0x3c -#define MDIO_REG_GPHY_EEE_1G (0x1 << 2) -#define MDIO_REG_GPHY_EEE_100 (0x1 << 1) #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15 +#define MDIO_REG_GPHY_EXP_ACCESS 0x17 +#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00 +#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40 #define MDIO_REG_GPHY_AUX_STATUS 0x19 #define MDIO_REG_INTR_STATUS 0x1a #define MDIO_REG_INTR_MASK 0x1b diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 62f754bd0dfe..5a5fbf57c4b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ list_add_tail(&spacer.link, &o->pending_comp); mb(); - list_del(&elem->link); - list_add_tail(&elem->link, &o->pending_comp); + list_move_tail(&elem->link, &o->pending_comp); list_del(&spacer.link); } else break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index a1d0446b39b3..348ed02d3c69 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref) #endif } -static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) +static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) { - u16 res = sizeof(struct host_port_stats) >> 2; + u16 res = 0; - /* if PFC stats are not supported by the MFW, don't DMA them */ - if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) - res -= (sizeof(u32)*4) >> 2; + /* 'newest' convention - shmem2 cotains the size of the port stats */ + if (SHMEM2_HAS(bp, sizeof_port_stats)) { + u32 size = SHMEM2_RD(bp, sizeof_port_stats); + if (size) + res = size; + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) + res = sizeof(struct host_port_stats); + } + + /* Older convention - all BCs support the port stats' fields up until + * the 'not_used' field + */ + if (!res) { + res = offsetof(struct host_port_stats, not_used) + 4; + + /* if PFC stats are supported by the MFW, DMA them as well */ + if 
(bp->flags & BC_SUPPORTS_PFC_STATS) { + res += offsetof(struct host_port_stats, + pfc_frames_rx_lo) - + offsetof(struct host_port_stats, + pfc_frames_tx_hi) + 4 ; + } + } + + res >>= 2; + + WARN_ON(res > 2 * DMAE_LEN32_RD_MAX); return res; } diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 3b4fc61f24cf..2107d79d69b3 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev) } } -static void __cnic_free_uio(struct cnic_uio_dev *udev) +static void __cnic_free_uio_rings(struct cnic_uio_dev *udev) { - uio_unregister_device(&udev->cnic_uinfo); - if (udev->l2_buf) { dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, udev->l2_buf, udev->l2_buf_map); @@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev) udev->l2_ring = NULL; } +} + +static void __cnic_free_uio(struct cnic_uio_dev *udev) +{ + uio_unregister_device(&udev->cnic_uinfo); + + __cnic_free_uio_rings(udev); + pci_dev_put(udev->pdev); kfree(udev); } @@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev) if (udev) { udev->dev = NULL; cp->udev = NULL; + if (udev->uio_dev == -1) + __cnic_free_uio_rings(udev); } cnic_free_context(dev); @@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, return 0; } +static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) +{ + struct cnic_local *cp = udev->dev->cnic_priv; + + if (udev->l2_ring) + return 0; + + udev->l2_ring_size = pages * BCM_PAGE_SIZE; + udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, + &udev->l2_ring_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_ring) + return -ENOMEM; + + udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; + udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); + udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, + &udev->l2_buf_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_buf) { + __cnic_free_uio_rings(udev); + return -ENOMEM; + } + + return 0; + +} + static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) { struct cnic_local *cp = dev->cnic_priv; @@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) list_for_each_entry(udev, &cnic_udev_list, list) { if (udev->pdev == dev->pcidev) { udev->dev = dev; + if (__cnic_alloc_uio_rings(udev, pages)) { + udev->dev = NULL; + read_unlock(&cnic_dev_lock); + return -ENOMEM; + } cp->udev = udev; read_unlock(&cnic_dev_lock); return 0; @@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) udev->dev = dev; udev->pdev = dev->pcidev; - udev->l2_ring_size = pages * BCM_PAGE_SIZE; - udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, - &udev->l2_ring_map, - GFP_KERNEL | __GFP_COMP); - if (!udev->l2_ring) - goto err_udev; - udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; - udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); - udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, - &udev->l2_buf_map, - GFP_KERNEL | __GFP_COMP); - if (!udev->l2_buf) - goto err_dma; + if (__cnic_alloc_uio_rings(udev, pages)) + goto err_udev; write_lock(&cnic_dev_lock); list_add(&udev->list, &cnic_udev_list); @@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) cp->udev = udev; return 0; - err_dma: - dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, - udev->l2_ring, 
udev->l2_ring_map); + err_udev: kfree(udev); return -ENOMEM; @@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) if (ret) goto error; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (CNIC_SUPPORTS_FCOE(cp)) { ret = cnic_alloc_kcq(dev, &cp->kcq2, true); if (ret) goto error; @@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) if (ret) goto error; + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + return 0; + cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; cp->l2_rx_ring_size = 15; @@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) IGU_INT_DISABLE, 0); } +static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx, + IGU_INT_ENABLE, 1); +} + +static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx, + IGU_INT_ENABLE, 1); +} + static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) { u32 last_status = *info->status_idx_ptr; @@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data) CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); - if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, - status_idx, IGU_INT_ENABLE, 1); + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) { + cp->arm_int(dev, status_idx); break; } @@ -5308,7 +5354,7 @@ static void cnic_stop_hw(struct cnic_dev *dev) /* Need to wait for the ring shutdown event to complete * before clearing the CNIC_UP flag. */ - while (cp->udev->uio_dev != -1 && i < 15) { + while (cp->udev && cp->udev->uio_dev != -1 && i < 15) { msleep(100); i++; } @@ -5473,8 +5519,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) cdev->max_iscsi_conn = ethdev->max_iscsi_conn; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && - !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) + if (CNIC_SUPPORTS_FCOE(cp)) cdev->max_fcoe_conn = ethdev->max_fcoe_conn; if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) @@ -5492,10 +5537,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cp->stop_cm = cnic_cm_stop_bnx2x_hw; cp->enable_int = cnic_enable_bnx2x_int; cp->disable_int_sync = cnic_disable_bnx2x_int_sync; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { cp->ack_int = cnic_ack_bnx2x_e2_msix; - else + cp->arm_int = cnic_arm_bnx2x_e2_msix; + } else { cp->ack_int = cnic_ack_bnx2x_msix; + cp->arm_int = cnic_arm_bnx2x_msix; + } cp->close_conn = cnic_close_bnx2x_conn; return cdev; } diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 30328097f516..148604c3fa0c 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -334,6 +334,7 @@ struct cnic_local { void (*enable_int)(struct cnic_dev *); void (*disable_int_sync)(struct cnic_dev *); void (*ack_int)(struct cnic_dev *); + void (*arm_int)(struct cnic_dev *, u32 index); void (*close_conn)(struct cnic_sock *, u32 opcode); }; @@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next { MAX_STAT_COUNTER_ID_E1)) #endif +#define CNIC_SUPPORTS_FCOE(cp) \ + (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \ + !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) + #define CNIC_RAMROD_TMO (HZ / 4) #endif diff --git 
a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 5cb88881bba1..2e92c348083e 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.12" -#define CNIC_MODULE_RELDATE "June 29, 2012" +#define CNIC_MODULE_VERSION "2.5.13" +#define CNIC_MODULE_RELDATE "Sep 07, 2012" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5ed49af23d6a..34d510dd56a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2470,8 +2470,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, else delta = size - hw_pidx + pidx; wmb(); - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(qid) | V_PIDX(delta)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(qid) | PIDX(delta)); } out: return ret; @@ -2579,8 +2579,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) else delta = q->size - hw_pidx + q->db_pidx; wmb(); - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(q->cntxt_id) | V_PIDX(delta)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(delta)); } out: q->db_disabled = 0; @@ -2617,9 +2617,9 @@ static void process_db_full(struct work_struct *work) notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); drain_db_fifo(adap, dbfifo_drain_delay); - t4_set_reg_field(adap, A_SGE_INT_ENABLE3, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, + DBFIFO_HP_INT | DBFIFO_LP_INT); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); } @@ -2639,8 +2639,8 @@ static void process_db_drop(struct work_struct *work) void t4_db_full(struct adapter *adap) { - t4_set_reg_field(adap, A_SGE_INT_ENABLE3, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index d49933ed551f..1fde57d45318 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -769,8 +769,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) wmb(); /* write descriptors before telling HW */ spin_lock(&q->db_lock); if (!q->db_disabled) { - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(q->cntxt_id) | V_PIDX(n)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); } q->db_pidx = q->pidx; spin_unlock(&q->db_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index fa947dfa4c30..8e988d699d05 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1018,9 +1018,9 @@ static void sge_intr_handler(struct adapter *adapter) { ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, - { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, - { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, - { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, + { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, + { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, + { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, { ERR_DATA_CPL_ON_HIGH_QID1 
| ERR_DATA_CPL_ON_HIGH_QID0, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, @@ -1520,7 +1520,7 @@ void t4_intr_enable(struct adapter *adapter) ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT | + DBFIFO_HP_INT | DBFIFO_LP_INT | EGRESS_SIZE_ERR); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); @@ -2033,8 +2033,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) return -EINVAL; - t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15); - t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET); + t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15); + t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); for (i = 0; i < len; i += 4) *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 111fc323f155..8e814bc46822 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -190,58 +190,30 @@ #define SGE_DEBUG_DATA_LOW 0x10d4 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 -#define S_LP_INT_THRESH 12 -#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) #define S_HP_INT_THRESH 28 +#define M_HP_INT_THRESH 0xfU #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) +#define M_HP_COUNT 0x7ffU +#define S_HP_COUNT 16 +#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) +#define S_LP_INT_THRESH 12 +#define M_LP_INT_THRESH 0xfU +#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) +#define M_LP_COUNT 0x7ffU +#define S_LP_COUNT 0 +#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) #define A_SGE_DBFIFO_STATUS 0x10a4 #define S_ENABLE_DROP 13 #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) #define F_ENABLE_DROP V_ENABLE_DROP(1U) -#define A_SGE_DOORBELL_CONTROL 0x10a8 - -#define A_SGE_CTXT_CMD 0x11fc -#define A_SGE_DBQ_CTXT_BADDR 0x1084 - -#define A_SGE_PF_KDOORBELL 0x0 - -#define S_QID 15 -#define V_QID(x) ((x) << S_QID) - -#define S_PIDX 0 -#define V_PIDX(x) ((x) << S_PIDX) - -#define M_LP_COUNT 0x7ffU -#define S_LP_COUNT 0 -#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) - -#define M_HP_COUNT 0x7ffU -#define S_HP_COUNT 16 -#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) - -#define A_SGE_INT_ENABLE3 0x1040 - -#define S_DBFIFO_HP_INT 8 -#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) -#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) - -#define S_DBFIFO_LP_INT 7 -#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) -#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) - #define S_DROPPED_DB 0 #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) #define F_DROPPED_DB V_DROPPED_DB(1U) +#define A_SGE_DOORBELL_CONTROL 0x10a8 -#define S_ERR_DROPPED_DB 18 -#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) -#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) - -#define A_PCIE_MEM_ACCESS_OFFSET 0x306c - -#define M_HP_INT_THRESH 0xfU -#define M_LP_INT_THRESH 0xfU +#define A_SGE_CTXT_CMD 0x11fc +#define A_SGE_DBQ_CTXT_BADDR 0x1084 #define PCIE_PF_CLI 0x44 #define PCIE_INT_CAUSE 0x3004 diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index d266c86a53f7..5b622993ff17 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -110,6 +110,7 @@ static inline char *nic_name(struct 
pci_dev *pdev) #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) +#define MAX_VFS 30 /* Max VFs supported by BE3 FW */ #define FW_VER_LEN 32 struct be_dma_mem { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 8c63d06ab12b..701b3e9a715b 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { dev_warn(&adapter->pdev->dev, - "opcode %d-%d is not permitted\n", + "VF is not privileged to issue opcode %d-%d\n", opcode, subsystem); } else { extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 78b8aa8069f0..84379f4fe837 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -20,6 +20,7 @@ #include "be.h" #include "be_cmds.h" #include <asm/div64.h> +#include <linux/aer.h> MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -2176,8 +2177,7 @@ static uint be_num_rss_want(struct be_adapter *adapter) { u32 num = 0; if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !sriov_want(adapter) && be_physfn(adapter) && - !be_is_mc(adapter)) { + !sriov_want(adapter) && be_physfn(adapter)) { num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); } @@ -2646,8 +2646,8 @@ static int be_vf_setup(struct be_adapter *adapter) } for_all_vfs(adapter, vf_cfg, vf) { - status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, - NULL, vf + 1); + lnk_speed = 1000; + status = be_cmd_set_qos(adapter, lnk_speed, vf + 1); if (status) goto err; vf_cfg->tx_rate = lnk_speed * 10; @@ -2724,6 +2724,8 @@ static int be_get_config(struct be_adapter *adapter) if (pos) { pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, &dev_num_vfs); + if (!lancer_chip(adapter)) + dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); adapter->dev_num_vfs = dev_num_vfs; } return 0; @@ -3437,6 +3439,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter) if (mem->va) dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); + kfree(adapter->pmac_id); } static int be_ctrl_init(struct be_adapter *adapter) @@ -3473,6 +3476,12 @@ static int be_ctrl_init(struct be_adapter *adapter) } memset(rx_filter->va, 0, rx_filter->size); + /* primary mac needs 1 pmac entry */ + adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); @@ -3543,6 +3552,8 @@ static void __devexit be_remove(struct pci_dev *pdev) be_ctrl_cleanup(adapter); + pci_disable_pcie_error_reporting(pdev); + pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); @@ -3609,12 +3620,6 @@ static int be_get_initial_config(struct be_adapter *adapter) else adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT; - /* primary mac needs 1 pmac entry */ - adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, - sizeof(u32), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - status = be_cmd_get_cntl_attributes(adapter); if (status) return status; @@ -3844,6 +3849,10 @@ static int __devinit be_probe(struct pci_dev *pdev, 
} } + status = pci_enable_pcie_error_reporting(pdev); + if (status) + dev_err(&pdev->dev, "Could not use PCIe error reporting\n"); + status = be_ctrl_init(adapter); if (status) goto free_netdev; @@ -4066,6 +4075,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) if (status) return PCI_ERS_RESULT_DISCONNECT; + pci_cleanup_aer_uncorrect_error_status(pdev); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 3574e1499dfc..feff51664dcf 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -62,6 +62,13 @@ config FSL_PQ_MDIO ---help--- This driver supports the MDIO bus used by the gianfar and UCC drivers. +config FSL_XGMAC_MDIO + tristate "Freescale XGMAC MDIO" + depends on FSL_SOC + select PHYLIB + ---help--- + This driver supports the MDIO bus on the Fman 10G Ethernet MACs. + config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 1752488c9ee5..3d1839afff65 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) endif obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o +obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 9527b28d70d1..c93a05654b46 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -19,54 +19,90 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/unistd.h> #include <linux/slab.h> -#include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mm.h> #include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/crc32.h> #include <linux/mii.h> -#include <linux/phy.h> -#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> +#include <linux/of_device.h> #include <asm/io.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/ucc.h> +#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */ #include "gianfar.h" -#include "fsl_pq_mdio.h" + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 +#define MIIMCFG_INIT_VALUE 0x00000007 +#define MIIMCFG_RESET 0x80000000 + +#define MII_READ_COMMAND 0x00000001 + +struct fsl_pq_mii { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +}; + +struct fsl_pq_mdio { + u8 res1[16]; + u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ + u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ + u8 res2[4]; + u32 emapm; /* MDIO Event mapping register (for etsec2)*/ + u8 res3[1280]; + struct fsl_pq_mii mii; + u8 res4[28]; + u32 utbipar; /* TBI phy address reg (only on UCC) */ + u8 res5[2728]; +} __packed; /* Number of microseconds to wait for 
an MII register to respond */ #define MII_TIMEOUT 1000 struct fsl_pq_mdio_priv { void __iomem *map; - struct fsl_pq_mdio __iomem *regs; + struct fsl_pq_mii __iomem *regs; + int irqs[PHY_MAX_ADDR]; +}; + +/* + * Per-device-type data. Each type of device tree node that we support gets + * one of these. + * + * @mii_offset: the offset of the MII registers within the memory map of the + * node. Some nodes define only the MII registers, and some define the whole + * MAC (which includes the MII registers). + * + * @get_tbipa: determines the address of the TBIPA register + * + * @ucc_configure: a special function for extra QE configuration + */ +struct fsl_pq_mdio_data { + unsigned int mii_offset; /* offset of the MII registers */ + uint32_t __iomem * (*get_tbipa)(void __iomem *p); + void (*ucc_configure)(phys_addr_t start, phys_addr_t end); }; /* - * Write value to the PHY at mii_id at register regnum, - * on the bus attached to the local interface, which may be different from the - * generic mdio bus (tied to a single interface), waiting until the write is - * done before returning. This is helpful in programming interfaces like - * the TBI which control interfaces like onchip SERDES and are always tied to - * the local mdio pins, which may not be the same as system mdio bus, used for + * Write value to the PHY at mii_id at register regnum, on the bus attached + * to the local interface, which may be different from the generic mdio bus + * (tied to a single interface), waiting until the write is done before + * returning. This is helpful in programming interfaces like the TBI which + * control interfaces like onchip SERDES and are always tied to the local + * mdio pins, which may not be the same as system mdio bus, used for * controlling the external PHYs, for example. */ -int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, - int regnum, u16 value) +static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; u32 status; /* Set the PHY address and the register address we want to write */ @@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, } /* - * Read the bus for PHY at addr mii_id, register regnum, and - * return the value. Clears miimcom first. All PHY operation - * done on the bus attached to the local interface, - * which may be different from the generic mdio bus - * This is helpful in programming interfaces like - * the TBI which, in turn, control interfaces like onchip SERDES - * and are always tied to the local mdio pins, which may not be the + * Read the bus for PHY at addr mii_id, register regnum, and return the value. + * Clears miimcom first. + * + * All PHY operation done on the bus attached to the local interface, which + * may be different from the generic mdio bus. This is helpful in programming + * interfaces like the TBI which, in turn, control interfaces like on-chip + * SERDES and are always tied to the local mdio pins, which may not be the * same as system mdio bus, used for controlling the external PHYs, for eg. 
*/ -int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, - int mii_id, int regnum) +static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - u16 value; + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; u32 status; + u16 value; /* Set the PHY address and the register address we want to read */ out_be32(®s->miimadd, (mii_id << 8) | regnum); @@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, /* Grab the value of the register from miimstat */ value = in_be32(®s->miimstat); + dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum); return value; } -static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) -{ - struct fsl_pq_mdio_priv *priv = bus->priv; - - return priv->regs; -} - -/* - * Write value to the PHY at mii_id at register regnum, - * on the bus, waiting until the write is done before returning. - */ -int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) -{ - struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); - - /* Write to the local MII regs */ - return fsl_pq_local_mdio_write(regs, mii_id, regnum, value); -} - -/* - * Read the bus for PHY at addr mii_id, register regnum, and - * return the value. Clears miimcom first. - */ -int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -{ - struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); - - /* Read the local MII regs */ - return fsl_pq_local_mdio_read(regs, mii_id, regnum); -} - /* Reset the MIIM registers, and wait for the bus to free */ static int fsl_pq_mdio_reset(struct mii_bus *bus) { - struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; u32 status; mutex_lock(&bus->mdio_lock); @@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) mutex_unlock(&bus->mdio_lock); if (!status) { - printk(KERN_ERR "%s: The MII Bus is stuck!\n", - bus->name); + dev_err(&bus->dev, "timeout waiting for MII bus\n"); return -EBUSY; } return 0; } -void fsl_pq_mdio_bus_name(char *name, struct device_node *np) +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) +/* + * This is mildly evil, but so is our hardware for doing this. + * Also, we have to cast back to struct gfar because of + * definition weirdness done in gianfar.h. + */ +static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) { - const u32 *addr; - u64 taddr = OF_BAD_ADDR; - - addr = of_get_address(np, 0, NULL, NULL); - if (addr) - taddr = of_translate_address(np, addr); + struct gfar __iomem *enet_regs = p; - snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, - (unsigned long long)taddr); + return &enet_regs->tbipa; } -EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name); +/* + * Return the TBIPAR address for an eTSEC2 node + */ +static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) +{ + return p; +} +#endif -static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) +/* + * Return the TBIPAR address for a QE MDIO node + */ +static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) { -#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) - struct gfar __iomem *enet_regs; + struct fsl_pq_mdio __iomem *mdio = p; - /* - * This is mildly evil, but so is our hardware for doing this. 
- * Also, we have to cast back to struct gfar because of - * definition weirdness done in gianfar.h. - */ - if(of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "gianfar")) { - enet_regs = (struct gfar __iomem *)regs; - return &enet_regs->tbipa; - } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || - of_device_is_compatible(np, "fsl,etsec2-tbi")) { - return of_iomap(np, 1); - } -#endif - return NULL; + return &mdio->utbipar; } - -static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) +/* + * Find the UCC node that controls the given MDIO node + * + * For some reason, the QE MDIO nodes are not children of the UCC devices + * that control them. Therefore, we need to scan all UCC nodes looking for + * the one that encompases the given MDIO node. We do this by comparing + * physical addresses. The 'start' and 'end' addresses of the MDIO node are + * passed, and the correct UCC node will cover the entire address range. + * + * This assumes that there is only one QE MDIO node in the entire device tree. + */ +static void ucc_configure(phys_addr_t start, phys_addr_t end) { -#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) + static bool found_mii_master; struct device_node *np = NULL; - int err = 0; - for_each_compatible_node(np, NULL, "ucc_geth") { - struct resource tempres; + if (found_mii_master) + return; - err = of_address_to_resource(np, 0, &tempres); - if (err) + for_each_compatible_node(np, NULL, "ucc_geth") { + struct resource res; + const uint32_t *iprop; + uint32_t id; + int ret; + + ret = of_address_to_resource(np, 0, &res); + if (ret < 0) { + pr_debug("fsl-pq-mdio: no address range in node %s\n", + np->full_name); continue; + } /* if our mdio regs fall within this UCC regs range */ - if ((start >= tempres.start) && (end <= tempres.end)) { - /* Find the id of the UCC */ - const u32 *id; - - id = of_get_property(np, "cell-index", NULL); - if (!id) { - id = of_get_property(np, "device-id", NULL); - if (!id) - continue; + if ((start < res.start) || (end > res.end)) + continue; + + iprop = of_get_property(np, "cell-index", NULL); + if (!iprop) { + iprop = of_get_property(np, "device-id", NULL); + if (!iprop) { + pr_debug("fsl-pq-mdio: no UCC ID in node %s\n", + np->full_name); + continue; } + } - *ucc_id = *id; + id = be32_to_cpup(iprop); - return 0; + /* + * cell-index and device-id for QE nodes are + * numbered from 1, not 0. 
+ */ + if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { + pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", + np->full_name); + continue; } + + pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id); + found_mii_master = true; } +} - if (err) - return err; - else - return -EINVAL; -#else - return -ENODEV; #endif -} -static int fsl_pq_mdio_probe(struct platform_device *ofdev) +static struct of_device_id fsl_pq_mdio_match[] = { +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) + { + .compatible = "fsl,gianfar-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,gianfar-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .type = "mdio", + .compatible = "gianfar", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, +#endif +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) + { + .compatible = "fsl,ucc-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, + { + /* Legacy UCC MDIO node */ + .type = "mdio", + .compatible = "ucc_geth_phy", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, +#endif + /* No Kconfig option for Fman support yet */ + { + .compatible = "fsl,fman-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + /* Fman TBI operations are handled elsewhere */ + }, + }, + + {}, +}; +MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); + +static int fsl_pq_mdio_probe(struct platform_device *pdev) { - struct device_node *np = ofdev->dev.of_node; + const struct of_device_id *id = + of_match_device(fsl_pq_mdio_match, &pdev->dev); + const struct fsl_pq_mdio_data *data = id->data; + struct device_node *np = pdev->dev.of_node; + struct resource res; struct device_node *tbi; struct fsl_pq_mdio_priv *priv; - struct fsl_pq_mdio __iomem *regs = NULL; - void __iomem *map; - u32 __iomem *tbipa; struct mii_bus *new_bus; - int tbiaddr = -1; - const u32 *addrp; - u64 addr = 0, size = 0; int err; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); - new_bus = mdiobus_alloc(); - if (!new_bus) { - err = -ENOMEM; - goto err_free_priv; - } + new_bus = mdiobus_alloc_size(sizeof(*priv)); + if (!new_bus) + return -ENOMEM; + priv = new_bus->priv; new_bus->name = "Freescale PowerQUICC MII Bus", - new_bus->read = &fsl_pq_mdio_read, - new_bus->write = &fsl_pq_mdio_write, - new_bus->reset = &fsl_pq_mdio_reset, - new_bus->priv = priv; - fsl_pq_mdio_bus_name(new_bus->id, np); - - addrp = of_get_address(np, 0, &size, NULL); - if (!addrp) { - err = -EINVAL; - goto err_free_bus; + new_bus->read = &fsl_pq_mdio_read; + new_bus->write = &fsl_pq_mdio_write; + new_bus->reset = &fsl_pq_mdio_reset; + new_bus->irq = priv->irqs; + + err = of_address_to_resource(np, 0, &res); + if (err < 0) { + dev_err(&pdev->dev, "could not obtain address information\n"); + goto error; } - /* Set the PHY 
base address */ - addr = of_translate_address(np, addrp); - if (addr == OF_BAD_ADDR) { - err = -EINVAL; - goto err_free_bus; - } + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, + (unsigned long long)res.start); - map = ioremap(addr, size); - if (!map) { + priv->map = of_iomap(np, 0); + if (!priv->map) { err = -ENOMEM; - goto err_free_bus; + goto error; } - priv->map = map; - - if (of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "fsl,ucc-mdio") || - of_device_is_compatible(np, "ucc_geth_phy")) - map -= offsetof(struct fsl_pq_mdio, miimcfg); - regs = map; - priv->regs = regs; - - new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (NULL == new_bus->irq) { - err = -ENOMEM; - goto err_unmap_regs; + /* + * Some device tree nodes represent only the MII registers, and + * others represent the MAC and MII registers. The 'mii_offset' field + * contains the offset of the MII registers inside the mapped register + * space. + */ + if (data->mii_offset > resource_size(&res)) { + dev_err(&pdev->dev, "invalid register map\n"); + err = -EINVAL; + goto error; } + priv->regs = priv->map + data->mii_offset; - new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); - - if (of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "fsl,etsec2-mdio") || - of_device_is_compatible(np, "fsl,etsec2-tbi") || - of_device_is_compatible(np, "gianfar")) { - tbipa = get_gfar_tbipa(regs, np); - if (!tbipa) { - err = -EINVAL; - goto err_free_irqs; - } - } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || - of_device_is_compatible(np, "ucc_geth_phy")) { - u32 id; - static u32 mii_mng_master; - - tbipa = ®s->utbipar; - - if ((err = get_ucc_id_for_range(addr, addr + size, &id))) - goto err_free_irqs; + new_bus->parent = &pdev->dev; + dev_set_drvdata(&pdev->dev, new_bus); - if (!mii_mng_master) { - mii_mng_master = id; - ucc_set_qe_mux_mii_mng(id - 1); + if (data->get_tbipa) { + for_each_child_of_node(np, tbi) { + if (strcmp(tbi->type, "tbi-phy") == 0) { + dev_dbg(&pdev->dev, "found TBI PHY node %s\n", + strrchr(tbi->full_name, '/') + 1); + break; + } } - } else { - err = -ENODEV; - goto err_free_irqs; - } - for_each_child_of_node(np, tbi) { - if (!strncmp(tbi->type, "tbi-phy", 8)) - break; - } + if (tbi) { + const u32 *prop = of_get_property(tbi, "reg", NULL); + uint32_t __iomem *tbipa; - if (tbi) { - const u32 *prop = of_get_property(tbi, "reg", NULL); + if (!prop) { + dev_err(&pdev->dev, + "missing 'reg' property in node %s\n", + tbi->full_name); + err = -EBUSY; + goto error; + } - if (prop) - tbiaddr = *prop; + tbipa = data->get_tbipa(priv->map); - if (tbiaddr == -1) { - err = -EBUSY; - goto err_free_irqs; - } else { - out_be32(tbipa, tbiaddr); + out_be32(tbipa, be32_to_cpup(prop)); } } + if (data->ucc_configure) + data->ucc_configure(res.start, res.end); + err = of_mdiobus_register(new_bus, np); if (err) { - printk (KERN_ERR "%s: Cannot register as MDIO bus\n", - new_bus->name); - goto err_free_irqs; + dev_err(&pdev->dev, "cannot register %s as MDIO bus\n", + new_bus->name); + goto error; } return 0; -err_free_irqs: - kfree(new_bus->irq); -err_unmap_regs: - iounmap(priv->map); -err_free_bus: +error: + if (priv->map) + iounmap(priv->map); + kfree(new_bus); -err_free_priv: - kfree(priv); + return err; } -static int fsl_pq_mdio_remove(struct platform_device *ofdev) +static int fsl_pq_mdio_remove(struct 
platform_device *pdev) { - struct device *device = &ofdev->dev; + struct device *device = &pdev->dev; struct mii_bus *bus = dev_get_drvdata(device); struct fsl_pq_mdio_priv *priv = bus->priv; @@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev) dev_set_drvdata(device, NULL); iounmap(priv->map); - bus->priv = NULL; mdiobus_free(bus); - kfree(priv); return 0; } -static struct of_device_id fsl_pq_mdio_match[] = { - { - .type = "mdio", - .compatible = "ucc_geth_phy", - }, - { - .type = "mdio", - .compatible = "gianfar", - }, - { - .compatible = "fsl,ucc-mdio", - }, - { - .compatible = "fsl,gianfar-tbi", - }, - { - .compatible = "fsl,gianfar-mdio", - }, - { - .compatible = "fsl,etsec2-tbi", - }, - { - .compatible = "fsl,etsec2-mdio", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); - static struct platform_driver fsl_pq_mdio_driver = { .driver = { .name = "fsl-pq_mdio", diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h deleted file mode 100644 index bd17a2a0139b..000000000000 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation - * Driver for the MDIO bus controller on Freescale PowerQUICC processors - * - * Author: Andy Fleming - * Modifier: Sandeep Gopalpet - * - * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ -#ifndef __FSL_PQ_MDIO_H -#define __FSL_PQ_MDIO_H - -#define MIIMIND_BUSY 0x00000001 -#define MIIMIND_NOTVALID 0x00000004 -#define MIIMCFG_INIT_VALUE 0x00000007 -#define MIIMCFG_RESET 0x80000000 - -#define MII_READ_COMMAND 0x00000001 - -struct fsl_pq_mdio { - u8 res1[16]; - u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ - u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ - u8 res2[4]; - u32 emapm; /* MDIO Event mapping register (for etsec2)*/ - u8 res3[1280]; - u32 miimcfg; /* MII management configuration reg */ - u32 miimcom; /* MII management command reg */ - u32 miimadd; /* MII management address reg */ - u32 miimcon; /* MII management control reg */ - u32 miimstat; /* MII management status reg */ - u32 miimind; /* MII management indication reg */ - u8 reserved[28]; /* Space holder */ - u32 utbipar; /* TBI phy address reg (only on UCC) */ - u8 res4[2728]; -} __packed; - -int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); -int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); -int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, - int regnum, u16 value); -int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum); -int __init fsl_pq_mdio_init(void); -void fsl_pq_mdio_exit(void); -void fsl_pq_mdio_bus_name(char *name, struct device_node *np); -#endif /* FSL_PQ_MDIO_H */ diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d3233f59a82e..4d5b58ce1298 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -100,7 +100,6 @@ #include <linux/of_net.h> #include "gianfar.h" -#include "fsl_pq_mdio.h" #define TX_TIMEOUT (1*HZ) diff --git a/drivers/net/ethernet/freescale/ucc_geth.c 
b/drivers/net/ethernet/freescale/ucc_geth.c index 21c6574c5f15..164288439220 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -42,7 +42,6 @@ #include <asm/machdep.h> #include "ucc_geth.h" -#include "fsl_pq_mdio.h" #undef DEBUG diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c new file mode 100644 index 000000000000..1afb5ea2a984 --- /dev/null +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -0,0 +1,274 @@ +/* + * QorIQ 10G MDIO Controller + * + * Copyright 2012 Freescale Semiconductor, Inc. + * + * Authors: Andy Fleming <afleming@freescale.com> + * Timur Tabi <timur@freescale.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/mdio.h> +#include <linux/of_platform.h> +#include <linux/of_mdio.h> + +/* Number of microseconds to wait for a register to respond */ +#define TIMEOUT 1000 + +struct tgec_mdio_controller { + __be32 reserved[12]; + __be32 mdio_stat; /* MDIO configuration and status */ + __be32 mdio_ctl; /* MDIO control */ + __be32 mdio_data; /* MDIO data */ + __be32 mdio_addr; /* MDIO address */ +} __packed; + +#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) +#define MDIO_STAT_BSY (1 << 0) +#define MDIO_STAT_RD_ER (1 << 1) +#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) +#define MDIO_CTL_PRE_DIS (1 << 10) +#define MDIO_CTL_SCAN_EN (1 << 11) +#define MDIO_CTL_POST_INC (1 << 14) +#define MDIO_CTL_READ (1 << 15) + +#define MDIO_DATA(x) (x & 0xffff) +#define MDIO_DATA_BSY (1 << 31) + +/* + * Wait untill the MDIO bus is free + */ +static int xgmac_wait_until_free(struct device *dev, + struct tgec_mdio_controller __iomem *regs) +{ + uint32_t status; + + /* Wait till the bus is free */ + status = spin_event_timeout( + !((in_be32(®s->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); + if (!status) { + dev_err(dev, "timeout waiting for bus to be free\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Wait till the MDIO read or write operation is complete + */ +static int xgmac_wait_until_done(struct device *dev, + struct tgec_mdio_controller __iomem *regs) +{ + uint32_t status; + + /* Wait till the MDIO write is complete */ + status = spin_event_timeout( + !((in_be32(®s->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); + if (!status) { + dev_err(dev, "timeout waiting for operation to complete\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Write value to the PHY for this device to the register at regnum,waiting + * until the write is done before it returns. All PHY configuration has to be + * done through the TSEC1 MIIM regs. 
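/*
 * [Editorial illustration, not part of the patch] The write/read handlers
 * below take the device (MMD) address from the upper 16 bits of 'regnum' and
 * the register offset from the lower 16 bits ('regnum >> 16' and
 * 'regnum & 0xffff'). A minimal standalone sketch of that packing, with
 * hypothetical helper names:
 */
#include <stdint.h>

static inline uint32_t xmdio_pack_regnum(uint16_t devad, uint16_t reg)
{
	return ((uint32_t)devad << 16) | reg;	/* device address | register */
}

static inline uint16_t xmdio_devad(uint32_t regnum)
{
	return regnum >> 16;		/* what the handlers below call dev_addr */
}

static inline uint16_t xmdio_reg(uint32_t regnum)
{
	return regnum & 0xffff;		/* value written to the mdio_addr register */
}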
+ */ +static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) +{ + struct tgec_mdio_controller __iomem *regs = bus->priv; + uint16_t dev_addr = regnum >> 16; + int ret; + + /* Setup the MII Mgmt clock speed */ + out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Set the port and dev addr */ + out_be32(®s->mdio_ctl, + MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); + + /* Set the register address */ + out_be32(®s->mdio_addr, regnum & 0xffff); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Write the value to the register */ + out_be32(®s->mdio_data, MDIO_DATA(value)); + + ret = xgmac_wait_until_done(&bus->dev, regs); + if (ret) + return ret; + + return 0; +} + +/* + * Reads from register regnum in the PHY for device dev, returning the value. + * Clears miimcom first. All PHY configuration has to be done through the + * TSEC1 MIIM regs. + */ +static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct tgec_mdio_controller __iomem *regs = bus->priv; + uint16_t dev_addr = regnum >> 16; + uint32_t mdio_ctl; + uint16_t value; + int ret; + + /* Setup the MII Mgmt clock speed */ + out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Set the Port and Device Addrs */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + out_be32(®s->mdio_ctl, mdio_ctl); + + /* Set the register address */ + out_be32(®s->mdio_addr, regnum & 0xffff); + + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + + /* Initiate the read */ + out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); + + ret = xgmac_wait_until_done(&bus->dev, regs); + if (ret) + return ret; + + /* Return all Fs if nothing was there */ + if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) { + dev_err(&bus->dev, "MDIO read error\n"); + return 0xffff; + } + + value = in_be32(®s->mdio_data) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", value); + + return value; +} + +/* Reset the MIIM registers, and wait for the bus to free */ +static int xgmac_mdio_reset(struct mii_bus *bus) +{ + struct tgec_mdio_controller __iomem *regs = bus->priv; + int ret; + + mutex_lock(&bus->mdio_lock); + + /* Setup the MII Mgmt clock speed */ + out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); + + ret = xgmac_wait_until_free(&bus->dev, regs); + + mutex_unlock(&bus->mdio_lock); + + return ret; +} + +static int __devinit xgmac_mdio_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mii_bus *bus; + struct resource res; + int ret; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(&pdev->dev, "could not obtain address\n"); + return ret; + } + + bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale XGMAC MDIO Bus"; + bus->read = xgmac_mdio_read; + bus->write = xgmac_mdio_write; + bus->reset = xgmac_mdio_reset; + bus->irq = bus->priv; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); + + /* Set the PHY base address */ + bus->priv = of_iomap(np, 0); + if (!bus->priv) { + ret = -ENOMEM; + goto err_ioremap; + } + + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "cannot register MDIO bus\n"); + goto err_registration; + } + + dev_set_drvdata(&pdev->dev, bus); + + return 0; + +err_registration: + iounmap(bus->priv); + 
+err_ioremap: + mdiobus_free(bus); + + return ret; +} + +static int __devexit xgmac_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = dev_get_drvdata(&pdev->dev); + + mdiobus_unregister(bus); + iounmap(bus->priv); + mdiobus_free(bus); + + return 0; +} + +static struct of_device_id xgmac_mdio_match[] = { + { + .compatible = "fsl,fman-xmdio", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgmac_mdio_match); + +static struct platform_driver xgmac_mdio_driver = { + .driver = { + .name = "fsl-fman_xmdio", + .of_match_table = xgmac_mdio_match, + }, + .probe = xgmac_mdio_probe, + .remove = xgmac_mdio_remove, +}; + +module_platform_driver(xgmac_mdio_driver); + +MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 736a7d987db5..9089d00f1421 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev, ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : + ETH_TP_MDI); + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->mdix; return 0; } @@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); @@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev, ecmd->advertising = hw->autoneg_advertised; } else { u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { clear_bit(__E1000_RESETTING, &adapter->flags); return -EINVAL; } } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = ecmd->eth_tp_mdix_ctrl; + } + /* reset the link */ if (netif_running(adapter->netdev)) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3bfbb8df8989..0ae2fcfa5124 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -4939,6 +4939,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + return 0; err_inval: diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 080c89093feb..c98586408005 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) **/ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) { - u16 data = er32(POEMB); + u32 data = er32(POEMB); if (active) data |= E1000_PHY_CTRL_D0A_LPLU; @@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) **/ static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) { - u16 data = er32(POEMB); + u32 data = er32(POEMB); if (!active) { data &= ~E1000_PHY_CTRL_NOND0A_LPLU; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 0349e2478df8..c11ac2756667 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev, else ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + return 0; } @@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + return 0; err_inval: @@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev, return -EINVAL; } + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
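/*
 * [Editorial illustration, not part of the patch] The e1000, e1000e and igb
 * hunks in this series all store the ethtool MDI/MDI-X request the same way:
 * MDI (1) and MDI-X (2) are kept as-is, and auto (3) is folded to the drivers'
 * internal AUTO_ALL_MODES (0), as the surrounding "3 => 0" comments note. A
 * minimal sketch of that mapping; the constants mirror linux/ethtool.h and the
 * function name is hypothetical:
 */
enum { ETH_TP_MDI_INVALID = 0, ETH_TP_MDI = 1, ETH_TP_MDI_X = 2, ETH_TP_MDI_AUTO = 3 };
#define AUTO_ALL_MODES 0

static unsigned char mdix_from_ethtool(unsigned char eth_tp_mdix_ctrl)
{
	/* a zero request means "leave unchanged" and is filtered out earlier */
	if (eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
		return AUTO_ALL_MODES;
	return eth_tp_mdix_ctrl;	/* 1 = force MDI, 2 = force MDI-X */
}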
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev, hw->fc.requested_mode = e1000_fc_default; } else { u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { clear_bit(__E1000_RESETTING, &adapter->state); return -EINVAL; } } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* + * fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + /* reset the link */ if (netif_running(adapter->netdev)) { e1000e_down(adapter); e1000e_up(adapter); - } else { + } else e1000e_reset(adapter); - } clear_bit(__E1000_RESETTING, &adapter->state); return 0; @@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev, return -EINVAL; if (ec->rx_coalesce_usecs == 4) { - adapter->itr = adapter->itr_setting = 4; + adapter->itr_setting = 4; + adapter->itr = adapter->itr_setting; } else if (ec->rx_coalesce_usecs <= 3) { adapter->itr = 20000; adapter->itr_setting = ec->rx_coalesce_usecs; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d01a099475a1..121990cab144 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -56,7 +56,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION +#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -3446,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter) /* * if short on Rx space, Rx wins and must trump Tx - * adjustment or use Early Receive if available + * adjustment */ if (pba < min_rx_space) pba = min_rx_space; @@ -3755,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) e_dbg("icr is %08X\n", icr); if (icr & E1000_ICR_RXSEQ) { adapter->flags &= ~FLAG_MSI_TEST_FAILED; + /* + * Force memory writes to complete before acknowledging the + * interrupt is handled. + */ wmb(); } @@ -3796,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) goto msi_test_failed; } + /* + * Force memory writes to complete before enabling and firing an + * interrupt. 
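/*
 * [Editorial illustration, not part of the patch] The comments added around
 * the MSI test in this netdev.c hunk describe a plain publish/consume pairing:
 * wmb() orders the flag update before the store that makes it observable, and
 * rmb() orders the later reads after the event that signalled completion. A
 * rough user-space analogue with C11 fences (not the kernel primitives, and
 * not a model of the exact e1000e flow):
 */
#include <stdatomic.h>

static int payload;			/* data written before publishing */
static atomic_int published;		/* flag the consumer polls */

static void producer(void)
{
	payload = 42;					/* update the data first */
	atomic_thread_fence(memory_order_release);	/* plays the role of wmb() */
	atomic_store_explicit(&published, 1, memory_order_relaxed);
}

static int consumer(void)
{
	while (!atomic_load_explicit(&published, memory_order_relaxed))
		;					/* wait for the flag */
	atomic_thread_fence(memory_order_acquire);	/* plays the role of rmb() */
	return payload;					/* now guaranteed to see 42 */
}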
+ */ wmb(); e1000_irq_enable(adapter); @@ -3807,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) e1000_irq_disable(adapter); - rmb(); + rmb(); /* read flags after interrupt has been fired */ if (adapter->flags & FLAG_MSI_TEST_FAILED) { adapter->int_mode = E1000E_INT_MODE_LEGACY; @@ -4670,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) struct e1000_buffer *buffer_info; unsigned int i; u32 cmd_length = 0; - u16 ipcse = 0, tucse, mss; + u16 ipcse = 0, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len; if (!skb_is_gso(skb)) @@ -4704,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; tucss = skb_transport_offset(skb); tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; - tucse = 0; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); @@ -4718,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; - context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index b860d4f7ea2a..fc62a3f3a5be 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = { #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 /* I82577 PHY Control 2 */ -#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 /* I82577 PHY Diagnostics Status */ #define I82577_DSTATUS_CABLE_LENGTH 0x03FC @@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) if (ret_val) return ret_val; + /* Set MDI/MDIX mode */ + ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* + * Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + return e1000_set_master_slave_mode(hw); } diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 7be98b6f1052..3404bc79f4ca 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw) phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* + * Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch 
(hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); out: return ret_val; @@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) if (ret_val) goto out; - phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; - phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 34e40619f16b..6ac3299bfcb9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -111,8 +111,9 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 /* I82580 PHY Control 2 */ -#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 /* I82580 PHY Diagnostics Status */ #define I82580_DSTATUS_CABLE_LENGTH 0x03FC diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9e572dd29ab2..0c9f62caa8fa 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -131,9 +131,9 @@ struct vf_data_storage { #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 /* Supported Rx Buffer Sizes */ -#define IGB_RXBUFFER_512 512 +#define IGB_RXBUFFER_256 256 #define IGB_RXBUFFER_16384 16384 -#define IGB_RX_HDR_LEN IGB_RXBUFFER_512 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 /* How many Tx Descriptors do we need to call netif_wake_queue ? */ #define IGB_TX_QUEUE_WAKE 16 diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 70591117051b..a2944412a55e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full| SUPPORTED_Autoneg | - SUPPORTED_TP); - ecmd->advertising = (ADVERTISED_TP | - ADVERTISED_Pause); + SUPPORTED_TP | + SUPPORTED_Pause); + ecmd->advertising = ADVERTISED_TP; if (hw->mac.autoneg == 1) { ecmd->advertising |= ADVERTISED_Autoneg; @@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->advertising |= hw->phy.autoneg_advertised; } + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + if (hw->fc.requested_mode == e1000_fc_full) + ecmd->advertising |= ADVERTISED_Pause; + else if (hw->fc.requested_mode == e1000_fc_rx_pause) + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else if (hw->fc.requested_mode == e1000_fc_tx_pause) + ecmd->advertising |= ADVERTISED_Asym_Pause; + else + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + ecmd->port = PORT_TP; ecmd->phy_address = hw->phy.addr; } else { @@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : + ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + return 0; } @@ -214,6 +242,22 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) return -EINVAL; } + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) msleep(1); @@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) hw->fc.requested_mode = e1000_fc_default; } else { u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { clear_bit(__IGB_RESETTING, &adapter->state); return -EINVAL; } } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* + * fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + /* reset the link */ if (netif_running(adapter->netdev)) { igb_down(adapter); @@ -1469,33 +1526,22 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; - u16 phy_reg = 0; hw->mac.autoneg = false; - switch (hw->phy.type) { - case e1000_phy_m88: - /* Auto-MDI/MDIX Off */ - igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); - break; - case e1000_phy_82580: - /* enable MII loopback */ - igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); - break; - case e1000_phy_i210: - /* set loopback speed in PHY */ - igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2), - &phy_reg); - phy_reg |= GS40G_MAC_SPEED_1G; - igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2), - phy_reg); - ctrl_reg = rd32(E1000_CTRL_EXT); - default: - break; + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } } /* add small delay to avoid loopback test failure */ @@ -1513,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) E1000_CTRL_FD | /* Force Duplex to FULL */ E1000_CTRL_SLU); /* Set link up enable bit */ - if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) + if (hw->phy.type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ wr32(E1000_CTRL, ctrl_reg); @@ -1521,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) /* Disable the receiver on the PHY so when a 
cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC. */ - if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) + if (hw->phy.type == e1000_phy_m88) igb_phy_disable_receiver(adapter); - udelay(500); - + mdelay(500); return 0; } @@ -1785,13 +1830,6 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) *data = 0; goto out; } - if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i211)) { - dev_err(&adapter->pdev->dev, - "Loopback test not supported on this part at this time.\n"); - *data = 0; - goto out; - } *data = igb_setup_desc_rings(adapter); if (*data) goto out; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 48cc4fb1a307..73cc273ef98b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6675,6 +6675,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + return 0; err_inval: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index b9623e9ea895..bffcf1f2357a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -78,6 +78,9 @@ /* Supported Rx Buffer Sizes */ #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ /* @@ -104,6 +107,7 @@ #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) #define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) +#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -293,16 +297,25 @@ struct ixgbe_ring_feature { * this is twice the size of a half page we need to double the page order * for FCoE enabled Rx queues. */ -#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) -static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { - return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0; +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K : + IXGBE_RXBUFFER_3K; +#endif + return IXGBE_RXBUFFER_2K; } -#else -#define ixgbe_rx_pg_order(_ring) 0 + +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? 
1 : 0; #endif + return 0; +} #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) -#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring)) struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4326f74f7137..1cbb34f507c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, } bi->dma = dma; - bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); + bi->page_offset = 0; return true; } @@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return max_len; } -static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - __le32 rsc_enabled; - u32 rsc_cnt; - - if (!ring_is_rsc_enabled(rx_ring)) - return; - - rsc_enabled = rx_desc->wb.lower.lo_dword.data & - cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); - - /* If this is an RSC frame rsc_cnt should be non-zero */ - if (!rsc_enabled) - return; - - rsc_cnt = le32_to_cpu(rsc_enabled); - rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; - - IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; -} - static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { @@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, prefetch(IXGBE_RX_DESC(rx_ring, ntc)); - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - return false; + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); - /* append_cnt indicates packet is RSC, if so fetch nextp */ - if (IXGBE_CB(skb)->append_cnt) { - ntc = le32_to_cpu(rx_desc->wb.upper.status_error); - ntc &= IXGBE_RXDADV_NEXTP_MASK; - ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } } + /* if we are the last buffer then there is nothing else to do */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ rx_ring->rx_buffer_info[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; @@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, } /** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
+ */ +static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. + */ +static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; +} + +/** * ixgbe_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; struct net_device *netdev = rx_ring->netdev; - unsigned char *va; - unsigned int pull_len; - - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } else { - dma_sync_single_range_for_cpu(rx_ring->dev, - IXGBE_CB(skb)->dma, - frag->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - IXGBE_CB(skb)->dma = 0; /* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, @@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, return true; } - /* - * it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* - * we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
- */ - pull_len = skb_frag_size(frag); - if (pull_len > IXGBE_RX_HDR_SIZE) - pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - /* - * if we sucked the frag empty then we should free it, - * if there are other frags here something is screwed up in hardware - */ - if (skb_frag_size(frag) == 0) { - BUG_ON(skb_shinfo(skb)->nr_frags != 1); - skb_shinfo(skb)->nr_frags = 0; - __skb_frag_unref(frag); - skb->truesize -= ixgbe_rx_bufsz(rx_ring); - } + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE /* do not attempt to pad FCoE Frames as this will disrupt DDP */ @@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, } /** - * ixgbe_can_reuse_page - determine if we can reuse a page - * @rx_buffer: pointer to rx_buffer containing the page we want to reuse - * - * Returns true if page can be reused in another Rx buffer - **/ -static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer) -{ - struct page *page = rx_buffer->page; - - /* if we are only owner of page and it is local we can reuse it */ - return likely(page_count(page) == 1) && - likely(page_to_nid(page) == numa_node_id()); -} - -/** * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * - * Syncronizes page for reuse by the adapter + * Synchronizes page for reuse by the adapter **/ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *old_buff) { struct ixgbe_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; - u16 bufsz = ixgbe_rx_bufsz(rx_ring); new_buff = &rx_ring->rx_buffer_info[nta]; @@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, /* transfer page from old buffer to new buffer */ new_buff->page = old_buff->page; new_buff->dma = old_buff->dma; - - /* flip page offset to other buffer and store to new_buff */ - new_buff->page_offset = old_buff->page_offset ^ bufsz; + new_buff->page_offset = old_buff->page_offset; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, bufsz, + new_buff->page_offset, + ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - - /* bump ref count on page before it is given to the stack */ - get_page(new_buff->page); } /** @@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * - * This function is based on skb_add_rx_frag. I would have used that - * function however it doesn't handle the truesize case correctly since we - * are allocating more memory than might be used for a single receive. + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
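/*
 * [Editorial illustration, not part of the patch] On systems with
 * PAGE_SIZE < 8192 the reuse path in ixgbe_add_rx_frag() below ping-pongs
 * between the two 2K halves of a single page by XOR-ing the offset with the
 * buffer size ('rx_buffer->page_offset ^= truesize'). A standalone model of
 * that flip:
 */
#include <assert.h>

int main(void)
{
	const unsigned int bufsz = 2048;	/* IXGBE_RXBUFFER_2K */
	unsigned int page_offset = 0;		/* half currently handed to the stack */

	page_offset ^= bufsz;			/* the other half is used next time */
	assert(page_offset == 2048);
	page_offset ^= bufsz;			/* and the first half after that */
	assert(page_offset == 0);
	return 0;
}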
**/ -static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb, int size) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); +#endif + + if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; + + /* + * since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unecessary locked operation + */ + atomic_set(&page->_count, 2); +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); +#endif + + return true; +} + +static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer->page, rx_buffer->page_offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += ixgbe_rx_bufsz(rx_ring); + struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + goto dma_sync; + + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + ixgbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + + /* pull page into skb */ + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } /** @@ -1658,11 +1790,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, u16 cleaned_count = ixgbe_desc_unused(rx_ring); do { - struct ixgbe_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; - struct page *page; - u16 ntc; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -1670,9 +1799,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, cleaned_count = 0; } - ntc = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC(rx_ring, ntc); - rx_buffer = &rx_ring->rx_buffer_info[ntc]; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) break; @@ -1684,75 +1811,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ rmb(); - page = rx_buffer->page; - prefetchw(page); - - skb = rx_buffer->skb; - - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif - - /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - break; - } - - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - - /* pull page into skb */ - ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, - le16_to_cpu(rx_desc->wb.upper.length)); - - if (ixgbe_can_reuse_page(rx_buffer)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - } - - /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; - rx_buffer->page = NULL; + /* retrieve a buffer from the ring */ + skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); - ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; cleaned_count++; @@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ -#if PAGE_SIZE > IXGBE_MAX_RXBUFFER - srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#else srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#endif /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, * total size of max desc * buf_len is not greater * than 65536 */ -#if (PAGE_SIZE <= 8192) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; -#elif (PAGE_SIZE <= 16384) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; -#else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; -#endif IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } @@ -4130,27 +4184,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } /** - * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers - * @rx_ring: ring to setup - * - * On many IA platforms the L1 cache has a critical stride of 4K, this - * results in each receive buffer starting in the same cache set. To help - * reduce the pressure on this cache set we can interleave the offsets so - * that only every other buffer will be in the same cache set. 
- **/ -static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring) -{ - struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info; - u16 i; - - for (i = 0; i < rx_ring->count; i += 2) { - rx_buffer[0].page_offset = 0; - rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring); - rx_buffer = &rx_buffer[2]; - } -} - -/** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ @@ -4195,8 +4228,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size); - ixgbe_init_rx_page_offset(rx_ring); - /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); @@ -4646,8 +4677,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - ixgbe_init_rx_page_offset(rx_ring); - return 0; err: vfree(rx_ring->rx_buffer_info); @@ -5874,9 +5903,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) - return; + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { + if (unlikely(skb->no_fcs)) + first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; + if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) + return; + } } else { u8 l4_hdr = 0; switch (first->protocol) { @@ -5938,7 +5970,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); /* set HW vlan bit if vlan is present */ @@ -5958,6 +5989,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) #endif cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); + /* insert frame checksum */ + if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); + return cmd_type; } @@ -6063,8 +6098,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, if (likely(!data_len)) break; - if (unlikely(skb->no_fcs)) - cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS)); tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); i++; @@ -7136,11 +7169,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_ioremap; } - for (i = 1; i <= 5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - } - netdev->netdev_ops = &ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 98cadb0c4dab..eb26fda63c99 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -101,7 +101,9 @@ struct ixgbevf_ring { /* Supported Rx Buffer Sizes */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_3K 3072 +#define IXGBEVF_RXBUFFER_7K 7168 +#define IXGBEVF_RXBUFFER_15K 15360 #define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 60ef64587412..a5d9cc5bb257 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1057,15 +1057,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) srrctl |= 
IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= IXGBEVF_RXBUFFER_2048 >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_ring->rx_buf_len >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } +static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i; + u16 rx_buf_len; + + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* PF will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. + */ + if ((hw->mac.type == ixgbe_mac_X540_vf) && + (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else if (max_frame <= IXGBEVF_RXBUFFER_3K) + rx_buf_len = IXGBEVF_RXBUFFER_3K; + else if (max_frame <= IXGBEVF_RXBUFFER_7K) + rx_buf_len = IXGBEVF_RXBUFFER_7K; + else if (max_frame <= IXGBEVF_RXBUFFER_15K) + rx_buf_len = IXGBEVF_RXBUFFER_15K; + else + rx_buf_len = IXGBEVF_MAX_RXBUFFER; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].rx_buf_len = rx_buf_len; +} + /** * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * @adapter: board private structure @@ -1076,18 +1107,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { u64 rdba; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, j; u32 rdlen; - int rx_buf_len; /* PSRTYPE must be initialized in 82599 */ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buf_len = ALIGN(max_frame, 1024); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbevf_set_rx_buffer_len(adapter); rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); /* Setup the HW Rx Head and Tail Descriptor Pointers and @@ -1103,7 +1130,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); adapter->rx_ring[i].head = IXGBE_VFRDH(j); adapter->rx_ring[i].tail = IXGBE_VFRDT(j); - adapter->rx_ring[i].rx_buf_len = rx_buf_len; ixgbevf_configure_srrctl(adapter, j); } @@ -1315,7 +1341,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) int i, j = 0; int num_rx_rings = adapter->num_rx_queues; u32 txdctl, rxdctl; - u32 msg[2]; for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; @@ -1356,10 +1381,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); } - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - hw->mbx.ops.write_posted(hw, msg, 2); - spin_unlock(&adapter->mbx_lock); clear_bit(__IXGBEVF_DOWN, &adapter->state); @@ -1867,6 +1888,22 @@ err_set_interrupt: } /** + * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +static void ixgbevf_clear_interrupt_scheme(struct 
ixgbevf_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbevf_free_q_vectors(adapter); + ixgbevf_reset_interrupt_capability(adapter); +} + +/** * ixgbevf_sw_init - Initialize general software structures * (struct ixgbevf_adapter) * @adapter: board private structure to initialize @@ -2860,10 +2897,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; - u32 msg[2]; if (adapter->hw.mac.type == ixgbe_mac_X540_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; @@ -2877,35 +2912,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - if (!netif_running(netdev)) { - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = max_frame; - hw->mbx.ops.write_posted(hw, msg, 2); - } - if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); return 0; } -static void ixgbevf_shutdown(struct pci_dev *pdev) +static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif netif_device_detach(netdev); if (netif_running(netdev)) { + rtnl_lock(); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); + rtnl_unlock(); } - pci_save_state(pdev); + ixgbevf_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbevf_resume(struct pci_dev *pdev) +{ + struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + rtnl_lock(); + err = ixgbevf_init_interrupt_scheme(adapter); + rtnl_unlock(); + if (err) { + dev_err(&pdev->dev, "Cannot initialize interrupts\n"); + return err; + } + + ixgbevf_reset(adapter); + + if (netif_running(netdev)) { + err = ixgbevf_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} + +#endif /* CONFIG_PM */ +static void ixgbevf_shutdown(struct pci_dev *pdev) +{ + ixgbevf_suspend(pdev, PMSG_SUSPEND); } static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, @@ -2946,7 +3037,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } -static const struct net_device_ops ixgbe_netdev_ops = { +static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, @@ -2962,7 +3053,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { static void ixgbevf_assign_netdev_ops(struct net_device *dev) { - dev->netdev_ops = &ixgbe_netdev_ops; + dev->netdev_ops = &ixgbevf_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } @@ -3131,6 +3222,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, return 0; err_register: + ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(hw->hw_addr); @@ -3168,6 +3260,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->hw.hw_addr); @@ -3267,6 +3360,11 @@ static struct pci_driver ixgbevf_driver = { .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = __devexit_p(ixgbevf_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, +#endif .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index ec89b86f7ca4..3d555a10f592 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -419,6 +419,20 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, return 0; } +/** + * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + + msgbuf[0] = IXGBE_VF_SET_LPE; + msgbuf[1] = max_size; + ixgbevf_write_msg_read_ack(hw, msgbuf, 2); +} + static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .init_hw = ixgbevf_init_hw_vf, .reset_hw = ixgbevf_reset_hw_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 25c951daee5d..07fd87688e35 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -170,5 +170,6 @@ struct ixgbevf_info { const struct ixgbe_mac_operations *mac_ops; }; +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); #endif /* __IXGBE_VF_H__ */ diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index f45def01a98e..876beceaf2d7 100644 --- 
a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3409,7 +3409,7 @@ set_speed: pause_flags = 0; /* setup pause frame */ - if (np->duplex != 0) { + if (netif_running(dev) && (np->duplex != 0)) { if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); @@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void regs->version = FORCEDETH_REGS_VER; spin_lock_irq(&np->lock); - for (i = 0; i <= np->register_size/sizeof(u32); i++) + for (i = 0; i < np->register_size/sizeof(u32); i++) rbuf[i] = readl(base + i*sizeof(u32)); spin_unlock_irq(&np->lock); } @@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev) netif_stop_queue(dev); spin_lock_irq(&np->lock); + nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */ nv_stop_rxtx(dev); nv_txrx_reset(dev); @@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } + netif_carrier_off(dev); + + /* Some NICs freeze when TX pause is enabled while NIC is + * down, and this stays across warm reboots. The sequence + * below should be enough to recover from that state. + */ + nv_update_pause(dev, 0); + nv_start_tx(dev); + nv_stop_tx(dev); + if (id->driver_data & DEV_HAS_VLAN) nv_vlan_mode(dev, dev->features); - netif_carrier_off(dev); - dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b47d5b35024e..0c96604e6246 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 65a8d49106a4..a606db43c5ba 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ - if ((efx->state == STATE_RUNNING) || \ + if ((efx->state == STATE_READY) || \ (efx->state == STATE_DISABLED)) \ ASSERT_RTNL(); \ } while (0) +static int efx_check_disabled(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED) { + netif_err(efx, drv, efx->net_dev, + "device is disabled due to earlier errors\n"); + return -EIO; + } + return 0; +} + /************************************************************************** * * Event queue processing @@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx) efx->rx_buffer_order = get_order(efx->rx_buffer_len + sizeof(struct efx_rx_page_state)); + /* We must keep at least one descriptor in a TX ring empty. + * We could avoid this when the queue size does not exactly + * match the hardware ring size, but it's not that important. + * Therefore we stop the queue when one more skb might fill + * the ring completely. 
We wake it when half way back to + * empty. + */ + efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); + efx->txq_wake_thresh = efx->txq_stop_thresh / 2; + /* Initialise the channels */ efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) @@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; u32 old_rxq_entries, old_txq_entries; unsigned i, next_buffer_table = 0; - int rc = 0; + int rc; + + rc = efx_check_disabled(efx); + if (rc) + return rc; /* Not all channels should be reallocated. We must avoid * reallocating their buffer table entries. @@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) { struct efx_channel *channel; + BUG_ON(efx->state == STATE_DISABLED); + if (efx->legacy_irq) efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); @@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) { struct efx_channel *channel; + if (efx->state == STATE_DISABLED) + return; + efx_mcdi_mode_poll(efx); efx_nic_disable_interrupts(efx); @@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx) return rc; } -/* Called after previous invocation(s) of efx_stop_all, restarts the port, - * kernel transmit queues and NAPI processing, and ensures that the port is - * scheduled to be reconfigured. This function is safe to call multiple - * times when the NIC is in any state. +/* If the interface is supposed to be running but is not, start + * the hardware and software data path, regular activity for the port + * (MAC statistics, link polling, etc.) and schedule the port to be + * reconfigured. Interrupts must already be enabled. This function + * is safe to call multiple times, so long as the NIC is not disabled. + * Requires the RTNL lock. */ static void efx_start_all(struct efx_nic *efx) { EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->state == STATE_DISABLED); /* Check that it is appropriate to restart the interface. All * of these flags are safe to read under just the rtnl lock */ - if (efx->port_enabled) - return; - if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) - return; - if (!netif_running(efx->net_dev)) + if (efx->port_enabled || !netif_running(efx->net_dev)) return; efx_start_port(efx); @@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx) cancel_work_sync(&efx->mac_work); } -/* Quiesce hardware and software without bringing the link down. - * Safe to call multiple times, when the nic and interface is in any - * state. The caller is guaranteed to subsequently be in a position - * to modify any hardware and software state they see fit without - * taking locks. */ +/* Quiesce the hardware and software data path, and regular activity + * for the port without bringing the link down. Safe to call multiple + * times with the NIC in almost any state, but interrupts should be + * enabled. Requires the RTNL lock. 
+ */ static void efx_stop_all(struct efx_nic *efx) { EFX_ASSERT_RESET_SERIALISED(efx); @@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) struct efx_nic *efx = netdev_priv(net_dev); struct mii_ioctl_data *data = if_mii(ifr); - EFX_ASSERT_RESET_SERIALISED(efx); - /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && (data->phy_id & 0xfc00) == 0x0400) @@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev) static int efx_net_open(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - EFX_ASSERT_RESET_SERIALISED(efx); + int rc; netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", raw_smp_processor_id()); - if (efx->state == STATE_DISABLED) - return -EIO; + rc = efx_check_disabled(efx); + if (rc) + return rc; if (efx->phy_mode & PHY_MODE_SPECIAL) return -EBUSY; if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) @@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev) netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", raw_smp_processor_id()); - if (efx->state != STATE_DISABLED) { - /* Stop the device and flush all the channels */ - efx_stop_all(efx); - } + /* Stop the device and flush all the channels */ + efx_stop_all(efx); return 0; } @@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev) static int efx_change_mtu(struct net_device *net_dev, int new_mtu) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; - EFX_ASSERT_RESET_SERIALISED(efx); - + rc = efx_check_disabled(efx); + if (rc) + return rc; if (new_mtu > EFX_MAX_MTU) return -EINVAL; @@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); mutex_lock(&efx->mac_lock); - /* Reconfigure the MAC before enabling the dma queues so that - * the RX buffers don't overflow */ net_dev->mtu = new_mtu; efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); @@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) struct sockaddr *addr = data; char *new_addr = addr->sa_data; - EFX_ASSERT_RESET_SERIALISED(efx); - if (!is_valid_ether_addr(new_addr)) { netif_err(efx, drv, efx->net_dev, "invalid ethernet MAC address requested: %pM\n", @@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx) rtnl_lock(); + /* Enable resets to be scheduled and check whether any were + * already requested. If so, the NIC is probably hosed so we + * abort. 
+ */ + efx->state = STATE_READY; + smp_mb(); /* ensure we change state before checking reset_pending */ + if (efx->reset_pending) { + netif_err(efx, probe, efx->net_dev, + "aborting probe due to scheduled reset\n"); + rc = -EIO; + goto fail_locked; + } + rc = dev_alloc_name(net_dev, net_dev->name); if (rc < 0) goto fail_locked; efx_update_name(efx); + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + rc = register_netdevice(net_dev); if (rc) goto fail_locked; @@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx) efx_init_tx_queue_core_txq(tx_queue); } - /* Always start with carrier off; PHY events will detect the link */ - netif_carrier_off(net_dev); - rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); @@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx) return 0; +fail_registered: + rtnl_lock(); + unregister_netdevice(net_dev); fail_locked: + efx->state = STATE_UNINIT; rtnl_unlock(); netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); return rc; - -fail_registered: - unregister_netdev(net_dev); - return rc; } static void efx_unregister_netdev(struct efx_nic *efx) @@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx) strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); - unregister_netdev(efx->net_dev); + + rtnl_lock(); + unregister_netdevice(efx->net_dev); + efx->state = STATE_UNINIT; + rtnl_unlock(); } /************************************************************************** @@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) EFX_ASSERT_RESET_SERIALISED(efx); efx_stop_all(efx); - mutex_lock(&efx->mac_lock); - efx_stop_interrupts(efx, false); + + mutex_lock(&efx->mac_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) efx->phy_op->fini(efx); efx->type->fini(efx); @@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data) if (!pending) return; - /* If we're not RUNNING then don't reset. Leave the reset_pending - * flags set so that efx_pci_probe_main will be retried */ - if (efx->state != STATE_RUNNING) { - netif_info(efx, drv, efx->net_dev, - "scheduled reset quenched. NIC not RUNNING\n"); - return; - } - rtnl_lock(); - (void)efx_reset(efx, fls(pending) - 1); + + /* We checked the state in efx_schedule_reset() but it may + * have changed by now. Now that we have the RTNL lock, + * it cannot change again. + */ + if (efx->state == STATE_READY) + (void)efx_reset(efx, fls(pending) - 1); + rtnl_unlock(); } @@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) } set_bit(method, &efx->reset_pending); + smp_mb(); /* ensure we change reset_pending before checking state */ + + /* If we're not READY then just leave the flags set as the cue + * to abort probing or reschedule the reset later. + */ + if (ACCESS_ONCE(efx->state) != STATE_READY) + return; /* efx_process_channel() will no longer read events once a * reset is scheduled. So switch back to poll'd MCDI completions. */ @@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = { /* This zeroes out and then fills in the invariants in a struct * efx_nic (including all sub-structures). 
*/ -static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, +static int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev) { int i; /* Initialise common structures */ - memset(efx, 0, sizeof(*efx)); spin_lock_init(&efx->biu_lock); #ifdef CONFIG_SFC_MTD INIT_LIST_HEAD(&efx->mtd_list); @@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); efx->pci_dev = pci_dev; efx->msg_enable = debug; - efx->state = STATE_INIT; + efx->state = STATE_UNINIT; strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); efx->net_dev = net_dev; @@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, goto fail; } - efx->type = type; - EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); /* Higher numbered interrupt modes are less capable! */ @@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx) */ static void efx_pci_remove_main(struct efx_nic *efx) { + /* Flush reset_work. It can no longer be scheduled since we + * are not READY. + */ + BUG_ON(efx->state == STATE_READY); + cancel_work_sync(&efx->reset_work); + #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); efx->net_dev->rx_cpu_rmap = NULL; @@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev) /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); - efx->state = STATE_FINI; dev_close(efx->net_dev); - - /* Allow any queued efx_resets() to complete */ + efx_stop_interrupts(efx, false); rtnl_unlock(); - efx_stop_interrupts(efx, false); efx_sriov_fini(efx); efx_unregister_netdev(efx); efx_mtd_remove(efx); - /* Wait for any scheduled resets to complete. No more will be - * scheduled from this point because efx_stop_all() has been - * called, we are no longer registered with driverlink, and - * the net_device's have been removed. 
*/ - cancel_work_sync(&efx->reset_work); - efx_pci_remove_main(efx); efx_fini_io(efx); @@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx) static int __devinit efx_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *entry) { - const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; struct net_device *net_dev; struct efx_nic *efx; int rc; @@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, EFX_MAX_RX_QUEUES); if (!net_dev) return -ENOMEM; - net_dev->features |= (type->offload_features | NETIF_F_SG | + efx = netdev_priv(net_dev); + efx->type = (const struct efx_nic_type *) entry->driver_data; + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_RXCSUM); - if (type->offload_features & NETIF_F_V6_CSUM) + if (efx->type->offload_features & NETIF_F_V6_CSUM) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | @@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, NETIF_F_RXCSUM); /* All offloads can be toggled */ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; - efx = netdev_priv(net_dev); pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); - rc = efx_init_struct(efx, type, pci_dev, net_dev); + rc = efx_init_struct(efx, pci_dev, net_dev); if (rc) goto fail1; @@ -2656,28 +2696,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, goto fail2; rc = efx_pci_probe_main(efx); - - /* Serialise against efx_reset(). No more resets will be - * scheduled since efx_stop_all() has been called, and we have - * not and never have been registered. - */ - cancel_work_sync(&efx->reset_work); - if (rc) goto fail3; - /* If there was a scheduled reset during probe, the NIC is - * probably hosed anyway. 
- */ - if (efx->reset_pending) { - rc = -EIO; - goto fail4; - } - - /* Switch to the running state before we expose the device to the OS, - * so that dev_open()|efx_start_all() will actually start the device */ - efx->state = STATE_RUNNING; - rc = efx_register_netdev(efx); if (rc) goto fail4; @@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); - efx->state = STATE_FINI; + rtnl_lock(); - netif_device_detach(efx->net_dev); + if (efx->state != STATE_DISABLED) { + efx->state = STATE_UNINIT; - efx_stop_all(efx); - efx_stop_interrupts(efx, false); + netif_device_detach(efx->net_dev); + + efx_stop_all(efx); + efx_stop_interrupts(efx, false); + } + + rtnl_unlock(); return 0; } @@ -2731,21 +2758,25 @@ static int efx_pm_thaw(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); - efx->state = STATE_INIT; + rtnl_lock(); - efx_start_interrupts(efx, false); + if (efx->state != STATE_DISABLED) { + efx_start_interrupts(efx, false); - mutex_lock(&efx->mac_lock); - efx->phy_op->reconfigure(efx); - mutex_unlock(&efx->mac_lock); + mutex_lock(&efx->mac_lock); + efx->phy_op->reconfigure(efx); + mutex_unlock(&efx->mac_lock); - efx_start_all(efx); + efx_start_all(efx); - netif_device_attach(efx->net_dev); + netif_device_attach(efx->net_dev); - efx->state = STATE_RUNNING; + efx->state = STATE_READY; - efx->type->resume_wol(efx); + efx->type->resume_wol(efx); + } + + rtnl_unlock(); /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ queue_work(reset_workqueue, &efx->reset_work); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 5faedd855b77..f8e7e204981f 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (!efx_tests) goto fail; - - ASSERT_RTNL(); - if (efx->state != STATE_RUNNING) { + if (efx->state != STATE_READY) { rc = -EIO; goto fail1; } diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c index 8687a6c3db0d..ec1e99d0dcad 100644 --- a/drivers/net/ethernet/sfc/falcon_boards.c +++ b/drivers/net/ethernet/sfc/falcon_boards.c @@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev, new_mode = PHY_MODE_SPECIAL; if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { err = 0; - } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { + } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) { err = -EBUSY; } else { /* Reset the PHY, reconfigure the MAC and enable/disable diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index cd9c0a989692..7ab1232494ef 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -91,29 +91,31 @@ struct efx_special_buffer { }; /** - * struct efx_tx_buffer - An Efx TX buffer - * @skb: The associated socket buffer. - * Set only on the final fragment of a packet; %NULL for all other - * fragments. When this fragment completes, then we can free this - * skb. - * @tsoh: The associated TSO header structure, or %NULL if this - * buffer is not a TSO header. + * struct efx_tx_buffer - buffer state for a TX descriptor + * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be + * freed when descriptor completes + * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be + * freed when descriptor completes. 
* @dma_addr: DMA address of the fragment. + * @flags: Flags for allocation and DMA mapping type * @len: Length of this fragment. * This field is zero when the queue slot is empty. - * @continuation: True if this fragment is not the end of a packet. - * @unmap_single: True if dma_unmap_single should be used. * @unmap_len: Length of this fragment to unmap */ struct efx_tx_buffer { - const struct sk_buff *skb; - struct efx_tso_header *tsoh; + union { + const struct sk_buff *skb; + void *heap_buf; + }; dma_addr_t dma_addr; + unsigned short flags; unsigned short len; - bool continuation; - bool unmap_single; unsigned short unmap_len; }; +#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */ +#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ +#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ +#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ /** * struct efx_tx_queue - An Efx TX queue @@ -133,6 +135,7 @@ struct efx_tx_buffer { * @channel: The associated channel * @core_txq: The networking core TX queue structure * @buffer: The software buffer ring + * @tsoh_page: Array of pages of TSO header buffers * @txd: The hardware descriptor ring * @ptr_mask: The size of the ring minus 1. * @initialised: Has hardware queue been initialised? @@ -156,9 +159,6 @@ struct efx_tx_buffer { * variable indicates that the queue is full. This is to * avoid cache-line ping-pong between the xmit path and the * completion path. - * @tso_headers_free: A list of TSO headers allocated for this TX queue - * that are not in use, and so available for new TSO sends. The list - * is protected by the TX queue lock. * @tso_bursts: Number of times TSO xmit invoked by kernel * @tso_long_headers: Number of packets with headers too long for standard * blocks @@ -175,6 +175,7 @@ struct efx_tx_queue { struct efx_channel *channel; struct netdev_queue *core_txq; struct efx_tx_buffer *buffer; + struct efx_buffer *tsoh_page; struct efx_special_buffer txd; unsigned int ptr_mask; bool initialised; @@ -187,7 +188,6 @@ struct efx_tx_queue { unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; unsigned int old_read_count; - struct efx_tso_header *tso_headers_free; unsigned int tso_bursts; unsigned int tso_long_headers; unsigned int tso_packets; @@ -430,11 +430,9 @@ enum efx_int_mode { #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) enum nic_state { - STATE_INIT = 0, - STATE_RUNNING = 1, - STATE_FINI = 2, - STATE_DISABLED = 3, - STATE_MAX, + STATE_UNINIT = 0, /* device being probed/removed or is frozen */ + STATE_READY = 1, /* hardware ready and netdev registered */ + STATE_DISABLED = 2, /* device disabled due to hardware errors */ }; /* @@ -654,7 +652,7 @@ struct vfdi_status; * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues * @irq_rx_moderation: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags - * @state: Device state flag. Serialised by the rtnl_lock. + * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. * @reset_pending: Bitmask for pending resets * @tx_queue: TX DMA queues * @rx_queue: RX DMA queues @@ -664,6 +662,8 @@ struct vfdi_status; * should be allocated for this NIC * @rxq_entries: Size of receive queues requested by user. * @txq_entries: Size of transmit queues requested by user. + * @txq_stop_thresh: TX queue fill level at or above which we stop it. + * @txq_wake_thresh: TX queue fill level at or below which we wake it. 
* @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches * @sram_lim_qw: Qword address limit of SRAM @@ -774,6 +774,9 @@ struct efx_nic { unsigned rxq_entries; unsigned txq_entries; + unsigned int txq_stop_thresh; + unsigned int txq_wake_thresh; + unsigned tx_dc_base; unsigned rx_dc_base; unsigned sram_lim_qw; diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 326d799762d6..cdff40b65729 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) /************************************************************************** * * Generic buffer handling - * These buffers are used for interrupt status and MAC stats + * These buffers are used for interrupt status, MAC stats, etc. * **************************************************************************/ @@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) ++tx_queue->write_count; /* Create TX descriptor ring entry */ + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_4(*txd, - FSF_AZ_TX_KER_CONT, buffer->continuation, + FSF_AZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, FSF_AZ_TX_KER_BUF_REGION, 0, FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 18713436b443..ebca75ed78dc 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -22,14 +22,6 @@ #include "nic.h" #include "workarounds.h" -/* - * TX descriptor ring full threshold - * - * The tx_queue descriptor ring fill-level must fall below this value - * before we restart the netif queue - */ -#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) - static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, unsigned int *pkts_compl, @@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct device *dma_dev = &tx_queue->efx->pci_dev->dev; dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); - if (buffer->unmap_single) + if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, DMA_TO_DEVICE); else dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, DMA_TO_DEVICE); buffer->unmap_len = 0; - buffer->unmap_single = false; } - if (buffer->skb) { + if (buffer->flags & EFX_TX_BUF_SKB) { (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; dev_kfree_skb_any((struct sk_buff *) buffer->skb); - buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); + } else if (buffer->flags & EFX_TX_BUF_HEAP) { + kfree(buffer->heap_buf); } -} -/** - * struct efx_tso_header - a DMA mapped buffer for packet headers - * @next: Linked list of free ones. - * The list is protected by the TX queue lock. - * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. - * @dma_addr: The DMA address of the header below. - * - * This controls the memory used for a TSO header. Use TSOH_DATA() - * to find the packet header data. Use TSOH_SIZE() to calculate the - * total size required for a given packet header length. TSO headers - * in the free list are exactly %TSOH_STD_SIZE bytes in size. 
- */ -struct efx_tso_header { - union { - struct efx_tso_header *next; - size_t unmap_len; - }; - dma_addr_t dma_addr; -}; + buffer->len = 0; + buffer->flags = 0; +} static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb); -static void efx_fini_tso(struct efx_tx_queue *tx_queue); -static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh); - -static void efx_tsoh_free(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer) -{ - if (buffer->tsoh) { - if (likely(!buffer->tsoh->unmap_len)) { - buffer->tsoh->next = tx_queue->tso_headers_free; - tx_queue->tso_headers_free = buffer->tsoh; - } else { - efx_tsoh_heap_free(tx_queue, buffer->tsoh); - } - buffer->tsoh = NULL; - } -} - static inline unsigned efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) @@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) return max_descs; } +/* Get partner of a TX queue, seen as part of the same net core queue */ +static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + return tx_queue - EFX_TXQ_TYPE_OFFLOAD; + else + return tx_queue + EFX_TXQ_TYPE_OFFLOAD; +} + +static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) +{ + /* We need to consider both queues that the net core sees as one */ + struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1); + struct efx_nic *efx = txq1->efx; + unsigned int fill_level; + + fill_level = max(txq1->insert_count - txq1->old_read_count, + txq2->insert_count - txq2->old_read_count); + if (likely(fill_level < efx->txq_stop_thresh)) + return; + + /* We used the stale old_read_count above, which gives us a + * pessimistic estimate of the fill level (which may even + * validly be >= efx->txq_entries). Now try again using + * read_count (more likely to be a cache miss). + * + * If we read read_count and then conditionally stop the + * queue, it is possible for the completion path to race with + * us and complete all outstanding descriptors in the middle, + * after which there will be no more completions to wake it. + * Therefore we stop the queue first, then read read_count + * (with a memory barrier to ensure the ordering), then + * restart the queue if the fill level turns out to be low + * enough. + */ + netif_tx_stop_queue(txq1->core_txq); + smp_mb(); + txq1->old_read_count = ACCESS_ONCE(txq1->read_count); + txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + + fill_level = max(txq1->insert_count - txq1->old_read_count, + txq2->insert_count - txq2->old_read_count); + EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); + if (likely(fill_level < efx->txq_stop_thresh)) { + smp_mb(); + if (likely(!efx->loopback_selftest)) + netif_tx_start_queue(txq1->core_txq); + } +} + /* * Add a socket buffer to a TX queue * @@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) * This function is split out from efx_hard_start_xmit to allow the * loopback test to direct packets via specific TX queues. * - * Returns NETDEV_TX_OK or NETDEV_TX_BUSY + * Returns NETDEV_TX_OK. * You must hold netif_tx_lock() to call this function. 
*/ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) @@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) struct device *dma_dev = &efx->pci_dev->dev; struct efx_tx_buffer *buffer; skb_frag_t *fragment; - unsigned int len, unmap_len = 0, fill_level, insert_ptr; + unsigned int len, unmap_len = 0, insert_ptr; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; - bool unmap_single; - int q_space, i = 0; - netdev_tx_t rc = NETDEV_TX_OK; + unsigned short dma_flags; + int i = 0; EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); @@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) return NETDEV_TX_OK; } - fill_level = tx_queue->insert_count - tx_queue->old_read_count; - q_space = efx->txq_entries - 1 - fill_level; - /* Map for DMA. Use dma_map_single rather than dma_map_page * since this is more efficient on machines with sparse * memory. */ - unmap_single = true; + dma_flags = EFX_TX_BUF_MAP_SINGLE; dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); /* Process all fragments */ @@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Add to TX queue, splitting across DMA boundaries */ do { - if (unlikely(q_space-- <= 0)) { - /* It might be that completions have - * happened since the xmit path last - * checked. Update the xmit path's - * copy of read_count. - */ - netif_tx_stop_queue(tx_queue->core_txq); - /* This memory barrier protects the - * change of queue state from the access - * of read_count. */ - smp_mb(); - tx_queue->old_read_count = - ACCESS_ONCE(tx_queue->read_count); - fill_level = (tx_queue->insert_count - - tx_queue->old_read_count); - q_space = efx->txq_entries - 1 - fill_level; - if (unlikely(q_space-- <= 0)) { - rc = NETDEV_TX_BUSY; - goto unwind; - } - smp_mb(); - if (likely(!efx->loopback_selftest)) - netif_tx_start_queue( - tx_queue->core_txq); - } - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->tsoh); - EFX_BUG_ON_PARANOID(buffer->skb); + EFX_BUG_ON_PARANOID(buffer->flags); EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->unmap_len); dma_len = efx_max_tx_len(efx, dma_addr); @@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Fill out per descriptor fields */ buffer->len = dma_len; buffer->dma_addr = dma_addr; + buffer->flags = EFX_TX_BUF_CONT; len -= dma_len; dma_addr += dma_len; ++tx_queue->insert_count; } while (len); /* Transfer ownership of the unmapping to the final buffer */ - buffer->unmap_single = unmap_single; + buffer->flags = EFX_TX_BUF_CONT | dma_flags; buffer->unmap_len = unmap_len; unmap_len = 0; @@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) len = skb_frag_size(fragment); i++; /* Map for DMA */ - unmap_single = false; + dma_flags = 0; dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, DMA_TO_DEVICE); } /* Transfer ownership of the skb to the final buffer */ buffer->skb = skb; - buffer->continuation = false; + buffer->flags = EFX_TX_BUF_SKB | dma_flags; netdev_tx_sent_queue(tx_queue->core_txq, skb->len); /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); + efx_tx_maybe_stop_queue(tx_queue); + return NETDEV_TX_OK; dma_err: @@ -289,7 +266,6 @@ 
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Mark the packet as transmitted, and free the SKB ourselves */ dev_kfree_skb_any(skb); - unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { unsigned int pkts_compl = 0, bytes_compl = 0; @@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); - buffer->len = 0; } /* Free the fragment we were mid-way through pushing */ if (unmap_len) { - if (unmap_single) + if (dma_flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(dma_dev, unmap_addr, unmap_len, DMA_TO_DEVICE); else @@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) DMA_TO_DEVICE); } - return rc; + return NETDEV_TX_OK; } /* Remove packets from the TX queue @@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, } efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); - buffer->continuation = true; - buffer->len = 0; ++tx_queue->read_count; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; @@ -450,6 +423,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; + struct efx_tx_queue *txq2; unsigned int pkts_compl = 0, bytes_compl = 0; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); @@ -457,15 +431,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); - /* See if we need to restart the netif queue. This barrier - * separates the update of read_count from the test of the - * queue state. */ + /* See if we need to restart the netif queue. This memory + * barrier ensures that we write read_count (inside + * efx_dequeue_buffers()) before reading the queue status. + */ smp_mb(); if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && likely(efx->port_enabled) && likely(netif_device_present(efx->net_dev))) { - fill_level = tx_queue->insert_count - tx_queue->read_count; - if (fill_level < EFX_TXQ_THRESHOLD(efx)) + txq2 = efx_tx_queue_partner(tx_queue); + fill_level = max(tx_queue->insert_count - tx_queue->read_count, + txq2->insert_count - txq2->read_count); + if (fill_level <= efx->txq_wake_thresh) netif_tx_wake_queue(tx_queue->core_txq); } @@ -480,11 +457,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) } } +/* Size of page-based TSO header buffers. Larger blocks must be + * allocated from the heap. + */ +#define TSOH_STD_SIZE 128 +#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) + +/* At most half the descriptors in the queue at any time will refer to + * a TSO header buffer, since they must always be followed by a + * payload descriptor referring to an skb. 
+ */ +static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) +{ + return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); +} + int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; unsigned int entries; - int i, rc; + int rc; /* Create the smallest power-of-two aligned ring */ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); @@ -500,17 +492,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; - for (i = 0; i <= tx_queue->ptr_mask; ++i) - tx_queue->buffer[i].continuation = true; + + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { + tx_queue->tsoh_page = + kcalloc(efx_tsoh_page_count(tx_queue), + sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); + if (!tx_queue->tsoh_page) { + rc = -ENOMEM; + goto fail1; + } + } /* Allocate hardware ring */ rc = efx_nic_probe_tx(tx_queue); if (rc) - goto fail; + goto fail2; return 0; - fail: +fail2: + kfree(tx_queue->tsoh_page); + tx_queue->tsoh_page = NULL; +fail1: kfree(tx_queue->buffer); tx_queue->buffer = NULL; return rc; @@ -546,8 +549,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) unsigned int pkts_compl = 0, bytes_compl = 0; buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); - buffer->continuation = true; - buffer->len = 0; ++tx_queue->read_count; } @@ -568,13 +569,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) efx_nic_fini_tx(tx_queue); efx_release_tx_buffers(tx_queue); - - /* Free up TSO header cache */ - efx_fini_tso(tx_queue); } void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { + int i; + if (!tx_queue->buffer) return; @@ -582,6 +582,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); + if (tx_queue->tsoh_page) { + for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) + efx_nic_free_buffer(tx_queue->efx, + &tx_queue->tsoh_page[i]); + kfree(tx_queue->tsoh_page); + tx_queue->tsoh_page = NULL; + } + kfree(tx_queue->buffer); tx_queue->buffer = NULL; } @@ -604,22 +612,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) #define TSOH_OFFSET NET_IP_ALIGN #endif -#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) - -/* Total size of struct efx_tso_header, buffer and padding */ -#define TSOH_SIZE(hdr_len) \ - (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) - -/* Size of blocks on free list. Larger blocks must be allocated from - * the heap. 
- */ -#define TSOH_STD_SIZE 128 - #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) -#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) -#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) -#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) -#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data) /** * struct tso_state - TSO state for an SKB @@ -631,10 +624,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) * @in_len: Remaining length in current SKB fragment * @unmap_len: Length of SKB fragment * @unmap_addr: DMA address of SKB fragment - * @unmap_single: DMA single vs page mapping flag + * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 * @protocol: Network protocol (after any VLAN header) + * @ip_off: Offset of IP header + * @tcp_off: Offset of TCP header * @header_len: Number of bytes of header - * @full_packet_size: Number of bytes to put in each outgoing segment + * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions. @@ -651,11 +646,13 @@ struct tso_state { unsigned in_len; unsigned unmap_len; dma_addr_t unmap_addr; - bool unmap_single; + unsigned short dma_flags; __be16 protocol; + unsigned int ip_off; + unsigned int tcp_off; unsigned header_len; - int full_packet_size; + unsigned int ip_base_len; }; @@ -687,91 +684,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb) return protocol; } - -/* - * Allocate a page worth of efx_tso_header structures, and string them - * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. - */ -static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) +static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, unsigned int len) { - struct device *dma_dev = &tx_queue->efx->pci_dev->dev; - struct efx_tso_header *tsoh; - dma_addr_t dma_addr; - u8 *base_kva, *kva; - - base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC); - if (base_kva == NULL) { - netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, - "Unable to allocate page for TSO headers\n"); - return -ENOMEM; - } - - /* dma_alloc_coherent() allocates pages. */ - EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); - - for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { - tsoh = (struct efx_tso_header *)kva; - tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); - tsoh->next = tx_queue->tso_headers_free; - tx_queue->tso_headers_free = tsoh; - } - - return 0; -} + u8 *result; + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->flags); + EFX_BUG_ON_PARANOID(buffer->unmap_len); -/* Free up a TSO header, and all others in the same page. 
*/ -static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh, - struct device *dma_dev) -{ - struct efx_tso_header **p; - unsigned long base_kva; - dma_addr_t base_dma; - - base_kva = (unsigned long)tsoh & PAGE_MASK; - base_dma = tsoh->dma_addr & PAGE_MASK; - - p = &tx_queue->tso_headers_free; - while (*p != NULL) { - if (((unsigned long)*p & PAGE_MASK) == base_kva) - *p = (*p)->next; - else - p = &(*p)->next; - } - - dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma); -} + if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) { + unsigned index = + (tx_queue->insert_count & tx_queue->ptr_mask) / 2; + struct efx_buffer *page_buf = + &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; + unsigned offset = + TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; + + if (unlikely(!page_buf->addr) && + efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE)) + return NULL; + + result = (u8 *)page_buf->addr + offset; + buffer->dma_addr = page_buf->dma_addr + offset; + buffer->flags = EFX_TX_BUF_CONT; + } else { + tx_queue->tso_long_headers++; -static struct efx_tso_header * -efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) -{ - struct efx_tso_header *tsoh; - - tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); - if (unlikely(!tsoh)) - return NULL; - - tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, - TSOH_BUFFER(tsoh), header_len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, - tsoh->dma_addr))) { - kfree(tsoh); - return NULL; + buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC); + if (unlikely(!buffer->heap_buf)) + return NULL; + result = (u8 *)buffer->heap_buf + TSOH_OFFSET; + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; } - tsoh->unmap_len = header_len; - return tsoh; -} + buffer->len = len; -static void -efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) -{ - dma_unmap_single(&tx_queue->efx->pci_dev->dev, - tsoh->dma_addr, tsoh->unmap_len, - DMA_TO_DEVICE); - kfree(tsoh); + return result; } /** @@ -781,47 +730,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) * @len: Length of fragment * @final_buffer: The final buffer inserted into the queue * - * Push descriptors onto the TX queue. Return 0 on success or 1 if - * @tx_queue full. + * Push descriptors onto the TX queue. */ -static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, unsigned len, - struct efx_tx_buffer **final_buffer) +static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned len, + struct efx_tx_buffer **final_buffer) { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; - unsigned dma_len, fill_level, insert_ptr; - int q_space; + unsigned dma_len, insert_ptr; EFX_BUG_ON_PARANOID(len <= 0); - fill_level = tx_queue->insert_count - tx_queue->old_read_count; - /* -1 as there is no way to represent all descriptors used */ - q_space = efx->txq_entries - 1 - fill_level; - while (1) { - if (unlikely(q_space-- <= 0)) { - /* It might be that completions have happened - * since the xmit path last checked. Update - * the xmit path's copy of read_count. - */ - netif_tx_stop_queue(tx_queue->core_txq); - /* This memory barrier protects the change of - * queue state from the access of read_count. 
*/ - smp_mb(); - tx_queue->old_read_count = - ACCESS_ONCE(tx_queue->read_count); - fill_level = (tx_queue->insert_count - - tx_queue->old_read_count); - q_space = efx->txq_entries - 1 - fill_level; - if (unlikely(q_space-- <= 0)) { - *final_buffer = NULL; - return 1; - } - smp_mb(); - netif_tx_start_queue(tx_queue->core_txq); - } - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; ++tx_queue->insert_count; @@ -830,12 +751,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, tx_queue->read_count >= efx->txq_entries); - efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); - EFX_BUG_ON_PARANOID(buffer->skb); - EFX_BUG_ON_PARANOID(!buffer->continuation); - EFX_BUG_ON_PARANOID(buffer->tsoh); + EFX_BUG_ON_PARANOID(buffer->flags); buffer->dma_addr = dma_addr; @@ -845,7 +763,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, if (dma_len >= len) break; - buffer->len = dma_len; /* Don't set the other members */ + buffer->len = dma_len; + buffer->flags = EFX_TX_BUF_CONT; dma_addr += dma_len; len -= dma_len; } @@ -853,7 +772,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(!len); buffer->len = len; *final_buffer = buffer; - return 0; } @@ -864,54 +782,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ -static void efx_tso_put_header(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh, unsigned len) +static int efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, u8 *header) { - struct efx_tx_buffer *buffer; - - buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(buffer->unmap_len); - EFX_BUG_ON_PARANOID(buffer->skb); - EFX_BUG_ON_PARANOID(!buffer->continuation); - EFX_BUG_ON_PARANOID(buffer->tsoh); - buffer->len = len; - buffer->dma_addr = tsoh->dma_addr; - buffer->tsoh = tsoh; + if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { + buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, + header, buffer->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, + buffer->dma_addr))) { + kfree(buffer->heap_buf); + buffer->len = 0; + buffer->flags = 0; + return -ENOMEM; + } + buffer->unmap_len = buffer->len; + buffer->flags |= EFX_TX_BUF_MAP_SINGLE; + } ++tx_queue->insert_count; + return 0; } -/* Remove descriptors put into a tx_queue. */ +/* Remove buffers put into a tx_queue. None of the buffers must have + * an skb attached. 
+ */ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; - dma_addr_t unmap_addr; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->skb); - if (buffer->unmap_len) { - unmap_addr = (buffer->dma_addr + buffer->len - - buffer->unmap_len); - if (buffer->unmap_single) - dma_unmap_single(&tx_queue->efx->pci_dev->dev, - unmap_addr, buffer->unmap_len, - DMA_TO_DEVICE); - else - dma_unmap_page(&tx_queue->efx->pci_dev->dev, - unmap_addr, buffer->unmap_len, - DMA_TO_DEVICE); - buffer->unmap_len = 0; - } - buffer->len = 0; - buffer->continuation = true; + efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); } } @@ -919,17 +825,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) /* Parse the SKB header and initialise state. */ static void tso_start(struct tso_state *st, const struct sk_buff *skb) { - /* All ethernet/IP/TCP headers combined size is TCP header size - * plus offset of TCP header relative to start of packet. - */ - st->header_len = ((tcp_hdr(skb)->doff << 2u) - + PTR_DIFF(tcp_hdr(skb), skb->data)); - st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; - - if (st->protocol == htons(ETH_P_IP)) + st->ip_off = skb_network_header(skb) - skb->data; + st->tcp_off = skb_transport_header(skb) - skb->data; + st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); + if (st->protocol == htons(ETH_P_IP)) { + st->ip_base_len = st->header_len - st->ip_off; st->ipv4_id = ntohs(ip_hdr(skb)->id); - else + } else { + st->ip_base_len = st->header_len - st->tcp_off; st->ipv4_id = 0; + } st->seqnum = ntohl(tcp_hdr(skb)->seq); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); @@ -938,7 +843,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb) st->out_len = skb->len - st->header_len; st->unmap_len = 0; - st->unmap_single = false; + st->dma_flags = 0; } static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, @@ -947,7 +852,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { - st->unmap_single = false; + st->dma_flags = 0; st->unmap_len = skb_frag_size(frag); st->in_len = skb_frag_size(frag); st->dma_addr = st->unmap_addr; @@ -965,7 +870,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, len, DMA_TO_DEVICE); if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { - st->unmap_single = true; + st->dma_flags = EFX_TX_BUF_MAP_SINGLE; st->unmap_len = len; st->in_len = len; st->dma_addr = st->unmap_addr; @@ -982,20 +887,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, * @st: TSO state * * Form descriptors for the current fragment, until we reach the end - * of fragment or end-of-packet. Return 0 on success, 1 if not enough - * space in @tx_queue. + * of fragment or end-of-packet. 
*/ -static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb, - struct tso_state *st) +static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) { struct efx_tx_buffer *buffer; - int n, end_of_packet, rc; + int n; if (st->in_len == 0) - return 0; + return; if (st->packet_space == 0) - return 0; + return; EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0); @@ -1006,25 +910,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, st->out_len -= n; st->in_len -= n; - rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); - if (likely(rc == 0)) { - if (st->out_len == 0) - /* Transfer ownership of the skb */ - buffer->skb = skb; + efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); - end_of_packet = st->out_len == 0 || st->packet_space == 0; - buffer->continuation = !end_of_packet; + if (st->out_len == 0) { + /* Transfer ownership of the skb */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB; + } else if (st->packet_space != 0) { + buffer->flags = EFX_TX_BUF_CONT; + } - if (st->in_len == 0) { - /* Transfer ownership of the DMA mapping */ - buffer->unmap_len = st->unmap_len; - buffer->unmap_single = st->unmap_single; - st->unmap_len = 0; - } + if (st->in_len == 0) { + /* Transfer ownership of the DMA mapping */ + buffer->unmap_len = st->unmap_len; + buffer->flags |= st->dma_flags; + st->unmap_len = 0; } st->dma_addr += n; - return rc; } @@ -1035,36 +938,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, * @st: TSO state * * Generate a new header and prepare for the new packet. Return 0 on - * success, or -1 if failed to alloc header. + * success, or -%ENOMEM if failed to alloc header. */ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { - struct efx_tso_header *tsoh; + struct efx_tx_buffer *buffer = + &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; struct tcphdr *tsoh_th; unsigned ip_length; u8 *header; + int rc; - /* Allocate a DMA-mapped header buffer. */ - if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { - if (tx_queue->tso_headers_free == NULL) { - if (efx_tsoh_block_alloc(tx_queue)) - return -1; - } - EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); - tsoh = tx_queue->tso_headers_free; - tx_queue->tso_headers_free = tsoh->next; - tsoh->unmap_len = 0; - } else { - tx_queue->tso_long_headers++; - tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); - if (unlikely(!tsoh)) - return -1; - } + /* Allocate and insert a DMA-mapped header buffer. */ + header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); + if (!header) + return -ENOMEM; - header = TSOH_BUFFER(tsoh); - tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); + tsoh_th = (struct tcphdr *)(header + st->tcp_off); /* Copy and update the headers. */ memcpy(header, skb->data, st->header_len); @@ -1073,19 +965,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, st->seqnum += skb_shinfo(skb)->gso_size; if (st->out_len > skb_shinfo(skb)->gso_size) { /* This packet will not finish the TSO burst. */ - ip_length = st->full_packet_size - ETH_HDR_LEN(skb); + st->packet_space = skb_shinfo(skb)->gso_size; tsoh_th->fin = 0; tsoh_th->psh = 0; } else { /* This packet will be the last in the TSO burst. 
*/ - ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; + st->packet_space = st->out_len; tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->psh = tcp_hdr(skb)->psh; } + ip_length = st->ip_base_len + st->packet_space; if (st->protocol == htons(ETH_P_IP)) { - struct iphdr *tsoh_iph = - (struct iphdr *)(header + SKB_IPV4_OFF(skb)); + struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off); tsoh_iph->tot_len = htons(ip_length); @@ -1094,16 +986,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, st->ipv4_id++; } else { struct ipv6hdr *tsoh_iph = - (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); + (struct ipv6hdr *)(header + st->ip_off); - tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); + tsoh_iph->payload_len = htons(ip_length); } - st->packet_space = skb_shinfo(skb)->gso_size; - ++tx_queue->tso_packets; + rc = efx_tso_put_header(tx_queue, buffer, header); + if (unlikely(rc)) + return rc; - /* Form a descriptor for this header. */ - efx_tso_put_header(tx_queue, tsoh, st->header_len); + ++tx_queue->tso_packets; return 0; } @@ -1118,13 +1010,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, * * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if * @skb was not enqueued. In all cases @skb is consumed. Return - * %NETDEV_TX_OK or %NETDEV_TX_BUSY. + * %NETDEV_TX_OK. */ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; - int frag_i, rc, rc2 = NETDEV_TX_OK; + int frag_i, rc; struct tso_state state; /* Find the packet protocol and sanity-check it */ @@ -1156,11 +1048,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, goto mem_err; while (1) { - rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); - if (unlikely(rc)) { - rc2 = NETDEV_TX_BUSY; - goto unwind; - } + tso_fill_packet_with_fragment(tx_queue, skb, &state); /* Move onto the next fragment? */ if (state.in_len == 0) { @@ -1184,6 +1072,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); + efx_tx_maybe_stop_queue(tx_queue); + tx_queue->tso_bursts++; return NETDEV_TX_OK; @@ -1192,10 +1082,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, "Out of memory for TSO headers, or DMA mapping error\n"); dev_kfree_skb_any(skb); - unwind: /* Free the DMA mapping we were in the process of writing out */ if (state.unmap_len) { - if (state.unmap_single) + if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, state.unmap_len, DMA_TO_DEVICE); else @@ -1204,25 +1093,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, } efx_enqueue_unwind(tx_queue); - return rc2; -} - - -/* - * Free up all TSO datastructures associated with tx_queue. This - * routine should be called only once the tx_queue is both empty and - * will no longer be used. 
- */ -static void efx_fini_tso(struct efx_tx_queue *tx_queue) -{ - unsigned i; - - if (tx_queue->buffer) { - for (i = 0; i <= tx_queue->ptr_mask; ++i) - efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); - } - - while (tx_queue->tso_headers_free != NULL) - efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, - &tx_queue->efx->pci_dev->dev); + return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ade108232048..0376a5e6b2bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->write = &stmmac_mdio_write; new_bus->reset = &stmmac_mdio_reset; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", - new_bus->name, mdio_bus_data->bus_id); + new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; new_bus->irq = irqlist; new_bus->phy_mask = mdio_bus_data->phy_mask; @@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev) * and no PHY number was provided to the MAC, * use the one probed here. */ - if ((priv->plat->bus_id == mdio_bus_data->bus_id) && - (priv->plat->phy_addr == -1)) + if (priv->plat->phy_addr == -1) priv->plat->phy_addr = addr; - act = (priv->plat->bus_id == mdio_bus_data->bus_id) && - (priv->plat->phy_addr == addr); + act = (priv->plat->phy_addr == addr); switch (phydev->irq) { case PHY_POLL: irq_str = "POLL"; @@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); + if (!priv->mii) + return 0; + mdiobus_unregister(priv->mii); priv->mii->priv = NULL; mdiobus_free(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 13afb8edfadc..1f069b0f6af5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -40,7 +40,6 @@ static void stmmac_default_data(void) plat_dat.has_gmac = 1; plat_dat.force_sf_dma_mode = 1; - mdio_data.bus_id = 1; mdio_data.phy_reset = NULL; mdio_data.phy_mask = 0; plat_dat.mdio_bus_data = &mdio_data; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index b93245c11995..ed112b55ae7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -78,6 +78,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; + struct device *dev = &pdev->dev; void __iomem *addr = NULL; struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; @@ -87,18 +88,10 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) if (!res) return -ENODEV; - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - pr_err("%s: ERROR: memory allocation failed" - "cannot get the I/O addr 0x%x\n", - __func__, (unsigned int)res->start); - return -EBUSY; - } - - addr = ioremap(res->start, resource_size(res)); + addr = devm_request_and_ioremap(dev, res); if (!addr) { pr_err("%s: ERROR: memory mapping failed", __func__); - ret = -ENOMEM; - goto out_release_region; + return -ENOMEM; } if (pdev->dev.of_node) { @@ -107,14 +100,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) GFP_KERNEL); if (!plat_dat) { pr_err("%s: ERROR: no memory", __func__); - ret = -ENOMEM; - goto out_unmap; + return -ENOMEM; } 
ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); if (ret) { pr_err("%s: main dt probe failed", __func__); - goto out_unmap; + return ret; } } else { plat_dat = pdev->dev.platform_data; @@ -124,13 +116,13 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) if (plat_dat->init) { ret = plat_dat->init(pdev); if (unlikely(ret)) - goto out_unmap; + return ret; } priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed", __func__); - goto out_unmap; + return -ENODEV; } /* Get MAC address if available (DT) */ @@ -142,8 +134,7 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) if (priv->dev->irq == -ENXIO) { pr_err("%s: ERROR: MAC IRQ configuration " "information not found\n", __func__); - ret = -ENXIO; - goto out_unmap; + return -ENXIO; } /* @@ -165,15 +156,6 @@ static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) pr_debug("STMMAC platform driver registration completed"); return 0; - -out_unmap: - iounmap(addr); - platform_set_drvdata(pdev, NULL); - -out_release_region: - release_mem_region(res->start, resource_size(res)); - - return ret; } /** @@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); - struct resource *res; int ret = stmmac_dvr_remove(ndev); if (priv->plat->exit) @@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); - iounmap((void __force __iomem *)priv->ioaddr); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - return ret; } diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1b173a6145d6..b26cbda5efa9 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) ---help--- This driver supports TI's DaVinci CPDMA dma engine. 
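For reference, a minimal sketch of the device-managed I/O pattern the stmmac_platform probe above switches to: devm_request_and_ioremap() ties both the MMIO region request and the mapping to the struct device, and the devres core releases them automatically when the device is unbound. That is why the out_unmap/out_release_region unwind labels and the iounmap()/release_mem_region() calls in stmmac_pltfr_remove() can simply be deleted. The probe function name example_probe() below is a hypothetical stand-in, not something added by this patch.

#include <linux/platform_device.h>
#include <linux/io.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Request the region and ioremap it in one managed call. */
	addr = devm_request_and_ioremap(&pdev->dev, res);
	if (!addr)
		return -ENOMEM;		/* no goto-based unwinding needed */

	/* ... device-specific setup using 'addr' ... */
	return 0;
}

No matching iounmap() or release_mem_region() is required in the remove path; the driver core tears the mapping down together with the device.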
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1e5d85b06e71..0cbc0e59252c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -28,6 +28,9 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_device.h> #include <linux/platform_data/cpsw.h> @@ -709,6 +712,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->sliver = regs + data->sliver_reg_ofs; } +static int cpsw_probe_dt(struct cpsw_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *slave_node; + int i = 0, ret; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "slaves", &prop)) { + pr_err("Missing slaves property in the DT.\n"); + return -EINVAL; + } + data->slaves = prop; + + data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * + data->slaves, GFP_KERNEL); + if (!data->slave_data) { + pr_err("Could not allocate slave memory.\n"); + return -EINVAL; + } + + data->no_bd_ram = of_property_read_bool(node, "no_bd_ram"); + + if (of_property_read_u32(node, "cpdma_channels", &prop)) { + pr_err("Missing cpdma_channels property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->channels = prop; + + if (of_property_read_u32(node, "host_port_no", &prop)) { + pr_err("Missing host_port_no property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->host_port_num = prop; + + if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) { + pr_err("Missing cpdma_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->cpdma_reg_ofs = prop; + + if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) { + pr_err("Missing cpdma_sram_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->cpdma_sram_ofs = prop; + + if (of_property_read_u32(node, "ale_reg_ofs", &prop)) { + pr_err("Missing ale_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->ale_reg_ofs = prop; + + if (of_property_read_u32(node, "ale_entries", &prop)) { + pr_err("Missing ale_entries property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->ale_entries = prop; + + if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) { + pr_err("Missing host_port_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->host_port_reg_ofs = prop; + + if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) { + pr_err("Missing hw_stats_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->hw_stats_reg_ofs = prop; + + if (of_property_read_u32(node, "bd_ram_ofs", &prop)) { + pr_err("Missing bd_ram_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->bd_ram_ofs = prop; + + if (of_property_read_u32(node, "bd_ram_size", &prop)) { + pr_err("Missing bd_ram_size property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->bd_ram_size = prop; + + if (of_property_read_u32(node, "rx_descs", &prop)) { + pr_err("Missing rx_descs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->rx_descs = prop; + + if (of_property_read_u32(node, "mac_control", &prop)) { + pr_err("Missing mac_control property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->mac_control = prop; + + for_each_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = data->slave_data + i; + const char 
*phy_id = NULL; + const void *mac_addr = NULL; + + if (of_property_read_string(slave_node, "phy_id", &phy_id)) { + pr_err("Missing slave[%d] phy_id property\n", i); + ret = -EINVAL; + goto error_ret; + } + slave_data->phy_id = phy_id; + + if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) { + pr_err("Missing slave[%d] slave_reg_ofs property\n", i); + ret = -EINVAL; + goto error_ret; + } + slave_data->slave_reg_ofs = prop; + + if (of_property_read_u32(slave_node, "sliver_reg_ofs", + &prop)) { + pr_err("Missing slave[%d] sliver_reg_ofs property\n", + i); + ret = -EINVAL; + goto error_ret; + } + slave_data->sliver_reg_ofs = prop; + + mac_addr = of_get_mac_address(slave_node); + if (mac_addr) + memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + + i++; + } + + return 0; + +error_ret: + kfree(data->slave_data); + return ret; +} + static int __devinit cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data = pdev->dev.platform_data; @@ -720,11 +875,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev) struct resource *res; int ret = 0, i, k = 0; - if (!data) { - pr_err("platform data missing\n"); - return -ENODEV; - } - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); if (!ndev) { pr_err("error allocating net_device\n"); @@ -734,13 +884,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); spin_lock_init(&priv->lock); - priv->data = *data; priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); + if (cpsw_probe_dt(&priv->data, pdev)) { + pr_err("cpsw: platform data missing\n"); + ret = -ENODEV; + goto clean_ndev_ret; + } + data = &priv->data; + if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); pr_info("Detected MACID = %pM", priv->mac_addr); @@ -996,11 +1152,17 @@ static const struct dev_pm_ops cpsw_pm_ops = { .resume = cpsw_resume, }; +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", }, + { /* sentinel */ }, +}; + static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", .owner = THIS_MODULE, .pm = &cpsw_pm_ops, + .of_match_table = of_match_ptr(cpsw_of_mtable), }, .probe = cpsw_probe, .remove = __devexit_p(cpsw_remove), diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index a9ca4a03d31b..51a96dbee9ac 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -36,6 +36,8 @@ #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/davinci_emac.h> +#include <linux/of.h> +#include <linux/of_device.h> /* * This timeout definition is a worst-case ultra defensive measure against @@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +static int davinci_mdio_probe_dt(struct mdio_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "bus_freq", &prop)) { + pr_err("Missing bus_freq property in the DT.\n"); + return -EINVAL; + } + data->bus_freq = prop; + + return 0; +} + + static int __devinit davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = pdev->dev.platform_data; @@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev) return 
-ENOMEM; } - data->pdata = pdata ? (*pdata) : default_pdata; - data->bus = mdiobus_alloc(); if (!data->bus) { dev_err(dev, "failed to alloc mii bus\n"); @@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev) goto bail_out; } + if (dev->of_node) { + if (davinci_mdio_probe_dt(&data->pdata, pdev)) + data->pdata = default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + } else { + data->pdata = pdata ? (*pdata) : default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + } + data->bus->name = dev_name(dev); data->bus->read = davinci_mdio_read, data->bus->write = davinci_mdio_write, data->bus->reset = davinci_mdio_reset, data->bus->parent = dev; data->bus->priv = data; - snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); @@ -456,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = { .resume = davinci_mdio_resume, }; +static const struct of_device_id davinci_mdio_of_mtable[] = { + { .compatible = "ti,davinci_mdio", }, + { /* sentinel */ }, +}; + static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, + .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, .probe = davinci_mdio_probe, .remove = __devexit_p(davinci_mdio_remove), diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 277c93e9ff4d..8fa947a2d929 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev) } data->rxskbs[i] = skb; - data->rxskbs[i] = skb; data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; } diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a5826a3111a6..2c08bf6e7bf3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev) if (data && is_valid_ether_addr(data->mac_addr)) { memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); } else { - eth_random_addr(ndev->dev_addr); - ndev->addr_assign_type |= NET_ADDR_RANDOM; + eth_hw_addr_random(ndev); } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index bdd8891c215a..88943d90c765 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -557,8 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev) if (data && is_valid_ether_addr(data->mac_addr)) { memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); } else { - eth_random_addr(ndev->dev_addr); - ndev->addr_assign_type |= NET_ADDR_RANDOM; + eth_hw_addr_random(ndev); } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8c5a1c43c81d..e91111a656f7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -400,7 +400,7 @@ static void netvsc_send_garp(struct work_struct *w) ndev_ctx = container_of(w, struct net_device_context, dwork.work); net_device = hv_get_drvdata(ndev_ctx->device_ctx); net = net_device->ndev; - netif_notify_peers(net); + netdev_notify_peers(net); } diff --git a/drivers/net/hyperv/rndis_filter.c 
b/drivers/net/hyperv/rndis_filter.c index 1e88a1095934..06f8601f32fc 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -46,8 +46,14 @@ struct rndis_request { /* Simplify allocation by having a netvsc packet inline */ struct hv_netvsc_packet pkt; struct hv_page_buffer buf; - /* FIXME: We assumed a fixed size request here. */ + struct rndis_message request_msg; + /* + * The buffer for the extended info after the RNDIS message. It's + * referenced based on the data offset in the RNDIS message. Its size + * is enough for current needs, and should be sufficient for the near + * future. + */ u8 ext[100]; }; diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig new file mode 100644 index 000000000000..08ae4655423a --- /dev/null +++ b/drivers/net/ieee802154/Kconfig @@ -0,0 +1,47 @@ +menuconfig IEEE802154_DRIVERS + tristate "IEEE 802.15.4 drivers" + depends on NETDEVICES && IEEE802154 + default y + ---help--- + Say Y here to get to see options for IEEE 802.15.4 Low-Rate + Wireless Personal Area Network device drivers. This option alone + does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +config IEEE802154_FAKEHARD + tristate "Fake LR-WPAN driver with several interconnected devices" + depends on IEEE802154_DRIVERS + ---help--- + Say Y here to enable the fake driver that serves as an example + of HardMAC device driver. + + This driver can also be built as a module. To do so say M here. + The module will be called 'fakehard'. + +config IEEE802154_FAKELB + depends on IEEE802154_DRIVERS && MAC802154 + tristate "IEEE 802.15.4 loopback driver" + ---help--- + Say Y here to enable the fake driver that can emulate a net + of several interconnected radio devices. + + This driver can also be built as a module. To do so say M here. + The module will be called 'fakelb'. + +config IEEE802154_AT86RF230 + depends on IEEE802154_DRIVERS && MAC802154 + tristate "AT86RF230/231 transceiver driver" + depends on SPI + +config IEEE802154_MRF24J40 + tristate "Microchip MRF24J40 transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on SPI + ---help--- + Say Y here to enable the MRF24J20 SPI 802.15.4 wireless + controller. + + This driver can also be built as a module. To do so, say M here. + the module will be called 'mrf24j40'. diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile new file mode 100644 index 000000000000..abb0c08decb0 --- /dev/null +++ b/drivers/net/ieee802154/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o +obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o +obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o +obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c new file mode 100644 index 000000000000..ba753d87a32f --- /dev/null +++ b/drivers/net/ieee802154/at86rf230.c @@ -0,0 +1,958 @@ +/* + * AT86RF230/RF231 driver + * + * Copyright (C) 2009-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/spi/spi.h> +#include <linux/spi/at86rf230.h> +#include <linux/skbuff.h> + +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +struct at86rf230_local { + struct spi_device *spi; + int rstn, slp_tr, dig2; + + u8 part; + u8 vers; + + u8 buf[2]; + struct mutex bmux; + + struct work_struct irqwork; + struct completion tx_complete; + + struct ieee802154_dev *dev; + + spinlock_t lock; + bool irq_disabled; + bool is_tx; +}; + +#define RG_TRX_STATUS (0x01) +#define SR_TRX_STATUS 0x01, 0x1f, 0 +#define SR_RESERVED_01_3 0x01, 0x20, 5 +#define SR_CCA_STATUS 0x01, 0x40, 6 +#define SR_CCA_DONE 0x01, 0x80, 7 +#define RG_TRX_STATE (0x02) +#define SR_TRX_CMD 0x02, 0x1f, 0 +#define SR_TRAC_STATUS 0x02, 0xe0, 5 +#define RG_TRX_CTRL_0 (0x03) +#define SR_CLKM_CTRL 0x03, 0x07, 0 +#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 +#define SR_PAD_IO_CLKM 0x03, 0x30, 4 +#define SR_PAD_IO 0x03, 0xc0, 6 +#define RG_TRX_CTRL_1 (0x04) +#define SR_IRQ_POLARITY 0x04, 0x01, 0 +#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 +#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 +#define SR_RX_BL_CTRL 0x04, 0x10, 4 +#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 +#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 +#define SR_PA_EXT_EN 0x04, 0x80, 7 +#define RG_PHY_TX_PWR (0x05) +#define SR_TX_PWR 0x05, 0x0f, 0 +#define SR_PA_LT 0x05, 0x30, 4 +#define SR_PA_BUF_LT 0x05, 0xc0, 6 +#define RG_PHY_RSSI (0x06) +#define SR_RSSI 0x06, 0x1f, 0 +#define SR_RND_VALUE 0x06, 0x60, 5 +#define SR_RX_CRC_VALID 0x06, 0x80, 7 +#define RG_PHY_ED_LEVEL (0x07) +#define SR_ED_LEVEL 0x07, 0xff, 0 +#define RG_PHY_CC_CCA (0x08) +#define SR_CHANNEL 0x08, 0x1f, 0 +#define SR_CCA_MODE 0x08, 0x60, 5 +#define SR_CCA_REQUEST 0x08, 0x80, 7 +#define RG_CCA_THRES (0x09) +#define SR_CCA_ED_THRES 0x09, 0x0f, 0 +#define SR_RESERVED_09_1 0x09, 0xf0, 4 +#define RG_RX_CTRL (0x0a) +#define SR_PDT_THRES 0x0a, 0x0f, 0 +#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 +#define RG_SFD_VALUE (0x0b) +#define SR_SFD_VALUE 0x0b, 0xff, 0 +#define RG_TRX_CTRL_2 (0x0c) +#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 +#define SR_RESERVED_0c_2 0x0c, 0x7c, 2 +#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 +#define RG_ANT_DIV (0x0d) +#define SR_ANT_CTRL 0x0d, 0x03, 0 +#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 +#define SR_ANT_DIV_EN 0x0d, 0x08, 3 +#define SR_RESERVED_0d_2 0x0d, 0x70, 4 +#define SR_ANT_SEL 0x0d, 0x80, 7 +#define RG_IRQ_MASK (0x0e) +#define SR_IRQ_MASK 0x0e, 0xff, 0 +#define RG_IRQ_STATUS (0x0f) +#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 +#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 +#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 +#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 +#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 +#define SR_IRQ_5_AMI 0x0f, 0x20, 5 +#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 +#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 +#define RG_VREG_CTRL (0x10) +#define SR_RESERVED_10_6 0x10, 0x03, 0 +#define SR_DVDD_OK 0x10, 0x04, 2 +#define SR_DVREG_EXT 0x10, 0x08, 3 +#define SR_RESERVED_10_3 0x10, 0x30, 4 +#define SR_AVDD_OK 0x10, 0x40, 6 +#define SR_AVREG_EXT 0x10, 0x80, 7 
+#define RG_BATMON (0x11) +#define SR_BATMON_VTH 0x11, 0x0f, 0 +#define SR_BATMON_HR 0x11, 0x10, 4 +#define SR_BATMON_OK 0x11, 0x20, 5 +#define SR_RESERVED_11_1 0x11, 0xc0, 6 +#define RG_XOSC_CTRL (0x12) +#define SR_XTAL_TRIM 0x12, 0x0f, 0 +#define SR_XTAL_MODE 0x12, 0xf0, 4 +#define RG_RX_SYN (0x15) +#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 +#define SR_RESERVED_15_2 0x15, 0x70, 4 +#define SR_RX_PDT_DIS 0x15, 0x80, 7 +#define RG_XAH_CTRL_1 (0x17) +#define SR_RESERVED_17_8 0x17, 0x01, 0 +#define SR_AACK_PROM_MODE 0x17, 0x02, 1 +#define SR_AACK_ACK_TIME 0x17, 0x04, 2 +#define SR_RESERVED_17_5 0x17, 0x08, 3 +#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 +#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 +#define SR_RESERVED_17_2 0x17, 0x40, 6 +#define SR_RESERVED_17_1 0x17, 0x80, 7 +#define RG_FTN_CTRL (0x18) +#define SR_RESERVED_18_2 0x18, 0x7f, 0 +#define SR_FTN_START 0x18, 0x80, 7 +#define RG_PLL_CF (0x1a) +#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 +#define SR_PLL_CF_START 0x1a, 0x80, 7 +#define RG_PLL_DCU (0x1b) +#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 +#define SR_RESERVED_1b_2 0x1b, 0x40, 6 +#define SR_PLL_DCU_START 0x1b, 0x80, 7 +#define RG_PART_NUM (0x1c) +#define SR_PART_NUM 0x1c, 0xff, 0 +#define RG_VERSION_NUM (0x1d) +#define SR_VERSION_NUM 0x1d, 0xff, 0 +#define RG_MAN_ID_0 (0x1e) +#define SR_MAN_ID_0 0x1e, 0xff, 0 +#define RG_MAN_ID_1 (0x1f) +#define SR_MAN_ID_1 0x1f, 0xff, 0 +#define RG_SHORT_ADDR_0 (0x20) +#define SR_SHORT_ADDR_0 0x20, 0xff, 0 +#define RG_SHORT_ADDR_1 (0x21) +#define SR_SHORT_ADDR_1 0x21, 0xff, 0 +#define RG_PAN_ID_0 (0x22) +#define SR_PAN_ID_0 0x22, 0xff, 0 +#define RG_PAN_ID_1 (0x23) +#define SR_PAN_ID_1 0x23, 0xff, 0 +#define RG_IEEE_ADDR_0 (0x24) +#define SR_IEEE_ADDR_0 0x24, 0xff, 0 +#define RG_IEEE_ADDR_1 (0x25) +#define SR_IEEE_ADDR_1 0x25, 0xff, 0 +#define RG_IEEE_ADDR_2 (0x26) +#define SR_IEEE_ADDR_2 0x26, 0xff, 0 +#define RG_IEEE_ADDR_3 (0x27) +#define SR_IEEE_ADDR_3 0x27, 0xff, 0 +#define RG_IEEE_ADDR_4 (0x28) +#define SR_IEEE_ADDR_4 0x28, 0xff, 0 +#define RG_IEEE_ADDR_5 (0x29) +#define SR_IEEE_ADDR_5 0x29, 0xff, 0 +#define RG_IEEE_ADDR_6 (0x2a) +#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 +#define RG_IEEE_ADDR_7 (0x2b) +#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 +#define RG_XAH_CTRL_0 (0x2c) +#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 +#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 +#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 +#define RG_CSMA_SEED_0 (0x2d) +#define SR_CSMA_SEED_0 0x2d, 0xff, 0 +#define RG_CSMA_SEED_1 (0x2e) +#define SR_CSMA_SEED_1 0x2e, 0x07, 0 +#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 +#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 +#define SR_AACK_SET_PD 0x2e, 0x20, 5 +#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 +#define RG_CSMA_BE (0x2f) +#define SR_MIN_BE 0x2f, 0x0f, 0 +#define SR_MAX_BE 0x2f, 0xf0, 4 + +#define CMD_REG 0x80 +#define CMD_REG_MASK 0x3f +#define CMD_WRITE 0x40 +#define CMD_FB 0x20 + +#define IRQ_BAT_LOW (1 << 7) +#define IRQ_TRX_UR (1 << 6) +#define IRQ_AMI (1 << 5) +#define IRQ_CCA_ED (1 << 4) +#define IRQ_TRX_END (1 << 3) +#define IRQ_RX_START (1 << 2) +#define IRQ_PLL_UNL (1 << 1) +#define IRQ_PLL_LOCK (1 << 0) + +#define STATE_P_ON 0x00 /* BUSY */ +#define STATE_BUSY_RX 0x01 +#define STATE_BUSY_TX 0x02 +#define STATE_FORCE_TRX_OFF 0x03 +#define STATE_FORCE_TX_ON 0x04 /* IDLE */ +/* 0x05 */ /* INVALID_PARAMETER */ +#define STATE_RX_ON 0x06 +/* 0x07 */ /* SUCCESS */ +#define STATE_TRX_OFF 0x08 +#define STATE_TX_ON 0x09 +/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ +#define STATE_SLEEP 0x0F +#define STATE_BUSY_RX_AACK 0x11 +#define STATE_BUSY_TX_ARET 0x12 
+#define STATE_BUSY_RX_AACK_ON 0x16 +#define STATE_BUSY_TX_ARET_ON 0x19 +#define STATE_RX_ON_NOCLK 0x1C +#define STATE_RX_AACK_ON_NOCLK 0x1D +#define STATE_BUSY_RX_AACK_NOCLK 0x1E +#define STATE_TRANSITION_IN_PROGRESS 0x1F + +static int +__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = buf, + }; + + buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE; + buf[1] = data; + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + return status; +} + +static int +__at86rf230_read_subreg(struct at86rf230_local *lp, + u8 addr, u8 mask, int shift, u8 *data) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = buf, + .rx_buf = buf, + }; + + buf[0] = (addr & CMD_REG_MASK) | CMD_REG; + buf[1] = 0xff; + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + if (status == 0) + *data = buf[1]; + + return status; +} + +static int +at86rf230_read_subreg(struct at86rf230_local *lp, + u8 addr, u8 mask, int shift, u8 *data) +{ + int status; + + mutex_lock(&lp->bmux); + status = __at86rf230_read_subreg(lp, addr, mask, shift, data); + mutex_unlock(&lp->bmux); + + return status; +} + +static int +at86rf230_write_subreg(struct at86rf230_local *lp, + u8 addr, u8 mask, int shift, u8 data) +{ + int status; + u8 val; + + mutex_lock(&lp->bmux); + status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val); + if (status) + goto out; + + val &= ~mask; + val |= (data << shift) & mask; + + status = __at86rf230_write(lp, addr, val); +out: + mutex_unlock(&lp->bmux); + + return status; +} + +static int +at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer_head = { + .len = 2, + .tx_buf = buf, + + }; + struct spi_transfer xfer_buf = { + .len = len, + .tx_buf = data, + }; + + mutex_lock(&lp->bmux); + buf[0] = CMD_WRITE | CMD_FB; + buf[1] = len + 2; /* 2 bytes for CRC that isn't written */ + + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + mutex_unlock(&lp->bmux); + return status; +} + +static int +at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi) +{ + u8 *buf = lp->buf; + int status; + struct 
spi_message msg; + struct spi_transfer xfer_head = { + .len = 2, + .tx_buf = buf, + .rx_buf = buf, + }; + struct spi_transfer xfer_head1 = { + .len = 2, + .tx_buf = buf, + .rx_buf = buf, + }; + struct spi_transfer xfer_buf = { + .len = 0, + .rx_buf = data, + }; + + mutex_lock(&lp->bmux); + + buf[0] = CMD_FB; + buf[1] = 0x00; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + + xfer_buf.len = *(buf + 1) + 1; + *len = buf[1]; + + buf[0] = CMD_FB; + buf[1] = 0x00; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head1, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + status = spi_sync(lp->spi, &msg); + + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + if (status) { + if (lqi && (*len > lp->buf[1])) + *lqi = data[lp->buf[1]]; + } + mutex_unlock(&lp->bmux); + + return status; +} + +static int +at86rf230_ed(struct ieee802154_dev *dev, u8 *level) +{ + might_sleep(); + BUG_ON(!level); + *level = 0xbe; + return 0; +} + +static int +at86rf230_state(struct ieee802154_dev *dev, int state) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + u8 val; + u8 desired_status; + + might_sleep(); + + if (state == STATE_FORCE_TX_ON) + desired_status = STATE_TX_ON; + else if (state == STATE_FORCE_TRX_OFF) + desired_status = STATE_TRX_OFF; + else + desired_status = state; + + do { + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val); + if (rc) + goto err; + } while (val == STATE_TRANSITION_IN_PROGRESS); + + if (val == desired_status) + return 0; + + /* state is equal to phy states */ + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state); + if (rc) + goto err; + + do { + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val); + if (rc) + goto err; + } while (val == STATE_TRANSITION_IN_PROGRESS); + + + if (val == desired_status) + return 0; + + pr_err("unexpected state change: %d, asked for %d\n", val, state); + return -EBUSY; + +err: + pr_err("error: %d\n", rc); + return rc; +} + +static int +at86rf230_start(struct ieee802154_dev *dev) +{ + struct at86rf230_local *lp = dev->priv; + u8 rc; + + rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1); + if (rc) + return rc; + + return at86rf230_state(dev, STATE_RX_ON); +} + +static void +at86rf230_stop(struct ieee802154_dev *dev) +{ + at86rf230_state(dev, STATE_FORCE_TRX_OFF); +} + +static int +at86rf230_channel(struct ieee802154_dev *dev, int page, int channel) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + + might_sleep(); + + if (page != 0 || channel < 11 || channel > 26) { + WARN_ON(1); + return -EINVAL; + } + + rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel); + msleep(1); /* Wait for PLL */ + dev->phy->current_channel = channel; + + return 0; +} + +static int +at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + unsigned long flags; + + spin_lock(&lp->lock); + if (lp->irq_disabled) { + spin_unlock(&lp->lock); + return -EBUSY; + } + spin_unlock(&lp->lock); + + might_sleep(); + + rc = at86rf230_state(dev, STATE_FORCE_TX_ON); + if (rc) + goto err; + + spin_lock_irqsave(&lp->lock, flags); + lp->is_tx = 1; + INIT_COMPLETION(lp->tx_complete); + spin_unlock_irqrestore(&lp->lock, flags); + + rc = at86rf230_write_fbuf(lp, skb->data, skb->len); + if (rc) + goto err_rx; + + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, 
STATE_BUSY_TX); + if (rc) + goto err_rx; + + rc = wait_for_completion_interruptible(&lp->tx_complete); + if (rc < 0) + goto err_rx; + + rc = at86rf230_start(dev); + + return rc; + +err_rx: + at86rf230_start(dev); +err: + pr_err("error: %d\n", rc); + + spin_lock_irqsave(&lp->lock, flags); + lp->is_tx = 0; + spin_unlock_irqrestore(&lp->lock, flags); + + return rc; +} + +static int at86rf230_rx(struct at86rf230_local *lp) +{ + u8 len = 128, lqi = 0; + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_KERNEL); + + if (!skb) + return -ENOMEM; + + if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi)) + goto err; + + if (len < 2) + goto err; + + skb_trim(skb, len - 2); /* We do not put CRC into the frame */ + + ieee802154_rx_irqsafe(lp->dev, skb, lqi); + + dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi); + + return 0; +err: + pr_debug("received frame is too small\n"); + + kfree_skb(skb); + return -EINVAL; +} + +static struct ieee802154_ops at86rf230_ops = { + .owner = THIS_MODULE, + .xmit = at86rf230_xmit, + .ed = at86rf230_ed, + .set_channel = at86rf230_channel, + .start = at86rf230_start, + .stop = at86rf230_stop, +}; + +static void at86rf230_irqwork(struct work_struct *work) +{ + struct at86rf230_local *lp = + container_of(work, struct at86rf230_local, irqwork); + u8 status = 0, val; + int rc; + unsigned long flags; + + rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val); + status |= val; + + status &= ~IRQ_PLL_LOCK; /* ignore */ + status &= ~IRQ_RX_START; /* ignore */ + status &= ~IRQ_AMI; /* ignore */ + status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/ + + if (status & IRQ_TRX_END) { + spin_lock_irqsave(&lp->lock, flags); + status &= ~IRQ_TRX_END; + if (lp->is_tx) { + lp->is_tx = 0; + spin_unlock_irqrestore(&lp->lock, flags); + complete(&lp->tx_complete); + } else { + spin_unlock_irqrestore(&lp->lock, flags); + at86rf230_rx(lp); + } + } + + spin_lock_irqsave(&lp->lock, flags); + lp->irq_disabled = 0; + spin_unlock_irqrestore(&lp->lock, flags); + + enable_irq(lp->spi->irq); +} + +static irqreturn_t at86rf230_isr(int irq, void *data) +{ + struct at86rf230_local *lp = data; + + disable_irq_nosync(irq); + + spin_lock(&lp->lock); + lp->irq_disabled = 1; + spin_unlock(&lp->lock); + + schedule_work(&lp->irqwork); + + return IRQ_HANDLED; +} + + +static int at86rf230_hw_init(struct at86rf230_local *lp) +{ + u8 status; + int rc; + + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); + if (rc) + return rc; + + dev_info(&lp->spi->dev, "Status: %02x\n", status); + if (status == STATE_P_ON) { + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF); + if (rc) + return rc; + msleep(1); + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); + if (rc) + return rc; + dev_info(&lp->spi->dev, "Status: %02x\n", status); + } + + rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR | + * IRQ_CCA_ED | + * IRQ_TRX_END | + * IRQ_PLL_UNL | + * IRQ_PLL_LOCK + */ + if (rc) + return rc; + + /* CLKM changes are applied immediately */ + rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00); + if (rc) + return rc; + + /* Turn CLKM Off */ + rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00); + if (rc) + return rc; + /* Wait the next SLEEP cycle */ + msleep(100); + + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON); + if (rc) + return rc; + msleep(1); + + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); + if (rc) + return rc; + dev_info(&lp->spi->dev, "Status: %02x\n", status); + + rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status); + if (rc) + return rc; + 
if (!status) { + dev_err(&lp->spi->dev, "DVDD error\n"); + return -EINVAL; + } + + rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status); + if (rc) + return rc; + if (!status) { + dev_err(&lp->spi->dev, "AVDD error\n"); + return -EINVAL; + } + + return 0; +} + +static int at86rf230_suspend(struct spi_device *spi, pm_message_t message) +{ + return 0; +} + +static int at86rf230_resume(struct spi_device *spi) +{ + return 0; +} + +static int at86rf230_fill_data(struct spi_device *spi) +{ + struct at86rf230_local *lp = spi_get_drvdata(spi); + struct at86rf230_platform_data *pdata = spi->dev.platform_data; + + if (!pdata) { + dev_err(&spi->dev, "no platform_data\n"); + return -EINVAL; + } + + lp->rstn = pdata->rstn; + lp->slp_tr = pdata->slp_tr; + lp->dig2 = pdata->dig2; + + return 0; +} + +static int __devinit at86rf230_probe(struct spi_device *spi) +{ + struct ieee802154_dev *dev; + struct at86rf230_local *lp; + u8 man_id_0, man_id_1; + int rc; + const char *chip; + int supported = 0; + + if (!spi->irq) { + dev_err(&spi->dev, "no IRQ specified\n"); + return -EINVAL; + } + + dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops); + if (!dev) + return -ENOMEM; + + lp = dev->priv; + lp->dev = dev; + + lp->spi = spi; + + dev->priv = lp; + dev->parent = &spi->dev; + dev->extra_tx_headroom = 0; + /* We do support only 2.4 Ghz */ + dev->phy->channels_supported[0] = 0x7FFF800; + dev->flags = IEEE802154_HW_OMIT_CKSUM; + + mutex_init(&lp->bmux); + INIT_WORK(&lp->irqwork, at86rf230_irqwork); + spin_lock_init(&lp->lock); + init_completion(&lp->tx_complete); + + spi_set_drvdata(spi, lp); + + rc = at86rf230_fill_data(spi); + if (rc) + goto err_fill; + + rc = gpio_request(lp->rstn, "rstn"); + if (rc) + goto err_rstn; + + if (gpio_is_valid(lp->slp_tr)) { + rc = gpio_request(lp->slp_tr, "slp_tr"); + if (rc) + goto err_slp_tr; + } + + rc = gpio_direction_output(lp->rstn, 1); + if (rc) + goto err_gpio_dir; + + if (gpio_is_valid(lp->slp_tr)) { + rc = gpio_direction_output(lp->slp_tr, 0); + if (rc) + goto err_gpio_dir; + } + + /* Reset */ + msleep(1); + gpio_set_value(lp->rstn, 0); + msleep(1); + gpio_set_value(lp->rstn, 1); + msleep(1); + + rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0); + if (rc) + goto err_gpio_dir; + rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1); + if (rc) + goto err_gpio_dir; + + if (man_id_1 != 0x00 || man_id_0 != 0x1f) { + dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n", + man_id_1, man_id_0); + rc = -EINVAL; + goto err_gpio_dir; + } + + rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part); + if (rc) + goto err_gpio_dir; + + rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers); + if (rc) + goto err_gpio_dir; + + switch (lp->part) { + case 2: + chip = "at86rf230"; + /* supported = 1; FIXME: should be easy to support; */ + break; + case 3: + chip = "at86rf231"; + supported = 1; + break; + default: + chip = "UNKNOWN"; + break; + } + + dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers); + if (!supported) { + rc = -ENOTSUPP; + goto err_gpio_dir; + } + + rc = at86rf230_hw_init(lp); + if (rc) + goto err_gpio_dir; + + rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED, + dev_name(&spi->dev), lp); + if (rc) + goto err_gpio_dir; + + rc = ieee802154_register_device(lp->dev); + if (rc) + goto err_irq; + + return rc; + + ieee802154_unregister_device(lp->dev); +err_irq: + free_irq(spi->irq, lp); + flush_work(&lp->irqwork); +err_gpio_dir: + if (gpio_is_valid(lp->slp_tr)) + gpio_free(lp->slp_tr); +err_slp_tr: + gpio_free(lp->rstn); +err_rstn: 
+err_fill: + spi_set_drvdata(spi, NULL); + mutex_destroy(&lp->bmux); + ieee802154_free_device(lp->dev); + return rc; +} + +static int __devexit at86rf230_remove(struct spi_device *spi) +{ + struct at86rf230_local *lp = spi_get_drvdata(spi); + + ieee802154_unregister_device(lp->dev); + + free_irq(spi->irq, lp); + flush_work(&lp->irqwork); + + if (gpio_is_valid(lp->slp_tr)) + gpio_free(lp->slp_tr); + gpio_free(lp->rstn); + + spi_set_drvdata(spi, NULL); + mutex_destroy(&lp->bmux); + ieee802154_free_device(lp->dev); + + dev_dbg(&spi->dev, "unregistered at86rf230\n"); + return 0; +} + +static struct spi_driver at86rf230_driver = { + .driver = { + .name = "at86rf230", + .owner = THIS_MODULE, + }, + .probe = at86rf230_probe, + .remove = __devexit_p(at86rf230_remove), + .suspend = at86rf230_suspend, + .resume = at86rf230_resume, +}; + +module_spi_driver(at86rf230_driver); + +MODULE_DESCRIPTION("AT86RF230 Transceiver Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c new file mode 100644 index 000000000000..7d39add7d467 --- /dev/null +++ b/drivers/net/ieee802154/fakehard.c @@ -0,0 +1,448 @@ +/* + * Sample driver for HardMAC IEEE 802.15.4 devices + * + * Copyright (C) 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> + +#include <net/af_ieee802154.h> +#include <net/ieee802154_netdev.h> +#include <net/ieee802154.h> +#include <net/nl802154.h> +#include <net/wpan-phy.h> + +struct fakehard_priv { + struct wpan_phy *phy; +}; + +static struct wpan_phy *fake_to_phy(const struct net_device *dev) +{ + struct fakehard_priv *priv = netdev_priv(dev); + return priv->phy; +} + +/** + * fake_get_phy - Return a phy corresponding to this device. + * @dev: The network device for which to return the wan-phy object + * + * This function returns a wpan-phy object corresponding to the passed + * network device. Reference counter for wpan-phy object is incremented, + * so when the wpan-phy isn't necessary, you should drop the reference + * via @wpan_phy_put() call. + */ +static struct wpan_phy *fake_get_phy(const struct net_device *dev) +{ + struct wpan_phy *phy = fake_to_phy(dev); + return to_phy(get_device(&phy->dev)); +} + +/** + * fake_get_pan_id - Retrieve the PAN ID of the device. + * @dev: The network device to retrieve the PAN of. + * + * Return the ID of the PAN from the PIB. + */ +static u16 fake_get_pan_id(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0xeba1; +} + +/** + * fake_get_short_addr - Retrieve the short address of the device. + * @dev: The network device to retrieve the short address of. 
+ * + * Returns the IEEE 802.15.4 short-form address cached for this + * device. If the device has not yet had a short address assigned + * then this should return 0xFFFF to indicate a lack of association. + */ +static u16 fake_get_short_addr(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0x1; +} + +/** + * fake_get_dsn - Retrieve the DSN of the device. + * @dev: The network device to retrieve the DSN for. + * + * Returns the IEEE 802.15.4 DSN for the network device. + * The DSN is the sequence number which will be added to each + * packet or MAC command frame by the MAC during transmission. + * + * DSN means 'Data Sequence Number'. + * + * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 + * document. + */ +static u8 fake_get_dsn(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0x00; /* DSN are implemented in HW, so return just 0 */ +} + +/** + * fake_get_bsn - Retrieve the BSN of the device. + * @dev: The network device to retrieve the BSN for. + * + * Returns the IEEE 802.15.4 BSN for the network device. + * The BSN is the sequence number which will be added to each + * beacon frame sent by the MAC. + * + * BSN means 'Beacon Sequence Number'. + * + * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 + * document. + */ +static u8 fake_get_bsn(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0x00; /* BSN are implemented in HW, so return just 0 */ +} + +/** + * fake_assoc_req - Make an association request to the HW. + * @dev: The network device which we are associating to a network. + * @addr: The coordinator with which we wish to associate. + * @channel: The channel on which to associate. + * @cap: The capability information field to use in the association. + * + * Start an association with a coordinator. The coordinator's address + * and PAN ID can be found in @addr. + * + * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE + * 802.15.4-2006 document. + */ +static int fake_assoc_req(struct net_device *dev, + struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap) +{ + struct wpan_phy *phy = fake_to_phy(dev); + + mutex_lock(&phy->pib_lock); + phy->current_channel = channel; + phy->current_page = page; + mutex_unlock(&phy->pib_lock); + + /* We simply emulate it here */ + return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev), + IEEE802154_SUCCESS); +} + +/** + * fake_assoc_resp - Send an association response to a device. + * @dev: The network device on which to send the response. + * @addr: The address of the device to respond to. + * @short_addr: The assigned short address for the device (if any). + * @status: The result of the association request. + * + * Queue the association response of the coordinator to another + * device's attempt to associate with the network which we + * coordinate. This is then added to the indirect-send queue to be + * transmitted to the end device when it polls for data. + * + * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE + * 802.15.4-2006 document. + */ +static int fake_assoc_resp(struct net_device *dev, + struct ieee802154_addr *addr, u16 short_addr, u8 status) +{ + return 0; +} + +/** + * fake_disassoc_req - Disassociate a device from a network. + * @dev: The network device on which we're disassociating a device. + * @addr: The device to disassociate from the network. + * @reason: The reason to give to the device for being disassociated. 
+ * + * This sends a disassociation notification to the device being + * disassociated from the network. + * + * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006 + * document, with the reason described in 7.3.3.2. + */ +static int fake_disassoc_req(struct net_device *dev, + struct ieee802154_addr *addr, u8 reason) +{ + return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS); +} + +/** + * fake_start_req - Start an IEEE 802.15.4 PAN. + * @dev: The network device on which to start the PAN. + * @addr: The coordinator address to use when starting the PAN. + * @channel: The channel on which to start the PAN. + * @bcn_ord: Beacon order. + * @sf_ord: Superframe order. + * @pan_coord: Whether or not we are the PAN coordinator or just + * requesting a realignment perhaps? + * @blx: Battery Life Extension feature bitfield. + * @coord_realign: Something to realign something else. + * + * If pan_coord is non-zero then this starts a network with the + * provided parameters, otherwise it attempts a coordinator + * realignment of the stated network instead. + * + * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006 + * document, with 7.3.8 describing coordinator realignment. + */ +static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr, + u8 channel, u8 page, + u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, + u8 coord_realign) +{ + struct wpan_phy *phy = fake_to_phy(dev); + + mutex_lock(&phy->pib_lock); + phy->current_channel = channel; + phy->current_page = page; + mutex_unlock(&phy->pib_lock); + + /* We don't emulate beacons here at all, so START should fail */ + ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER); + return 0; +} + +/** + * fake_scan_req - Start a channel scan. + * @dev: The network device on which to perform a channel scan. + * @type: The type of scan to perform. + * @channels: The channel bitmask to scan. + * @duration: How long to spend on each channel. + * + * This starts either a passive (energy) scan or an active (PAN) scan + * on the channels indicated in the @channels bitmask. The duration of + * the scan is measured in terms of superframe duration. Specifically, + * the scan will spend aBaseSuperFrameDuration * ((2^n) + 1) on each + * channel. + * + * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document. + */ +static int fake_scan_req(struct net_device *dev, u8 type, u32 channels, + u8 page, u8 duration) +{ + u8 edl[27] = {}; + return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type, + channels, page, + type == IEEE802154_MAC_SCAN_ED ? edl : NULL); +} + +static struct ieee802154_mlme_ops fake_mlme = { + .assoc_req = fake_assoc_req, + .assoc_resp = fake_assoc_resp, + .disassoc_req = fake_disassoc_req, + .start_req = fake_start_req, + .scan_req = fake_scan_req, + + .get_phy = fake_get_phy, + + .get_pan_id = fake_get_pan_id, + .get_short_addr = fake_get_short_addr, + .get_dsn = fake_get_dsn, + .get_bsn = fake_get_bsn, +}; + +static int ieee802154_fake_open(struct net_device *dev) +{ + netif_start_queue(dev); + return 0; +} + +static int ieee802154_fake_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* FIXME: do hardware work here ... 
*/ + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + + +static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct sockaddr_ieee802154 *sa = + (struct sockaddr_ieee802154 *)&ifr->ifr_addr; + u16 pan_id, short_addr; + + switch (cmd) { + case SIOCGIFADDR: + /* FIXME: fixed here, get from device IRL */ + pan_id = fake_get_pan_id(dev); + short_addr = fake_get_short_addr(dev); + if (pan_id == IEEE802154_PANID_BROADCAST || + short_addr == IEEE802154_ADDR_BROADCAST) + return -EADDRNOTAVAIL; + + sa->family = AF_IEEE802154; + sa->addr.addr_type = IEEE802154_ADDR_SHORT; + sa->addr.pan_id = pan_id; + sa->addr.short_addr = short_addr; + return 0; + } + return -ENOIOCTLCMD; +} + +static int ieee802154_fake_mac_addr(struct net_device *dev, void *p) +{ + return -EBUSY; /* HW address is built into the device */ +} + +static const struct net_device_ops fake_ops = { + .ndo_open = ieee802154_fake_open, + .ndo_stop = ieee802154_fake_close, + .ndo_start_xmit = ieee802154_fake_xmit, + .ndo_do_ioctl = ieee802154_fake_ioctl, + .ndo_set_mac_address = ieee802154_fake_mac_addr, +}; + +static void ieee802154_fake_destruct(struct net_device *dev) +{ + struct wpan_phy *phy = fake_to_phy(dev); + + wpan_phy_unregister(phy); + free_netdev(dev); + wpan_phy_free(phy); +} + +static void ieee802154_fake_setup(struct net_device *dev) +{ + dev->addr_len = IEEE802154_ADDR_LEN; + memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); + dev->features = NETIF_F_HW_CSUM; + dev->needed_tailroom = 2; /* FCS */ + dev->mtu = 127; + dev->tx_queue_len = 10; + dev->type = ARPHRD_IEEE802154; + dev->flags = IFF_NOARP | IFF_BROADCAST; + dev->watchdog_timeo = 0; + dev->destructor = ieee802154_fake_destruct; +} + + +static int __devinit ieee802154fake_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct fakehard_priv *priv; + struct wpan_phy *phy = wpan_phy_alloc(0); + int err; + + if (!phy) + return -ENOMEM; + + dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup); + if (!dev) { + wpan_phy_free(phy); + return -ENOMEM; + } + + memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef", + dev->addr_len); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + /* + * For now we'd like to emulate 2.4 GHz-only device, + * both O-QPSK and CSS + */ + /* 2.4 GHz O-QPSK 802.15.4-2003 */ + phy->channels_supported[0] |= 0x7FFF800; + /* 2.4 GHz CSS 802.15.4a-2007 */ + phy->channels_supported[3] |= 0x3fff; + + phy->transmit_power = 0xbf; + + dev->netdev_ops = &fake_ops; + dev->ml_priv = &fake_mlme; + + priv = netdev_priv(dev); + priv->phy = phy; + + wpan_phy_set_dev(phy, &pdev->dev); + SET_NETDEV_DEV(dev, &phy->dev); + + platform_set_drvdata(pdev, dev); + + err = wpan_phy_register(phy); + if (err) + goto out; + + err = register_netdev(dev); + if (err < 0) + goto out; + + dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); + return 0; + +out: + unregister_netdev(dev); + return err; +} + +static int __devexit ieee802154fake_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + unregister_netdev(dev); + return 0; +} + +static struct platform_device *ieee802154fake_dev; + +static struct platform_driver ieee802154fake_driver = { + .probe = ieee802154fake_probe, + .remove = __devexit_p(ieee802154fake_remove), + .driver = { + .name = "ieee802154hardmac", + .owner = THIS_MODULE, + }, +}; + +static __init int fake_init(void) +{ + ieee802154fake_dev = platform_device_register_simple( + "ieee802154hardmac", -1, NULL, 0); + 
return platform_driver_register(&ieee802154fake_driver); +} + +static __exit void fake_exit(void) +{ + platform_driver_unregister(&ieee802154fake_driver); + platform_device_unregister(ieee802154fake_dev); +} + +module_init(fake_init); +module_exit(fake_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c new file mode 100644 index 000000000000..e7456fcd0913 --- /dev/null +++ b/drivers/net/ieee802154/fakelb.c @@ -0,0 +1,294 @@ +/* + * Loopback IEEE 802.15.4 interface + * + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Sergey Lapin <slapin@ossfans.org> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +static int numlbs = 1; + +struct fakelb_dev_priv { + struct ieee802154_dev *dev; + + struct list_head list; + struct fakelb_priv *fake; + + spinlock_t lock; + bool working; +}; + +struct fakelb_priv { + struct list_head list; + rwlock_t lock; +}; + +static int +fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level) +{ + might_sleep(); + BUG_ON(!level); + *level = 0xbe; + + return 0; +} + +static int +fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel) +{ + pr_debug("set channel to %d\n", channel); + + might_sleep(); + dev->phy->current_page = page; + dev->phy->current_channel = channel; + + return 0; +} + +static void +fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *newskb; + + spin_lock(&priv->lock); + if (priv->working) { + newskb = pskb_copy(skb, GFP_ATOMIC); + ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc); + } + spin_unlock(&priv->lock); +} + +static int +fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct fakelb_dev_priv *priv = dev->priv; + struct fakelb_priv *fake = priv->fake; + + might_sleep(); + + read_lock_bh(&fake->lock); + if (priv->list.next == priv->list.prev) { + /* we are the only one device */ + fakelb_hw_deliver(priv, skb); + } else { + struct fakelb_dev_priv *dp; + list_for_each_entry(dp, &priv->fake->list, list) { + if (dp != priv && + (dp->dev->phy->current_channel == + priv->dev->phy->current_channel)) + fakelb_hw_deliver(dp, skb); + } + } + read_unlock_bh(&fake->lock); + + return 0; +} + +static int +fakelb_hw_start(struct ieee802154_dev *dev) { + struct fakelb_dev_priv *priv = dev->priv; + int ret = 0; + + spin_lock(&priv->lock); + if (priv->working) + ret = -EBUSY; + else + priv->working = 1; + spin_unlock(&priv->lock); + + return ret; +} + +static void +fakelb_hw_stop(struct ieee802154_dev *dev) { + struct fakelb_dev_priv *priv = dev->priv; + + spin_lock(&priv->lock); + priv->working = 0; + 
spin_unlock(&priv->lock); +} + +static struct ieee802154_ops fakelb_ops = { + .owner = THIS_MODULE, + .xmit = fakelb_hw_xmit, + .ed = fakelb_hw_ed, + .set_channel = fakelb_hw_channel, + .start = fakelb_hw_start, + .stop = fakelb_hw_stop, +}; + +/* Number of dummy devices to be set up by this module. */ +module_param(numlbs, int, 0); +MODULE_PARM_DESC(numlbs, " number of pseudo devices"); + +static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) +{ + struct fakelb_dev_priv *priv; + int err; + struct ieee802154_dev *ieee; + + ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops); + if (!ieee) + return -ENOMEM; + + priv = ieee->priv; + priv->dev = ieee; + + /* 868 MHz BPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 1; + /* 915 MHz BPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 0x7fe; + /* 2.4 GHz O-QPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 0x7FFF800; + /* 868 MHz ASK 802.15.4-2006 */ + ieee->phy->channels_supported[1] |= 1; + /* 915 MHz ASK 802.15.4-2006 */ + ieee->phy->channels_supported[1] |= 0x7fe; + /* 868 MHz O-QPSK 802.15.4-2006 */ + ieee->phy->channels_supported[2] |= 1; + /* 915 MHz O-QPSK 802.15.4-2006 */ + ieee->phy->channels_supported[2] |= 0x7fe; + /* 2.4 GHz CSS 802.15.4a-2007 */ + ieee->phy->channels_supported[3] |= 0x3fff; + /* UWB Sub-gigahertz 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 1; + /* UWB Low band 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 0x1e; + /* UWB High band 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 0xffe0; + /* 750 MHz O-QPSK 802.15.4c-2009 */ + ieee->phy->channels_supported[5] |= 0xf; + /* 750 MHz MPSK 802.15.4c-2009 */ + ieee->phy->channels_supported[5] |= 0xf0; + /* 950 MHz BPSK 802.15.4d-2009 */ + ieee->phy->channels_supported[6] |= 0x3ff; + /* 950 MHz GFSK 802.15.4d-2009 */ + ieee->phy->channels_supported[6] |= 0x3ffc00; + + INIT_LIST_HEAD(&priv->list); + priv->fake = fake; + + spin_lock_init(&priv->lock); + + ieee->parent = dev; + + err = ieee802154_register_device(ieee); + if (err) + goto err_reg; + + write_lock_bh(&fake->lock); + list_add_tail(&priv->list, &fake->list); + write_unlock_bh(&fake->lock); + + return 0; + +err_reg: + ieee802154_free_device(priv->dev); + return err; +} + +static void fakelb_del(struct fakelb_dev_priv *priv) +{ + write_lock_bh(&priv->fake->lock); + list_del(&priv->list); + write_unlock_bh(&priv->fake->lock); + + ieee802154_unregister_device(priv->dev); + ieee802154_free_device(priv->dev); +} + +static int __devinit fakelb_probe(struct platform_device *pdev) +{ + struct fakelb_priv *priv; + struct fakelb_dev_priv *dp; + int err = -ENOMEM; + int i; + + priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL); + if (!priv) + goto err_alloc; + + INIT_LIST_HEAD(&priv->list); + rwlock_init(&priv->lock); + + for (i = 0; i < numlbs; i++) { + err = fakelb_add_one(&pdev->dev, priv); + if (err < 0) + goto err_slave; + } + + platform_set_drvdata(pdev, priv); + dev_info(&pdev->dev, "added ieee802154 hardware\n"); + return 0; + +err_slave: + list_for_each_entry(dp, &priv->list, list) + fakelb_del(dp); + kfree(priv); +err_alloc: + return err; +} + +static int __devexit fakelb_remove(struct platform_device *pdev) +{ + struct fakelb_priv *priv = platform_get_drvdata(pdev); + struct fakelb_dev_priv *dp, *temp; + + list_for_each_entry_safe(dp, temp, &priv->list, list) + fakelb_del(dp); + kfree(priv); + + return 0; +} + +static struct platform_device *ieee802154fake_dev; + +static struct platform_driver ieee802154fake_driver = { + .probe = 
fakelb_probe, + .remove = __devexit_p(fakelb_remove), + .driver = { + .name = "ieee802154fakelb", + .owner = THIS_MODULE, + }, +}; + +static __init int fakelb_init_module(void) +{ + ieee802154fake_dev = platform_device_register_simple( + "ieee802154fakelb", -1, NULL, 0); + return platform_driver_register(&ieee802154fake_driver); +} + +static __exit void fake_remove_module(void) +{ + platform_driver_unregister(&ieee802154fake_driver); + platform_device_unregister(ieee802154fake_dev); +} + +module_init(fakelb_init_module); +module_exit(fake_remove_module); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c new file mode 100644 index 000000000000..0e53d4f431d2 --- /dev/null +++ b/drivers/net/ieee802154/mrf24j40.c @@ -0,0 +1,767 @@ +/* + * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller + * + * Copyright (C) 2012 Alan Ott <alan@signal11.us> + * Signal 11 Software + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/spi/spi.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <net/wpan-phy.h> +#include <net/mac802154.h> + +/* MRF24J40 Short Address Registers */ +#define REG_RXMCR 0x00 /* Receive MAC control */ +#define REG_PANIDL 0x01 /* PAN ID (low) */ +#define REG_PANIDH 0x02 /* PAN ID (high) */ +#define REG_SADRL 0x03 /* Short address (low) */ +#define REG_SADRH 0x04 /* Short address (high) */ +#define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */ +#define REG_TXMCR 0x11 /* Transmit MAC control */ +#define REG_PACON0 0x16 /* Power Amplifier Control */ +#define REG_PACON1 0x17 /* Power Amplifier Control */ +#define REG_PACON2 0x18 /* Power Amplifier Control */ +#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */ +#define REG_TXSTAT 0x24 /* TX MAC Status Register */ +#define REG_SOFTRST 0x2A /* Soft Reset */ +#define REG_TXSTBL 0x2E /* TX Stabilization */ +#define REG_INTSTAT 0x31 /* Interrupt Status */ +#define REG_INTCON 0x32 /* Interrupt Control */ +#define REG_RFCTL 0x36 /* RF Control Mode Register */ +#define REG_BBREG1 0x39 /* Baseband Registers */ +#define REG_BBREG2 0x3A /* */ +#define REG_BBREG6 0x3E /* */ +#define REG_CCAEDTH 0x3F /* Energy Detection Threshold */ + +/* MRF24J40 Long Address Registers */ +#define REG_RFCON0 0x200 /* RF Control Registers */ +#define REG_RFCON1 0x201 +#define REG_RFCON2 0x202 +#define REG_RFCON3 0x203 +#define REG_RFCON5 0x205 +#define REG_RFCON6 0x206 +#define REG_RFCON7 0x207 +#define REG_RFCON8 0x208 +#define REG_RSSI 0x210 +#define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */ +#define REG_SLPCON1 0x220 +#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */ +#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */ +#define REG_RX_FIFO 0x300 /* Receive FIFO */ + +/* Device configuration: Only channels 11-26 on page 0 are supported. 
*/ +#define MRF24J40_CHAN_MIN 11 +#define MRF24J40_CHAN_MAX 26 +#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \ + - ((u32)1 << MRF24J40_CHAN_MIN)) + +#define TX_FIFO_SIZE 128 /* From datasheet */ +#define RX_FIFO_SIZE 144 /* From datasheet */ +#define SET_CHANNEL_DELAY_US 192 /* From datasheet */ + +/* Device Private Data */ +struct mrf24j40 { + struct spi_device *spi; + struct ieee802154_dev *dev; + + struct mutex buffer_mutex; /* only used to protect buf */ + struct completion tx_complete; + struct work_struct irqwork; + u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ +}; + +/* Read/Write SPI Commands for Short and Long Address registers. */ +#define MRF24J40_READSHORT(reg) ((reg) << 1) +#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1) +#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5) +#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) + +/* Maximum speed to run the device at. TODO: Get the real max value from + * someone at Microchip since it isn't in the datasheet. */ +#define MAX_SPI_SPEED_HZ 1000000 + +#define printdev(X) (&X->spi->dev) + +static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value) +{ + int ret; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = MRF24J40_WRITESHORT(reg); + devrec->buf[1] = value; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI write Failed for short register 0x%hhx\n", reg); + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val) +{ + int ret = -1; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = MRF24J40_READSHORT(reg); + devrec->buf[1] = 0; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI read Failed for short register 0x%hhx\n", reg); + else + *val = devrec->buf[1]; + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value) +{ + int ret; + u16 cmd; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 3, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + cmd = MRF24J40_READLONG(reg); + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = cmd >> 8 & 0xff; + devrec->buf[1] = cmd & 0xff; + devrec->buf[2] = 0; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI read Failed for long register 0x%hx\n", reg); + else + *value = devrec->buf[2]; + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val) +{ + int ret; + u16 cmd; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 3, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + cmd = MRF24J40_WRITELONG(reg); + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = cmd >> 8 & 0xff; + devrec->buf[1] = cmd & 0xff; + devrec->buf[2] = val; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI write Failed for long register 0x%hx\n", reg); + + 
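Note: the four command macros above encode the MRF24J40's SPI framing. Short-address registers take a single command byte (the address shifted left by one, bit 0 set for a write); long-address registers take a 16-bit command sent high byte first, exactly as read_long_reg() and write_long_reg() fill devrec->buf[0] and buf[1]. A standalone sketch, assuming only the macros quoted above, that prints the resulting command bytes for registers defined in this file:

#include <stdio.h>
#include <stdint.h>

/* Copied from the driver's macros above. */
#define MRF24J40_READSHORT(reg)  ((reg) << 1)
#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
#define MRF24J40_READLONG(reg)   (1 << 15 | (reg) << 5)
#define MRF24J40_WRITELONG(reg)  (1 << 15 | (reg) << 5 | 1 << 4)

int main(void)
{
	uint16_t cmd;

	/* Short-address registers: one command byte. */
	printf("write short 0x00 (RXMCR):   cmd byte 0x%02x\n",
	       MRF24J40_WRITESHORT(0x00));
	printf("read  short 0x31 (INTSTAT): cmd byte 0x%02x\n",
	       MRF24J40_READSHORT(0x31));

	/* Long-address registers: two command bytes, high byte first,
	 * matching how write_long_reg() fills buf[0] and buf[1]. */
	cmd = MRF24J40_WRITELONG(0x200);	/* REG_RFCON0 */
	printf("write long 0x200: %02x %02x\n", cmd >> 8 & 0xff, cmd & 0xff);

	cmd = MRF24J40_READLONG(0x300);		/* REG_RX_FIFO */
	printf("read  long 0x300: %02x %02x\n", cmd >> 8 & 0xff, cmd & 0xff);
	return 0;
}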
mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +/* This function relies on an undocumented write method. Once a write command + and address is set, as many bytes of data as desired can be clocked into + the device. The datasheet only shows setting one byte at a time. */ +static int write_tx_buf(struct mrf24j40 *devrec, u16 reg, + const u8 *data, size_t length) +{ + int ret; + u16 cmd; + u8 lengths[2]; + struct spi_message msg; + struct spi_transfer addr_xfer = { + .len = 2, + .tx_buf = devrec->buf, + }; + struct spi_transfer lengths_xfer = { + .len = 2, + .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */ + }; + struct spi_transfer data_xfer = { + .len = length, + .tx_buf = data, + }; + + /* Range check the length. 2 bytes are used for the length fields.*/ + if (length > TX_FIFO_SIZE-2) { + dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n"); + length = TX_FIFO_SIZE-2; + } + + spi_message_init(&msg); + spi_message_add_tail(&addr_xfer, &msg); + spi_message_add_tail(&lengths_xfer, &msg); + spi_message_add_tail(&data_xfer, &msg); + + cmd = MRF24J40_WRITELONG(reg); + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = cmd >> 8 & 0xff; + devrec->buf[1] = cmd & 0xff; + lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */ + lengths[1] = length; /* Total length */ + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), "SPI write Failed for TX buf\n"); + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec, + u8 *data, u8 *len, u8 *lqi) +{ + u8 rx_len; + u8 addr[2]; + u8 lqi_rssi[2]; + u16 cmd; + int ret; + struct spi_message msg; + struct spi_transfer addr_xfer = { + .len = 2, + .tx_buf = &addr, + }; + struct spi_transfer data_xfer = { + .len = 0x0, /* set below */ + .rx_buf = data, + }; + struct spi_transfer status_xfer = { + .len = 2, + .rx_buf = &lqi_rssi, + }; + + /* Get the length of the data in the RX FIFO. The length in this + * register exclues the 1-byte length field at the beginning. */ + ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len); + if (ret) + goto out; + + /* Range check the RX FIFO length, accounting for the one-byte + * length field at the begining. */ + if (rx_len > RX_FIFO_SIZE-1) { + dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n"); + rx_len = RX_FIFO_SIZE-1; + } + + if (rx_len > *len) { + /* Passed in buffer wasn't big enough. Should never happen. */ + dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n"); + rx_len = *len; + } + + /* Set up the commands to read the data. 
*/ + cmd = MRF24J40_READLONG(REG_RX_FIFO+1); + addr[0] = cmd >> 8 & 0xff; + addr[1] = cmd & 0xff; + data_xfer.len = rx_len; + + spi_message_init(&msg); + spi_message_add_tail(&addr_xfer, &msg); + spi_message_add_tail(&data_xfer, &msg); + spi_message_add_tail(&status_xfer, &msg); + + ret = spi_sync(devrec->spi, &msg); + if (ret) { + dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n"); + goto out; + } + + *lqi = lqi_rssi[0]; + *len = rx_len; + +#ifdef DEBUG + print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", + DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0); + printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", + lqi_rssi[0], lqi_rssi[1]); +#endif + +out: + return ret; +} + +static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret = 0; + + dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len); + + ret = write_tx_buf(devrec, 0x000, skb->data, skb->len); + if (ret) + goto err; + + /* Set TXNTRIG bit of TXNCON to send packet */ + ret = read_short_reg(devrec, REG_TXNCON, &val); + if (ret) + goto err; + val |= 0x1; + val &= ~0x4; + write_short_reg(devrec, REG_TXNCON, val); + + INIT_COMPLETION(devrec->tx_complete); + + /* Wait for the device to send the TX complete interrupt. */ + ret = wait_for_completion_interruptible_timeout( + &devrec->tx_complete, + 5 * HZ); + if (ret == -ERESTARTSYS) + goto err; + if (ret == 0) { + ret = -ETIMEDOUT; + goto err; + } + + /* Check for send error from the device. */ + ret = read_short_reg(devrec, REG_TXSTAT, &val); + if (ret) + goto err; + if (val & 0x1) { + dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n"); + ret = -ECOMM; /* TODO: Better error code ? */ + } else + dev_dbg(printdev(devrec), "Packet Sent\n"); + +err: + + return ret; +} + +static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level) +{ + /* TODO: */ + printk(KERN_WARNING "mrf24j40: ed not implemented\n"); + *level = 0; + return 0; +} + +static int mrf24j40_start(struct ieee802154_dev *dev) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret; + + dev_dbg(printdev(devrec), "start\n"); + + ret = read_short_reg(devrec, REG_INTCON, &val); + if (ret) + return ret; + val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */ + write_short_reg(devrec, REG_INTCON, val); + + return 0; +} + +static void mrf24j40_stop(struct ieee802154_dev *dev) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret; + dev_dbg(printdev(devrec), "stop\n"); + + ret = read_short_reg(devrec, REG_INTCON, &val); + if (ret) + return; + val |= 0x1|0x8; /* Set TXNIE and RXIE. 
Disable Interrupts */ + write_short_reg(devrec, REG_INTCON, val); + + return; +} + +static int mrf24j40_set_channel(struct ieee802154_dev *dev, + int page, int channel) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret; + + dev_dbg(printdev(devrec), "Set Channel %d\n", channel); + + WARN_ON(page != 0); + WARN_ON(channel < MRF24J40_CHAN_MIN); + WARN_ON(channel > MRF24J40_CHAN_MAX); + + /* Set Channel TODO */ + val = (channel-11) << 4 | 0x03; + write_long_reg(devrec, REG_RFCON0, val); + + /* RF Reset */ + ret = read_short_reg(devrec, REG_RFCTL, &val); + if (ret) + return ret; + val |= 0x04; + write_short_reg(devrec, REG_RFCTL, val); + val &= ~0x04; + write_short_reg(devrec, REG_RFCTL, val); + + udelay(SET_CHANNEL_DELAY_US); /* per datasheet */ + + return 0; +} + +static int mrf24j40_filter(struct ieee802154_dev *dev, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed) +{ + struct mrf24j40 *devrec = dev->priv; + + dev_dbg(printdev(devrec), "filter\n"); + + if (changed & IEEE802515_AFILT_SADDR_CHANGED) { + /* Short Addr */ + u8 addrh, addrl; + addrh = filt->short_addr >> 8 & 0xff; + addrl = filt->short_addr & 0xff; + + write_short_reg(devrec, REG_SADRH, addrh); + write_short_reg(devrec, REG_SADRL, addrl); + dev_dbg(printdev(devrec), + "Set short addr to %04hx\n", filt->short_addr); + } + + if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { + /* Device Address */ + int i; + for (i = 0; i < 8; i++) + write_short_reg(devrec, REG_EADR0+i, + filt->ieee_addr[i]); + +#ifdef DEBUG + printk(KERN_DEBUG "Set long addr to: "); + for (i = 0; i < 8; i++) + printk("%02hhx ", filt->ieee_addr[i]); + printk(KERN_DEBUG "\n"); +#endif + } + + if (changed & IEEE802515_AFILT_PANID_CHANGED) { + /* PAN ID */ + u8 panidl, panidh; + panidh = filt->pan_id >> 8 & 0xff; + panidl = filt->pan_id & 0xff; + write_short_reg(devrec, REG_PANIDH, panidh); + write_short_reg(devrec, REG_PANIDL, panidl); + + dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id); + } + + if (changed & IEEE802515_AFILT_PANC_CHANGED) { + /* Pan Coordinator */ + u8 val; + int ret; + + ret = read_short_reg(devrec, REG_RXMCR, &val); + if (ret) + return ret; + if (filt->pan_coord) + val |= 0x8; + else + val &= ~0x8; + write_short_reg(devrec, REG_RXMCR, val); + + /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA). + * REG_ORDER is maintained as default (no beacon/superframe). + */ + + dev_dbg(printdev(devrec), "Set Pan Coord to %s\n", + filt->pan_coord ? "on" : "off"); + } + + return 0; +} + +static int mrf24j40_handle_rx(struct mrf24j40 *devrec) +{ + u8 len = RX_FIFO_SIZE; + u8 lqi = 0; + u8 val; + int ret = 0; + struct sk_buff *skb; + + /* Turn off reception of packets off the air. This prevents the + * device from overwriting the buffer while we're reading it. */ + ret = read_short_reg(devrec, REG_BBREG1, &val); + if (ret) + goto out; + val |= 4; /* SET RXDECINV */ + write_short_reg(devrec, REG_BBREG1, val); + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi); + if (ret < 0) { + dev_err(printdev(devrec), "Failure reading RX FIFO\n"); + kfree_skb(skb); + ret = -EINVAL; + goto out; + } + + /* Cut off the checksum */ + skb_trim(skb, len-2); + + /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040, + * also from a workqueue). I think irqsafe is not necessary here. + * Can someone confirm? 
*/ + ieee802154_rx_irqsafe(devrec->dev, skb, lqi); + + dev_dbg(printdev(devrec), "RX Handled\n"); + +out: + /* Turn back on reception of packets off the air. */ + ret = read_short_reg(devrec, REG_BBREG1, &val); + if (ret) + return ret; + val &= ~0x4; /* Clear RXDECINV */ + write_short_reg(devrec, REG_BBREG1, val); + + return ret; +} + +static struct ieee802154_ops mrf24j40_ops = { + .owner = THIS_MODULE, + .xmit = mrf24j40_tx, + .ed = mrf24j40_ed, + .start = mrf24j40_start, + .stop = mrf24j40_stop, + .set_channel = mrf24j40_set_channel, + .set_hw_addr_filt = mrf24j40_filter, +}; + +static irqreturn_t mrf24j40_isr(int irq, void *data) +{ + struct mrf24j40 *devrec = data; + + disable_irq_nosync(irq); + + schedule_work(&devrec->irqwork); + + return IRQ_HANDLED; +} + +static void mrf24j40_isrwork(struct work_struct *work) +{ + struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork); + u8 intstat; + int ret; + + /* Read the interrupt status */ + ret = read_short_reg(devrec, REG_INTSTAT, &intstat); + if (ret) + goto out; + + /* Check for TX complete */ + if (intstat & 0x1) + complete(&devrec->tx_complete); + + /* Check for Rx */ + if (intstat & 0x8) + mrf24j40_handle_rx(devrec); + +out: + enable_irq(devrec->spi->irq); +} + +static int __devinit mrf24j40_probe(struct spi_device *spi) +{ + int ret = -ENOMEM; + u8 val; + struct mrf24j40 *devrec; + + printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); + + devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL); + if (!devrec) + goto err_devrec; + devrec->buf = kzalloc(3, GFP_KERNEL); + if (!devrec->buf) + goto err_buf; + + spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */ + if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) + spi->max_speed_hz = MAX_SPI_SPEED_HZ; + + mutex_init(&devrec->buffer_mutex); + init_completion(&devrec->tx_complete); + INIT_WORK(&devrec->irqwork, mrf24j40_isrwork); + devrec->spi = spi; + dev_set_drvdata(&spi->dev, devrec); + + /* Register with the 802154 subsystem */ + + devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops); + if (!devrec->dev) + goto err_alloc_dev; + + devrec->dev->priv = devrec; + devrec->dev->parent = &devrec->spi->dev; + devrec->dev->phy->channels_supported[0] = CHANNEL_MASK; + devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK; + + dev_dbg(printdev(devrec), "registered mrf24j40\n"); + ret = ieee802154_register_device(devrec->dev); + if (ret) + goto err_register_device; + + /* Initialize the device. + From datasheet section 3.2: Initialization. */ + write_short_reg(devrec, REG_SOFTRST, 0x07); + write_short_reg(devrec, REG_PACON2, 0x98); + write_short_reg(devrec, REG_TXSTBL, 0x95); + write_long_reg(devrec, REG_RFCON0, 0x03); + write_long_reg(devrec, REG_RFCON1, 0x01); + write_long_reg(devrec, REG_RFCON2, 0x80); + write_long_reg(devrec, REG_RFCON6, 0x90); + write_long_reg(devrec, REG_RFCON7, 0x80); + write_long_reg(devrec, REG_RFCON8, 0x10); + write_long_reg(devrec, REG_SLPCON1, 0x21); + write_short_reg(devrec, REG_BBREG2, 0x80); + write_short_reg(devrec, REG_CCAEDTH, 0x60); + write_short_reg(devrec, REG_BBREG6, 0x40); + write_short_reg(devrec, REG_RFCTL, 0x04); + write_short_reg(devrec, REG_RFCTL, 0x0); + udelay(192); + + /* Set RX Mode. 
RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */ + ret = read_short_reg(devrec, REG_RXMCR, &val); + if (ret) + goto err_read_reg; + val &= ~0x3; /* Clear RX mode (normal) */ + write_short_reg(devrec, REG_RXMCR, val); + + ret = request_irq(spi->irq, + mrf24j40_isr, + IRQF_TRIGGER_FALLING, + dev_name(&spi->dev), + devrec); + + if (ret) { + dev_err(printdev(devrec), "Unable to get IRQ"); + goto err_irq; + } + + return 0; + +err_irq: +err_read_reg: + ieee802154_unregister_device(devrec->dev); +err_register_device: + ieee802154_free_device(devrec->dev); +err_alloc_dev: + kfree(devrec->buf); +err_buf: + kfree(devrec); +err_devrec: + return ret; +} + +static int __devexit mrf24j40_remove(struct spi_device *spi) +{ + struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev); + + dev_dbg(printdev(devrec), "remove\n"); + + free_irq(spi->irq, devrec); + flush_work_sync(&devrec->irqwork); /* TODO: Is this the right call? */ + ieee802154_unregister_device(devrec->dev); + ieee802154_free_device(devrec->dev); + /* TODO: Will ieee802154_free_device() wait until ->xmit() is + * complete? */ + + /* Clean up the SPI stuff. */ + dev_set_drvdata(&spi->dev, NULL); + kfree(devrec->buf); + kfree(devrec); + return 0; +} + +static const struct spi_device_id mrf24j40_ids[] = { + { "mrf24j40", 0 }, + { "mrf24j40ma", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, mrf24j40_ids); + +static struct spi_driver mrf24j40_driver = { + .driver = { + .name = "mrf24j40", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .id_table = mrf24j40_ids, + .probe = mrf24j40_probe, + .remove = __devexit_p(mrf24j40_remove), +}; + +static int __init mrf24j40_init(void) +{ + return spi_register_driver(&mrf24j40_driver); +} + +static void __exit mrf24j40_exit(void) +{ + spi_unregister_driver(&mrf24j40_driver); +} + +module_init(mrf24j40_init); +module_exit(mrf24j40_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alan Ott"); +MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver"); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index e2a06fd996d5..4a075babe193 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net) if (err) goto out_free_netdev; + BUG_ON(dev->ifindex != LOOPBACK_IFINDEX); net->loopback_dev = dev; return 0; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 3090dc65a6f1..983bbf4d5ef6 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO several child MDIO busses to a parent bus. Child bus selection is under the control of GPIO lines. +config MDIO_BUS_MUX_MMIOREG + tristate "Support for MMIO device-controlled MDIO bus multiplexers" + depends on OF_MDIO + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via a simple memory-mapped device, like an FPGA. + The multiplexer connects one of several child MDIO busses to a + parent bus. Child bus selection is under the control of one of + the FPGA's registers. + + Currently, only 8-bit registers are supported. 
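Note: the help text above describes the selection scheme that the mdio-mux-mmioreg driver added later in this patch implements: only the bits covered by the mux-mask property select the child bus, and the register is rewritten only when the masked value actually changes. A user-space rendering of that switch logic, with a plain variable standing in for the ioremap()/ioread8()/iowrite8() access (fake_reg, mmioreg_switch and the starting value 0x05 are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>

/* A plain variable stands in for the 8-bit FPGA register. */
static uint8_t fake_reg = 0x05;

/* Same shape as the driver's switch callback: the bits under "mask"
 * select the child bus; write back only when the value changes. */
static int mmioreg_switch(uint8_t mask, int current_child, int desired_child)
{
	if (current_child ^ desired_child) {
		uint8_t x = fake_reg;
		uint8_t y = (x & ~mask) | desired_child;

		if (x != y) {
			fake_reg = y;
			printf("switch: %02x -> %02x\n", x, y);
		}
	}
	return 0;
}

int main(void)
{
	/* mux-mask 0x07: the low three bits select the child bus. */
	mmioreg_switch(0x07, -1, 0x02);		/* first call, current_child == -1 */
	mmioreg_switch(0x07, 0x02, 0x02);	/* already selected, nothing written */
	return 0;
}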
+ endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 6d2dc6c94f2e..426674debae4 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o +obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7189adf54bd1..899274f2f9b1 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -28,17 +28,38 @@ #include <linux/gpio.h> #include <linux/mdio-gpio.h> -#ifdef CONFIG_OF_GPIO #include <linux/of_gpio.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> -#endif struct mdio_gpio_info { struct mdiobb_ctrl ctrl; int mdc, mdio; }; +static void *mdio_gpio_of_get_data(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mdio_gpio_platform_data *pdata; + int ret; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + ret = of_get_gpio(np, 0); + if (ret < 0) + return NULL; + + pdata->mdc = ret; + + ret = of_get_gpio(np, 1); + if (ret < 0) + return NULL; + pdata->mdio = ret; + + return pdata; +} + static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { struct mdio_gpio_info *bitbang = @@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev) static int __devinit mdio_gpio_probe(struct platform_device *pdev) { - struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; + struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; int ret; + if (pdev->dev.of_node) + pdata = mdio_gpio_of_get_data(pdev); + else + pdata = pdev->dev.platform_data; + if (!pdata) return -ENODEV; @@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) if (!new_bus) return -ENODEV; - ret = mdiobus_register(new_bus); + if (pdev->dev.of_node) + ret = of_mdiobus_register(new_bus, pdev->dev.of_node); + else + ret = mdiobus_register(new_bus); + if (ret) mdio_gpio_bus_deinit(&pdev->dev); @@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF_GPIO - -static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev) -{ - struct mdio_gpio_platform_data *pdata; - struct mii_bus *new_bus; - int ret; - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - ret = of_get_gpio(ofdev->dev.of_node, 0); - if (ret < 0) - goto out_free; - pdata->mdc = ret; - - ret = of_get_gpio(ofdev->dev.of_node, 1); - if (ret < 0) - goto out_free; - pdata->mdio = ret; - - new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); - if (!new_bus) - goto out_free; - - ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); - if (ret) - mdio_gpio_bus_deinit(&ofdev->dev); - - return ret; - -out_free: - kfree(pdata); - return -ENODEV; -} - -static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev) -{ - mdio_gpio_bus_destroy(&ofdev->dev); - kfree(ofdev->dev.platform_data); - - return 0; -} - -static struct of_device_id mdio_ofgpio_match[] = { - { - .compatible = "virtual,mdio-gpio", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); - -static struct platform_driver mdio_ofgpio_driver = { - .driver = { - .name = "mdio-ofgpio", - .owner = THIS_MODULE, - .of_match_table = mdio_ofgpio_match, - }, - .probe = mdio_ofgpio_probe, - .remove = 
__devexit_p(mdio_ofgpio_remove), +static struct of_device_id mdio_gpio_of_match[] = { + { .compatible = "virtual,mdio-gpio", }, + { /* sentinel */ } }; -static inline int __init mdio_ofgpio_init(void) -{ - return platform_driver_register(&mdio_ofgpio_driver); -} - -static inline void mdio_ofgpio_exit(void) -{ - platform_driver_unregister(&mdio_ofgpio_driver); -} -#else -static inline int __init mdio_ofgpio_init(void) { return 0; } -static inline void mdio_ofgpio_exit(void) { } -#endif /* CONFIG_OF_GPIO */ - static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, .remove = __devexit_p(mdio_gpio_remove), .driver = { .name = "mdio-gpio", .owner = THIS_MODULE, + .of_match_table = mdio_gpio_of_match, }, }; static int __init mdio_gpio_init(void) { - int ret; - - ret = mdio_ofgpio_init(); - if (ret) - return ret; - - ret = platform_driver_register(&mdio_gpio_driver); - if (ret) - mdio_ofgpio_exit(); - - return ret; + return platform_driver_register(&mdio_gpio_driver); } module_init(mdio_gpio_init); static void __exit mdio_gpio_exit(void) { platform_driver_unregister(&mdio_gpio_driver); - mdio_ofgpio_exit(); } module_exit(mdio_gpio_exit); diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c new file mode 100644 index 000000000000..9061ba622ac4 --- /dev/null +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -0,0 +1,171 @@ +/* + * Simple memory-mapped device MDIO MUX driver + * + * Author: Timur Tabi <timur@freescale.com> + * + * Copyright 2012 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/phy.h> +#include <linux/mdio-mux.h> + +struct mdio_mux_mmioreg_state { + void *mux_handle; + phys_addr_t phys; + uint8_t mask; +}; + +/* + * MDIO multiplexing switch function + * + * This function is called by the mdio-mux layer when it thinks the mdio bus + * multiplexer needs to switch. + * + * 'current_child' is the current value of the mux register (masked via + * s->mask). + * + * 'desired_child' is the value of the 'reg' property of the target child MDIO + * node. + * + * The first time this function is called, current_child == -1. + * + * If current_child == desired_child, then the mux is already set to the + * correct bus. 
+ */ +static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child, + void *data) +{ + struct mdio_mux_mmioreg_state *s = data; + + if (current_child ^ desired_child) { + void *p = ioremap(s->phys, 1); + uint8_t x, y; + + if (!p) + return -ENOMEM; + + x = ioread8(p); + y = (x & ~s->mask) | desired_child; + if (x != y) { + iowrite8((x & ~s->mask) | desired_child, p); + pr_debug("%s: %02x -> %02x\n", __func__, x, y); + } + + iounmap(p); + } + + return 0; +} + +static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev) +{ + struct device_node *np2, *np = pdev->dev.of_node; + struct mdio_mux_mmioreg_state *s; + struct resource res; + const __be32 *iprop; + int len, ret; + + dev_dbg(&pdev->dev, "probing node %s\n", np->full_name); + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(&pdev->dev, "could not obtain memory map for node %s\n", + np->full_name); + return ret; + } + s->phys = res.start; + + if (resource_size(&res) != sizeof(uint8_t)) { + dev_err(&pdev->dev, "only 8-bit registers are supported\n"); + return -EINVAL; + } + + iprop = of_get_property(np, "mux-mask", &len); + if (!iprop || len != sizeof(uint32_t)) { + dev_err(&pdev->dev, "missing or invalid mux-mask property\n"); + return -ENODEV; + } + if (be32_to_cpup(iprop) > 255) { + dev_err(&pdev->dev, "only 8-bit registers are supported\n"); + return -EINVAL; + } + s->mask = be32_to_cpup(iprop); + + /* + * Verify that the 'reg' property of each child MDIO bus does not + * set any bits outside of the 'mask'. + */ + for_each_available_child_of_node(np, np2) { + iprop = of_get_property(np2, "reg", &len); + if (!iprop || len != sizeof(uint32_t)) { + dev_err(&pdev->dev, "mdio-mux child node %s is " + "missing a 'reg' property\n", np2->full_name); + return -ENODEV; + } + if (be32_to_cpup(iprop) & ~s->mask) { + dev_err(&pdev->dev, "mdio-mux child node %s has " + "a 'reg' value with unmasked bits\n", + np2->full_name); + return -ENODEV; + } + } + + ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, + &s->mux_handle, s); + if (ret) { + dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", + np->full_name); + return ret; + } + + pdev->dev.platform_data = s; + + return 0; +} + +static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev) +{ + struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev); + + mdio_mux_uninit(s->mux_handle); + + return 0; +} + +static struct of_device_id mdio_mux_mmioreg_match[] = { + { + .compatible = "mdio-mux-mmioreg", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match); + +static struct platform_driver mdio_mux_mmioreg_driver = { + .driver = { + .name = "mdio-mux-mmioreg", + .owner = THIS_MODULE, + .of_match_table = mdio_mux_mmioreg_match, + }, + .probe = mdio_mux_mmioreg_probe, + .remove = __devexit_p(mdio_mux_mmioreg_remove), +}; + +module_platform_driver(mdio_mux_mmioreg_driver); + +MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); +MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7ca2ff97c368..ef9ea9248223 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad, bus->write(bus, addr, MII_MMD_DATA, data); } -static u32 phy_eee_to_adv(u16 eee_adv) -{ - u32 adv = 0; - - if (eee_adv & MDIO_EEE_100TX) - adv |= 
ADVERTISED_100baseT_Full; - if (eee_adv & MDIO_EEE_1000T) - adv |= ADVERTISED_1000baseT_Full; - if (eee_adv & MDIO_EEE_10GT) - adv |= ADVERTISED_10000baseT_Full; - if (eee_adv & MDIO_EEE_1000KX) - adv |= ADVERTISED_1000baseKX_Full; - if (eee_adv & MDIO_EEE_10GKX4) - adv |= ADVERTISED_10000baseKX4_Full; - if (eee_adv & MDIO_EEE_10GKR) - adv |= ADVERTISED_10000baseKR_Full; - - return adv; -} - -static u32 phy_eee_to_supported(u16 eee_caported) -{ - u32 supported = 0; - - if (eee_caported & MDIO_EEE_100TX) - supported |= SUPPORTED_100baseT_Full; - if (eee_caported & MDIO_EEE_1000T) - supported |= SUPPORTED_1000baseT_Full; - if (eee_caported & MDIO_EEE_10GT) - supported |= SUPPORTED_10000baseT_Full; - if (eee_caported & MDIO_EEE_1000KX) - supported |= SUPPORTED_1000baseKX_Full; - if (eee_caported & MDIO_EEE_10GKX4) - supported |= SUPPORTED_10000baseKX4_Full; - if (eee_caported & MDIO_EEE_10GKR) - supported |= SUPPORTED_10000baseKR_Full; - - return supported; -} - -static u16 phy_adv_to_eee(u32 adv) -{ - u16 reg = 0; - - if (adv & ADVERTISED_100baseT_Full) - reg |= MDIO_EEE_100TX; - if (adv & ADVERTISED_1000baseT_Full) - reg |= MDIO_EEE_1000T; - if (adv & ADVERTISED_10000baseT_Full) - reg |= MDIO_EEE_10GT; - if (adv & ADVERTISED_1000baseKX_Full) - reg |= MDIO_EEE_1000KX; - if (adv & ADVERTISED_10000baseKX4_Full) - reg |= MDIO_EEE_10GKX4; - if (adv & ADVERTISED_10000baseKR_Full) - reg |= MDIO_EEE_10GKR; - - return reg; -} - /** * phy_init_eee - init and check the EEE feature * @phydev: target phy_device struct @@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (eee_cap < 0) return eee_cap; - cap = phy_eee_to_supported(eee_cap); + cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); if (!cap) goto eee_exit; @@ -1149,8 +1089,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (eee_adv < 0) return eee_adv; - adv = phy_eee_to_adv(eee_adv); - lp = phy_eee_to_adv(eee_lp); + adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); + lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); idx = phy_find_setting(phydev->speed, phydev->duplex); if ((lp & adv & settings[idx].setting)) goto eee_exit; @@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) MDIO_MMD_PCS, phydev->addr); if (val < 0) return val; - data->supported = phy_eee_to_supported(val); + data->supported = mmd_eee_cap_to_ethtool_sup_t(val); /* Get advertisement EEE */ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN, phydev->addr); if (val < 0) return val; - data->advertised = phy_eee_to_adv(val); + data->advertised = mmd_eee_adv_to_ethtool_adv_t(val); /* Get LP advertisement EEE */ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN, phydev->addr); if (val < 0) return val; - data->lp_advertised = phy_eee_to_adv(val); + data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); return 0; } @@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val; - val = phy_adv_to_eee(data->advertised); + val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN, phydev->addr, val); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 5c0557222f20..eb3f5cefeba3 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -94,6 +94,18 @@ struct ppp_file { #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) /* + * Data structure to hold primary network stats for which + * we want to 
use 64 bit storage. Other network stats + * are stored in dev->stats of the ppp strucute. + */ +struct ppp_link_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; +}; + +/* * Data structure describing one ppp unit. * A ppp unit corresponds to a ppp network interface device * and represents a multilink bundle. @@ -136,6 +148,7 @@ struct ppp { unsigned pass_len, active_len; #endif /* CONFIG_PPP_FILTER */ struct net *ppp_net; /* the net we belong to */ + struct ppp_link_stats stats64; /* 64 bit network stats */ }; /* @@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } +struct rtnl_link_stats64* +ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) +{ + struct ppp *ppp = netdev_priv(dev); + + ppp_recv_lock(ppp); + stats64->rx_packets = ppp->stats64.rx_packets; + stats64->rx_bytes = ppp->stats64.rx_bytes; + ppp_recv_unlock(ppp); + + ppp_xmit_lock(ppp); + stats64->tx_packets = ppp->stats64.tx_packets; + stats64->tx_bytes = ppp->stats64.tx_bytes; + ppp_xmit_unlock(ppp); + + stats64->rx_errors = dev->stats.rx_errors; + stats64->tx_errors = dev->stats.tx_errors; + stats64->rx_dropped = dev->stats.rx_dropped; + stats64->tx_dropped = dev->stats.tx_dropped; + stats64->rx_length_errors = dev->stats.rx_length_errors; + + return stats64; +} + static const struct net_device_ops ppp_netdev_ops = { - .ndo_start_xmit = ppp_start_xmit, - .ndo_do_ioctl = ppp_net_ioctl, + .ndo_start_xmit = ppp_start_xmit, + .ndo_do_ioctl = ppp_net_ioctl, + .ndo_get_stats64 = ppp_get_stats64, }; static void ppp_setup(struct net_device *dev) @@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) #endif /* CONFIG_PPP_FILTER */ } - ++ppp->dev->stats.tx_packets; - ppp->dev->stats.tx_bytes += skb->len - 2; + ++ppp->stats64.tx_packets; + ppp->stats64.tx_bytes += skb->len - 2; switch (proto) { case PPP_IP: @@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) break; } - ++ppp->dev->stats.rx_packets; - ppp->dev->stats.rx_bytes += skb->len - 2; + ++ppp->stats64.rx_packets; + ppp->stats64.rx_bytes += skb->len - 2; npi = proto_to_npindex(proto); if (npi < 0) { @@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) struct slcompress *vj = ppp->vj; memset(st, 0, sizeof(*st)); - st->p.ppp_ipackets = ppp->dev->stats.rx_packets; + st->p.ppp_ipackets = ppp->stats64.rx_packets; st->p.ppp_ierrors = ppp->dev->stats.rx_errors; - st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; - st->p.ppp_opackets = ppp->dev->stats.tx_packets; + st->p.ppp_ibytes = ppp->stats64.rx_bytes; + st->p.ppp_opackets = ppp->stats64.tx_packets; st->p.ppp_oerrors = ppp->dev->stats.tx_errors; - st->p.ppp_obytes = ppp->dev->stats.tx_bytes; + st->p.ppp_obytes = ppp->stats64.tx_bytes; if (!vj) return; st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig index 6a7260b03a1e..6b08bd419fba 100644 --- a/drivers/net/team/Kconfig +++ b/drivers/net/team/Kconfig @@ -21,7 +21,7 @@ config NET_TEAM_MODE_BROADCAST ---help--- Basic mode where packets are transmitted always by all suitable ports. - All added ports are setup to have team's mac address. + All added ports are setup to have team's device address. To compile this team mode as a module, choose M here: the module will be called team_mode_broadcast. 
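Note on the ppp_link_stats change above: the packet and byte counters move to explicit u64 fields, copied out in ppp_get_stats64() under ppp_recv_lock()/ppp_xmit_lock(), while the error counters stay in dev->stats. The likely motivation is that dev->stats fields are unsigned long, which is 32 bits wide on 32-bit builds and wraps after roughly 4 GiB of traffic. A small standalone illustration of that wrap (plain C; the counter values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* dev->stats counters are unsigned long, i.e. 32 bits on 32-bit
	 * builds; the new ppp_link_stats fields are explicit u64. */
	uint32_t old_rx_bytes = 0xffffff00u;	/* just shy of 4 GiB */
	uint64_t new_rx_bytes = 0xffffff00u;

	old_rx_bytes += 1500;			/* one more full-size frame */
	new_rx_bytes += 1500;

	printf("32-bit counter: %u (wrapped)\n", old_rx_bytes);
	printf("64-bit counter: %llu\n", (unsigned long long)new_rx_bytes);
	return 0;
}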
@@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN Basic mode where port used for transmitting packets is selected in round-robin fashion using packet counter. - All added ports are setup to have team's mac address. + All added ports are setup to have team's device address. To compile this team mode as a module, choose M here: the module will be called team_mode_roundrobin. diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 341b65dbbcd3..266af7b38ebc 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev) } /* - * Since the ability to change mac address for open port device is tested in + * Since the ability to change device address for open port device is tested in * team_port_add, this function can be called without control of return value */ -static int __set_port_mac(struct net_device *port_dev, - const unsigned char *dev_addr) +static int __set_port_dev_addr(struct net_device *port_dev, + const unsigned char *dev_addr) { struct sockaddr addr; - memcpy(addr.sa_data, dev_addr, ETH_ALEN); - addr.sa_family = ARPHRD_ETHER; + memcpy(addr.sa_data, dev_addr, port_dev->addr_len); + addr.sa_family = port_dev->type; return dev_set_mac_address(port_dev, &addr); } -static int team_port_set_orig_mac(struct team_port *port) +static int team_port_set_orig_dev_addr(struct team_port *port) { - return __set_port_mac(port->dev, port->orig.dev_addr); + return __set_port_dev_addr(port->dev, port->orig.dev_addr); } -int team_port_set_team_mac(struct team_port *port) +int team_port_set_team_dev_addr(struct team_port *port) { - return __set_port_mac(port->dev, port->team->dev->dev_addr); + return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); } -EXPORT_SYMBOL(team_port_set_team_mac); +EXPORT_SYMBOL(team_port_set_team_dev_addr); static void team_refresh_port_linkup(struct team_port *port) { @@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) } +/************************************* + * Multiqueue Tx port select override + *************************************/ + +static int team_queue_override_init(struct team *team) +{ + struct list_head *listarr; + unsigned int queue_cnt = team->dev->num_tx_queues - 1; + unsigned int i; + + if (!queue_cnt) + return 0; + listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL); + if (!listarr) + return -ENOMEM; + team->qom_lists = listarr; + for (i = 0; i < queue_cnt; i++) + INIT_LIST_HEAD(listarr++); + return 0; +} + +static void team_queue_override_fini(struct team *team) +{ + kfree(team->qom_lists); +} + +static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id) +{ + return &team->qom_lists[queue_id - 1]; +} + +/* + * note: already called with rcu_read_lock + */ +static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb) +{ + struct list_head *qom_list; + struct team_port *port; + + if (!team->queue_override_enabled || !skb->queue_mapping) + return false; + qom_list = __team_get_qom_list(team, skb->queue_mapping); + list_for_each_entry_rcu(port, qom_list, qom_list) { + if (!team_dev_queue_xmit(team, port, skb)) + return true; + } + return false; +} + +static void __team_queue_override_port_del(struct team *team, + struct team_port *port) +{ + list_del_rcu(&port->qom_list); + synchronize_rcu(); + INIT_LIST_HEAD(&port->qom_list); +} + +static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, + struct team_port *cur) +{ + if 
(port->priority < cur->priority) + return true; + if (port->priority > cur->priority) + return false; + if (port->index < cur->index) + return true; + return false; +} + +static void __team_queue_override_port_add(struct team *team, + struct team_port *port) +{ + struct team_port *cur; + struct list_head *qom_list; + struct list_head *node; + + if (!port->queue_id || !team_port_enabled(port)) + return; + + qom_list = __team_get_qom_list(team, port->queue_id); + node = qom_list; + list_for_each_entry(cur, qom_list, qom_list) { + if (team_queue_override_port_has_gt_prio_than(port, cur)) + break; + node = &cur->qom_list; + } + list_add_tail_rcu(&port->qom_list, node); +} + +static void __team_queue_override_enabled_check(struct team *team) +{ + struct team_port *port; + bool enabled = false; + + list_for_each_entry(port, &team->port_list, list) { + if (!list_empty(&port->qom_list)) { + enabled = true; + break; + } + } + if (enabled == team->queue_override_enabled) + return; + netdev_dbg(team->dev, "%s queue override\n", + enabled ? "Enabling" : "Disabling"); + team->queue_override_enabled = enabled; +} + +static void team_queue_override_port_refresh(struct team *team, + struct team_port *port) +{ + __team_queue_override_port_del(team, port); + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); +} + + /**************** * Port handling ****************/ @@ -688,6 +804,7 @@ static void team_port_enable(struct team *team, hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); + team_queue_override_port_refresh(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); } @@ -716,6 +833,7 @@ static void team_port_disable(struct team *team, hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; + team_queue_override_port_refresh(team, port); __team_adjust_ops(team, team->en_port_count - 1); /* * Wait until readers see adjusted ops. This ensures that @@ -849,6 +967,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team) #endif static void __team_port_change_check(struct team_port *port, bool linkup); +static int team_dev_type_check_change(struct net_device *dev, + struct net_device *port_dev); static int team_port_add(struct team *team, struct net_device *port_dev) { @@ -857,9 +977,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev) char *portname = port_dev->name; int err; - if (port_dev->flags & IFF_LOOPBACK || - port_dev->type != ARPHRD_ETHER) { - netdev_err(dev, "Device %s is of an unsupported type\n", + if (port_dev->flags & IFF_LOOPBACK) { + netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n", portname); return -EINVAL; } @@ -870,6 +989,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return -EBUSY; } + if (port_dev->features & NETIF_F_VLAN_CHALLENGED && + vlan_uses_dev(dev)) { + netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n", + portname); + return -EPERM; + } + + err = team_dev_type_check_change(dev, port_dev); + if (err) + return err; + if (port_dev->flags & IFF_UP) { netdev_err(dev, "Device %s is up. 
Set it down before adding it as a team port\n", portname); @@ -883,6 +1013,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) port->dev = port_dev; port->team = team; + INIT_LIST_HEAD(&port->qom_list); port->orig.mtu = port_dev->mtu; err = dev_set_mtu(port_dev, dev->mtu); @@ -891,7 +1022,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_set_mtu; } - memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); + memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len); err = team_port_enter(team, port); if (err) { @@ -972,7 +1103,7 @@ err_vids_add: err_dev_open: team_port_leave(team, port); - team_port_set_orig_mac(port); + team_port_set_orig_dev_addr(port); err_port_enter: dev_set_mtu(port_dev, port->orig.mtu); @@ -1009,7 +1140,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) vlan_vids_del_by_dev(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); - team_port_set_orig_mac(port); + team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); synchronize_rcu(); kfree(port); @@ -1094,6 +1225,49 @@ static int team_user_linkup_en_option_set(struct team *team, return 0; } +static int team_priority_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.s32_val = port->priority; + return 0; +} + +static int team_priority_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + port->priority = ctx->data.s32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + +static int team_queue_id_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.u32_val = port->queue_id; + return 0; +} + +static int team_queue_id_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + if (port->queue_id == ctx->data.u32_val) + return 0; + if (ctx->data.u32_val >= team->dev->real_num_tx_queues) + return -EINVAL; + port->queue_id = ctx->data.u32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + + static const struct team_option team_options[] = { { .name = "mode", @@ -1122,6 +1296,20 @@ static const struct team_option team_options[] = { .getter = team_user_linkup_en_option_get, .setter = team_user_linkup_en_option_set, }, + { + .name = "priority", + .type = TEAM_OPTION_TYPE_S32, + .per_port = true, + .getter = team_priority_option_get, + .setter = team_priority_option_set, + }, + { + .name = "queue_id", + .type = TEAM_OPTION_TYPE_U32, + .per_port = true, + .getter = team_queue_id_option_get, + .setter = team_queue_id_option_set, + }, }; static struct lock_class_key team_netdev_xmit_lock_key; @@ -1157,6 +1345,9 @@ static int team_init(struct net_device *dev) for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) INIT_HLIST_HEAD(&team->en_port_hlist[i]); INIT_LIST_HEAD(&team->port_list); + err = team_queue_override_init(team); + if (err) + goto err_team_queue_override_init; team_adjust_ops(team); @@ -1172,6 +1363,8 @@ static int team_init(struct net_device *dev) return 0; err_options_register: + team_queue_override_fini(team); +err_team_queue_override_init: free_percpu(team->pcpu_stats); return err; @@ -1189,6 +1382,7 @@ static void team_uninit(struct net_device *dev) __team_change_mode(team, NULL); /* cleanup */ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + team_queue_override_fini(team); 
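Note: the per-port "priority" and "queue_id" options above feed the queue-override lists introduced earlier in this hunk; team_queue_override_port_has_gt_prio_than() treats a numerically smaller priority value as the higher priority, with ties broken by the lower port index. A user-space sketch of just that comparison (struct port and has_gt_prio_than are illustrative stand-ins for the kernel types):

#include <stdio.h>

/* Illustrative stand-in for struct team_port. */
struct port {
	const char *name;
	int priority;
	int index;
};

/* Same comparison as team_queue_override_port_has_gt_prio_than(): a
 * numerically smaller "priority" value counts as the higher priority,
 * and ties go to the lower port index. */
static int has_gt_prio_than(const struct port *a, const struct port *b)
{
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return 0;
	return a->index < b->index;
}

int main(void)
{
	const struct port a = { "port-a", 10, 0 };
	const struct port b = { "port-b", -5, 1 };
	const struct port c = { "port-c", -5, 2 };

	printf("%s over %s: %d\n", b.name, a.name, has_gt_prio_than(&b, &a));
	printf("%s over %s: %d\n", b.name, c.name, has_gt_prio_than(&b, &c));
	return 0;
}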
mutex_unlock(&team->lock); } @@ -1218,10 +1412,12 @@ static int team_close(struct net_device *dev) static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) { struct team *team = netdev_priv(dev); - bool tx_success = false; + bool tx_success; unsigned int len = skb->len; - tx_success = team->ops.transmit(team, skb); + tx_success = team_queue_override_transmit(team, skb); + if (!tx_success) + tx_success = team->ops.transmit(team, skb); if (tx_success) { struct team_pcpu_stats *pcpu_stats; @@ -1295,17 +1491,18 @@ static void team_set_rx_mode(struct net_device *dev) static int team_set_mac_address(struct net_device *dev, void *p) { + struct sockaddr *addr = p; struct team *team = netdev_priv(dev); struct team_port *port; - int err; - err = eth_mac_addr(dev, p); - if (err) - return err; + if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) - if (team->ops.port_change_mac) - team->ops.port_change_mac(team, port); + if (team->ops.port_change_dev_addr) + team->ops.port_change_dev_addr(team, port); rcu_read_unlock(); return 0; } @@ -1536,6 +1733,45 @@ static const struct net_device_ops team_netdev_ops = { * rt netlink interface ***********************/ +static void team_setup_by_port(struct net_device *dev, + struct net_device *port_dev) +{ + dev->header_ops = port_dev->header_ops; + dev->type = port_dev->type; + dev->hard_header_len = port_dev->hard_header_len; + dev->addr_len = port_dev->addr_len; + dev->mtu = port_dev->mtu; + memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); + memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; +} + +static int team_dev_type_check_change(struct net_device *dev, + struct net_device *port_dev) +{ + struct team *team = netdev_priv(dev); + char *portname = port_dev->name; + int err; + + if (dev->type == port_dev->type) + return 0; + if (!list_empty(&team->port_list)) { + netdev_err(dev, "Device %s is of different type\n", portname); + return -EBUSY; + } + err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev); + err = notifier_to_errno(err); + if (err) { + netdev_err(dev, "Refused to change device type\n"); + return err; + } + dev_uc_flush(dev); + dev_mc_flush(dev); + team_setup_by_port(dev, port_dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + return 0; +} + static void team_setup(struct net_device *dev) { ether_setup(dev); @@ -1650,7 +1886,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) if (!msg) return -ENOMEM; - hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &team_nl_family, 0, TEAM_CMD_NOOP); if (IS_ERR(hdr)) { err = PTR_ERR(hdr); @@ -1659,7 +1895,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) genlmsg_end(msg, hdr); - return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); + return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); err_msg_put: nlmsg_free(msg); @@ -1716,7 +1952,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team, if (err < 0) goto err_fill; - err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); + err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid); return err; err_fill: @@ -1725,11 +1961,11 @@ err_fill: } typedef int 
team_nl_send_func_t(struct sk_buff *skb, - struct team *team, u32 pid); + struct team *team, u32 portid); -static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid) +static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid) { - return genlmsg_unicast(dev_net(team->dev), skb, pid); + return genlmsg_unicast(dev_net(team->dev), skb, portid); } static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, @@ -1789,6 +2025,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) goto nest_cancel; break; + case TEAM_OPTION_TYPE_S32: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32)) + goto nest_cancel; + if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val)) + goto nest_cancel; + break; default: BUG(); } @@ -1808,13 +2050,13 @@ nest_cancel: } static int __send_and_alloc_skb(struct sk_buff **pskb, - struct team *team, u32 pid, + struct team *team, u32 portid, team_nl_send_func_t *send_func) { int err; if (*pskb) { - err = send_func(*pskb, team, pid); + err = send_func(*pskb, team, portid); if (err) return err; } @@ -1824,7 +2066,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb, return 0; } -static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, +static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, int flags, team_nl_send_func_t *send_func, struct list_head *sel_opt_inst_list) { @@ -1841,11 +2083,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, struct team_option_inst, tmp_list); start_again: - err = __send_and_alloc_skb(&skb, team, pid, send_func); + err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) return err; - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_OPTIONS_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); @@ -1878,15 +2120,15 @@ start_again: goto start_again; send_done: - nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { - err = __send_and_alloc_skb(&skb, team, pid, send_func); + err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) goto errout; goto send_done; } - return send_func(skb, team, pid); + return send_func(skb, team, portid); nla_put_failure: err = -EMSGSIZE; @@ -1909,7 +2151,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) list_for_each_entry(opt_inst, &team->option_inst_list, list) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); - err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq, + err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq, NLM_F_ACK, team_nl_send_unicast, &sel_opt_inst_list); @@ -1977,6 +2219,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) case NLA_FLAG: opt_type = TEAM_OPTION_TYPE_BOOL; break; + case NLA_S32: + opt_type = TEAM_OPTION_TYPE_S32; + break; default: goto team_put; } @@ -2033,6 +2278,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) case TEAM_OPTION_TYPE_BOOL: ctx.data.bool_val = attr_data ? 
true : false; break; + case TEAM_OPTION_TYPE_S32: + ctx.data.s32_val = nla_get_s32(attr_data); + break; default: BUG(); } @@ -2057,7 +2305,7 @@ team_put: } static int team_nl_fill_port_list_get(struct sk_buff *skb, - u32 pid, u32 seq, int flags, + u32 portid, u32 seq, int flags, struct team *team, bool fillall) { @@ -2065,7 +2313,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, void *hdr; struct team_port *port; - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags, TEAM_CMD_PORT_LIST_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); @@ -2114,7 +2362,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb, struct genl_info *info, int flags, struct team *team) { - return team_nl_fill_port_list_get(skb, info->snd_pid, + return team_nl_fill_port_list_get(skb, info->snd_portid, info->snd_seq, NLM_F_ACK, team, true); } @@ -2167,7 +2415,7 @@ static struct genl_multicast_group team_change_event_mcgrp = { }; static int team_nl_send_multicast(struct sk_buff *skb, - struct team *team, u32 pid) + struct team *team, u32 portid) { return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, team_change_event_mcgrp.id, GFP_KERNEL); @@ -2245,7 +2493,7 @@ static void __team_options_change_check(struct team *team) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); } err = team_nl_send_event_options_get(team, &sel_opt_inst_list); - if (err) + if (err && err != -ESRCH) netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", err); } @@ -2276,9 +2524,9 @@ static void __team_port_change_check(struct team_port *port, bool linkup) send_event: err = team_nl_send_event_port_list_get(port->team); - if (err) - netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", - port->dev->name); + if (err && err != -ESRCH) + netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n", + port->dev->name, err); } diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index c96e4d2967f0..9db0171e9366 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) static int bc_port_enter(struct team *team, struct team_port *port) { - return team_port_set_team_mac(port); + return team_port_set_team_dev_addr(port); } -static void bc_port_change_mac(struct team *team, struct team_port *port) +static void bc_port_change_dev_addr(struct team *team, struct team_port *port) { - team_port_set_team_mac(port); + team_port_set_team_dev_addr(port); } static const struct team_mode_ops bc_mode_ops = { .transmit = bc_transmit, .port_enter = bc_port_enter, - .port_change_mac = bc_port_change_mac, + .port_change_dev_addr = bc_port_change_dev_addr, }; static const struct team_mode bc_mode = { diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index ad7ed0ec544c..105135aa8f05 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -66,18 +66,18 @@ drop: static int rr_port_enter(struct team *team, struct team_port *port) { - return team_port_set_team_mac(port); + return team_port_set_team_dev_addr(port); } -static void rr_port_change_mac(struct team *team, struct team_port *port) +static void rr_port_change_dev_addr(struct team *team, struct team_port *port) { - team_port_set_team_mac(port); + team_port_set_team_dev_addr(port); } static 
const struct team_mode_ops rr_mode_ops = { .transmit = rr_transmit, .port_enter = rr_port_enter, - .port_change_mac = rr_port_change_mac, + .port_change_dev_addr = rr_port_change_dev_addr, }; static const struct team_mode rr_mode = { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3a16d4fdaa05..498dc0d4ba5e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -120,8 +120,8 @@ struct tun_sock; struct tun_struct { struct tun_file *tfile; unsigned int flags; - uid_t owner; - gid_t group; + kuid_t owner; + kgid_t group; struct net_device *dev; netdev_features_t set_features; @@ -1031,8 +1031,8 @@ static void tun_setup(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); - tun->owner = -1; - tun->group = -1; + tun->owner = INVALID_UID; + tun->group = INVALID_GID; dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = tun_free_netdev; @@ -1155,14 +1155,20 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); - return sprintf(buf, "%d\n", tun->owner); + return uid_valid(tun->owner)? + sprintf(buf, "%u\n", + from_kuid_munged(current_user_ns(), tun->owner)): + sprintf(buf, "-1\n"); } static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); - return sprintf(buf, "%d\n", tun->group); + return gid_valid(tun->group) ? + sprintf(buf, "%u\n", + from_kgid_munged(current_user_ns(), tun->group)): + sprintf(buf, "-1\n"); } static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); @@ -1189,8 +1195,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) else return -EINVAL; - if (((tun->owner != -1 && cred->euid != tun->owner) || - (tun->group != -1 && !in_egroup_p(tun->group))) && + if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || + (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !capable(CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_attach(tun->socket.sk); @@ -1374,6 +1380,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, void __user* argp = (void __user*)arg; struct sock_fprog fprog; struct ifreq ifr; + kuid_t owner; + kgid_t group; int sndbuf; int vnet_hdr_sz; int ret; @@ -1447,16 +1455,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, case TUNSETOWNER: /* Set owner of the device */ - tun->owner = (uid_t) arg; - - tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner); + owner = make_kuid(current_user_ns(), arg); + if (!uid_valid(owner)) { + ret = -EINVAL; + break; + } + tun->owner = owner; + tun_debug(KERN_INFO, tun, "owner set to %d\n", + from_kuid(&init_user_ns, tun->owner)); break; case TUNSETGROUP: /* Set group of the device */ - tun->group= (gid_t) arg; - - tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group); + group = make_kgid(current_user_ns(), arg); + if (!gid_valid(group)) { + ret = -EINVAL; + break; + } + tun->group = group; + tun_debug(KERN_INFO, tun, "group set to %d\n", + from_kgid(&init_user_ns, tun->group)); break; case TUNSETLINK: diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 49ab45e17fe8..1e207f086b75 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -302,18 +302,9 @@ static const struct driver_info cx82310_info = { .tx_fixup = cx82310_tx_fixup, }; -#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ - USB_DEVICE_ID_MATCH_DEV_INFO, \ - .idVendor = 
(vend), \ - .idProduct = (prod), \ - .bDeviceClass = (cl), \ - .bDeviceSubClass = (sc), \ - .bDeviceProtocol = (pr) - static const struct usb_device_id products[] = { { - USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), + USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b1ba68f1a049..e7b53f020729 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev) atomic_set(&info->pmcount, 0); /* register subdriver */ - subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power); if (IS_ERR(subdriver)) { dev_err(&info->control->dev, "subdriver registration failed\n"); rv = PTR_ERR(subdriver); @@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); - /* require a single interrupt status endpoint for subdriver */ + /* control and data is shared? */ + if (intf->cur_altsetting->desc.bNumEndpoints == 3) { + info->control = intf; + info->data = intf; + goto shared; + } + + /* else require a single interrupt status endpoint on control intf */ if (intf->cur_altsetting->desc.bNumEndpoints != 1) goto err; + /* and a number of CDC descriptors */ while (len > 3) { struct usb_descriptor_header *h = (void *)buf; @@ -231,8 +239,9 @@ next_desc: if (status < 0) goto err; +shared: status = qmi_wwan_register_subdriver(dev); - if (status < 0) { + if (status < 0 && info->control != info->data) { usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver, info->data); } @@ -241,20 +250,6 @@ err: return status; } -/* Some devices combine the "control" and "data" functions into a - * single interface with all three endpoints: interrupt + bulk in and - * out - */ -static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) -{ - struct qmi_wwan_state *info = (void *)&dev->data; - - /* control and data is shared */ - info->control = intf; - info->data = intf; - return qmi_wwan_register_subdriver(dev); -} - static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) { struct qmi_wwan_state *info = (void *)&dev->data; @@ -331,20 +326,12 @@ static const struct driver_info qmi_wwan_info = { .manage_power = qmi_wwan_manage_power, }; -static const struct driver_info qmi_wwan_shared = { - .description = "WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, -}; - #define HUAWEI_VENDOR_ID 0x12D1 /* map QMI/wwan function by a fixed interface number */ #define QMI_FIXED_INTF(vend, prod, num) \ USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ - .driver_info = (unsigned long)&qmi_wwan_shared + .driver_info = (unsigned long)&qmi_wwan_info /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ @@ -368,15 +355,15 @@ static const struct usb_device_id products[] = { /* 2. 
Combined interface devices matching on class+protocol */ { /* Huawei E392, E398 and possibly others in "Windows mode" */ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), - .driver_info = (unsigned long)&qmi_wwan_shared, + .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Pantech UML290 */ USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), - .driver_info = (unsigned long)&qmi_wwan_shared, + .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Pantech UML290 - newer firmware */ USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), - .driver_info = (unsigned long)&qmi_wwan_shared, + .driver_info = (unsigned long)&qmi_wwan_info, }, /* 3. Combined interface devices matching on interface number */ @@ -462,7 +449,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id */ if (!id->driver_info) { dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); - id->driver_info = (unsigned long)&qmi_wwan_shared; + id->driver_info = (unsigned long)&qmi_wwan_info; } return usbnet_probe(intf, id); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 8e22417fa6c1..c27d27701aee 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -68,9 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0); */ #define SIERRA_NET_USBCTL_BUF_LEN 1024 -struct sierra_net_info_data { - u16 rx_urb_size; -}; +/* Overriding the default usbnet rx_urb_size */ +#define SIERRA_NET_RX_URB_SIZE (8 * 1024) /* Private data structure */ struct sierra_net_data { @@ -560,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work) /* * Sync Retransmit Timer Handler. On expiry, kick the work queue */ -void sierra_sync_timer(unsigned long syncdata) +static void sierra_sync_timer(unsigned long syncdata) { struct usbnet *dev = (struct usbnet *)syncdata; @@ -678,9 +677,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; - struct sierra_net_info_data *data = - (struct sierra_net_info_data *)dev->driver_info->data; - dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; @@ -725,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) sierra_net_set_ctx_index(priv, 0); /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ - dev->rx_urb_size = data->rx_urb_size; + dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) - dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); + dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE); dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; @@ -842,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); dev->net->stats.rx_frame_errors++; - /* dev->net->stats.rx_errors incremented by caller */; + /* dev->net->stats.rx_errors incremented by caller */ return 0; } @@ -866,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } /* ---------------------------- Transmit data path ----------------------*/ -struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, - gfp_t flags) +static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, + struct sk_buff *skb, gfp_t flags) { struct 
sierra_net_data *priv = sierra_net_get_private(dev); u16 len; @@ -918,10 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } -static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { - .rx_urb_size = 8 * 1024, -}; - static const struct driver_info sierra_net_info_direct_ip = { .description = "Sierra Wireless USB-to-WWAN Modem", .flags = FLAG_WWAN | FLAG_SEND_ZLP, @@ -930,7 +922,6 @@ static const struct driver_info sierra_net_info_direct_ip = { .status = sierra_net_status, .rx_fixup = sierra_net_rx_fixup, .tx_fixup = sierra_net_tx_fixup, - .data = (unsigned long)&sierra_net_info_data_direct_ip, }; #define DIRECT_IP_DEVICE(vend, prod) \ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 5852361032c4..e522ff70444c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (tbp[IFLA_ADDRESS] == NULL) eth_hw_addr_random(peer); + if (ifmp && (dev->ifindex != 0)) + peer->ifindex = ifmp->ifi_index; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 83d2b0c34c5e..81a64c58e8ad 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work) goto done; if (v & VIRTIO_NET_S_ANNOUNCE) { - netif_notify_peers(vi->dev); + netdev_notify_peers(vi->dev); virtnet_ack_link_announce(vi); } diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index 025426132754..9c34d2fccfac 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m) struct sk_buff *skb; const struct i2400m_tlv_detailed_device_info *ddi; struct net_device *net_dev = i2400m->wimax_dev.net_dev; - const unsigned char zeromac[ETH_ALEN] = { 0 }; d_fnstart(3, dev, "(i2400m %p)\n", i2400m); skb = i2400m_get_device_info(i2400m); @@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m) "to that of boot mode's\n"); dev_warn(dev, "device reports %pM\n", ddi->mac_address); dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr); - if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac))) + if (is_zero_ether_addr(ddi->mac_address)) dev_err(dev, "device reports an invalid MAC address, " "not updating\n"); else { diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index 689a71c1af71..154a4965be4f 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1661,7 +1661,9 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, } /* Put adm8211_tx_hdr on skb and transmit */ -static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void adm8211_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct adm8211_tx_hdr *txhdr; size_t payload_len, hdrlen; diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index f9f15bb3f03a..c586f78c307f 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -232,8 +232,10 @@ static int adhoc; static int probe = 1; +static kuid_t proc_kuid; static int proc_uid /* = 0 */; +static kgid_t proc_kgid; static int proc_gid /* = 0 */; static int airo_perm = 0555; @@ -4499,78 +4501,79 @@ struct proc_data { static int setup_proc_entry( struct net_device *dev, struct airo_info *apriv ) { struct 
proc_dir_entry *entry; + /* First setup the device directory */ strcpy(apriv->proc_name,dev->name); apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm, airo_entry); if (!apriv->proc_entry) goto fail; - apriv->proc_entry->uid = proc_uid; - apriv->proc_entry->gid = proc_gid; + apriv->proc_entry->uid = proc_kuid; + apriv->proc_entry->gid = proc_kgid; /* Setup the StatsDelta */ entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm, apriv->proc_entry, &proc_statsdelta_ops, dev); if (!entry) goto fail_stats_delta; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the Stats */ entry = proc_create_data("Stats", S_IRUGO & proc_perm, apriv->proc_entry, &proc_stats_ops, dev); if (!entry) goto fail_stats; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the Status */ entry = proc_create_data("Status", S_IRUGO & proc_perm, apriv->proc_entry, &proc_status_ops, dev); if (!entry) goto fail_status; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the Config */ entry = proc_create_data("Config", proc_perm, apriv->proc_entry, &proc_config_ops, dev); if (!entry) goto fail_config; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the SSID */ entry = proc_create_data("SSID", proc_perm, apriv->proc_entry, &proc_SSID_ops, dev); if (!entry) goto fail_ssid; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the APList */ entry = proc_create_data("APList", proc_perm, apriv->proc_entry, &proc_APList_ops, dev); if (!entry) goto fail_aplist; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the BSSList */ entry = proc_create_data("BSSList", proc_perm, apriv->proc_entry, &proc_BSSList_ops, dev); if (!entry) goto fail_bsslist; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; /* Setup the WepKey */ entry = proc_create_data("WepKey", proc_perm, apriv->proc_entry, &proc_wepkey_ops, dev); if (!entry) goto fail_wepkey; - entry->uid = proc_uid; - entry->gid = proc_gid; + entry->uid = proc_kuid; + entry->gid = proc_kgid; return 0; @@ -5697,11 +5700,16 @@ static int __init airo_init_module( void ) { int i; + proc_kuid = make_kuid(&init_user_ns, proc_uid); + proc_kgid = make_kgid(&init_user_ns, proc_gid); + if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid)) + return -EINVAL; + airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL); if (airo_entry) { - airo_entry->uid = proc_uid; - airo_entry->gid = proc_gid; + airo_entry->uid = proc_kuid; + airo_entry->gid = proc_kgid; } for (i = 0; i < 4 && io[i] && irq[i]; i++) { diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 88b8d64c90f1..e361afed99ff 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -1726,7 +1726,9 @@ static void at76_mac80211_tx_callback(struct urb *urb) ieee80211_wake_queues(priv->hw); } -static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void at76_mac80211_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct at76_priv *priv = hw->priv; struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h 
b/drivers/net/wireless/ath/ath5k/ath5k.h index 64a453a6dfe4..3150def17193 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1331,7 +1331,6 @@ struct ath5k_hw { unsigned int nexttbtt; /* next beacon time in TU */ struct ath5k_txq *cabq; /* content after beacon */ - int power_level; /* Requested tx power in dBm */ bool assoc; /* associate state */ bool enable_beacon; /* true if beacons are on */ @@ -1425,6 +1424,7 @@ struct ath5k_hw { /* Value in dB units */ s16 txp_cck_ofdm_pwr_delta; bool txp_setup; + int txp_requested; /* Requested tx power in dBm */ } ah_txpower; struct ath5k_nfcal_hist ah_nfcal_hist; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 2aab20ee9f38..a0a202de1109 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -723,7 +723,7 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf, ret = ah->ah_setup_tx_desc(ah, ds, pktlen, ieee80211_get_hdrlen_from_skb(skb), padsize, get_hw_packet_type(skb), - (ah->power_level * 2), + (ah->ah_txpower.txp_requested * 2), hw_rate, info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, cts_rate, duration); @@ -1778,7 +1778,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf) ds->ds_data = bf->skbaddr; ret = ah->ah_setup_tx_desc(ah, ds, skb->len, ieee80211_get_hdrlen_from_skb(skb), padsize, - AR5K_PKT_TYPE_BEACON, (ah->power_level * 2), + AR5K_PKT_TYPE_BEACON, + (ah->ah_txpower.txp_requested * 2), ieee80211_get_tx_rate(ah->hw, info)->hw_value, 1, AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index d56453e43d7e..df61a09adb6d 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -55,7 +55,8 @@ \********************/ static void -ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ath5k_hw *ah = hw->priv; u16 qnum = skb_get_queue_mapping(skb); @@ -207,8 +208,8 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed) } if ((changed & IEEE80211_CONF_CHANGE_POWER) && - (ah->power_level != conf->power_level)) { - ah->power_level = conf->power_level; + (ah->ah_txpower.txp_requested != conf->power_level)) { + ah->ah_txpower.txp_requested = conf->power_level; /* Half dB steps */ ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 8b71a2d947e0..01c90ed58453 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -3516,6 +3516,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, { unsigned int i; u16 *rates; + s16 rate_idx_scaled = 0; /* max_pwr is power level we got from driver/user in 0.5dB * units, switch to 0.25dB units so we can compare */ @@ -3562,20 +3563,32 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, for (i = 8; i <= 15; i++) rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta; + /* Save min/max and current tx power for this channel + * in 0.25dB units. + * + * Note: We use rates[0] for current tx power because + * it covers most of the rates, in most cases. It's our + * tx power limit and what the user expects to see. 
*/ + ah->ah_txpower.txp_min_pwr = 2 * rates[7]; + ah->ah_txpower.txp_cur_pwr = 2 * rates[0]; + + /* Set max txpower for correct OFDM operation on all rates + * -that is the txpower for 54Mbit-, it's used for the PAPD + * gain probe and it's in 0.5dB units */ + ah->ah_txpower.txp_ofdm = rates[7]; + /* Now that we have all rates setup use table offset to * match the power range set by user with the power indices * on PCDAC/PDADC table */ for (i = 0; i < 16; i++) { - rates[i] += ah->ah_txpower.txp_offset; + rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset; /* Don't get out of bounds */ - if (rates[i] > 63) - rates[i] = 63; + if (rate_idx_scaled > 63) + rate_idx_scaled = 63; + if (rate_idx_scaled < 0) + rate_idx_scaled = 0; + rates[i] = rate_idx_scaled; } - - /* Min/max in 0.25dB units */ - ah->ah_txpower.txp_min_pwr = 2 * rates[7]; - ah->ah_txpower.txp_cur_pwr = 2 * rates[0]; - ah->ah_txpower.txp_ofdm = rates[7]; } @@ -3639,10 +3652,17 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, if (!ah->ah_txpower.txp_setup || (channel->hw_value != curr_channel->hw_value) || (channel->center_freq != curr_channel->center_freq)) { - /* Reset TX power values */ + /* Reset TX power values but preserve requested + * tx power from above */ + int requested_txpower = ah->ah_txpower.txp_requested; + memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); + + /* Restore TPC setting and requested tx power */ ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; + ah->ah_txpower.txp_requested = requested_txpower; + /* Calculate the powertable */ ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type); @@ -3789,8 +3809,9 @@ ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, * RF buffer settings on 5211/5212+ so that we * properly set curve indices. */ - ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ? - ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER); + ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ? 
+ ah->ah_txpower.txp_requested * 2 : + AR5K_TUNE_MAX_TXPOWER); if (ret) return ret; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 2588848f4a82..c37fe9620e41 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4901,90 +4901,79 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], chan->channel); - /* - * compare test group from regulatory - * channel list with test mode from pCtlMode - * list - */ - if ((((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ctlIndex[i]) || - (((cfgCtl & ~CTL_MODE_M) | - (pCtlMode[ctlMode] & CTL_MODE_M)) == - ((ctlIndex[i] & CTL_MODE_M) | - SD_NO_CTL))) { - twiceMinEdgePower = - ar9003_hw_get_max_edge_power(pEepData, - freq, i, - is2ghz); - - if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) - /* - * Find the minimum of all CTL - * edge powers that apply to - * this channel - */ - twiceMaxEdgePower = - min(twiceMaxEdgePower, - twiceMinEdgePower); - else { - /* specific */ - twiceMaxEdgePower = - twiceMinEdgePower; - break; - } + /* + * compare test group from regulatory + * channel list with test mode from pCtlMode + * list + */ + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ctlIndex[i]) || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((ctlIndex[i] & CTL_MODE_M) | + SD_NO_CTL))) { + twiceMinEdgePower = + ar9003_hw_get_max_edge_power(pEepData, + freq, i, + is2ghz); + + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) + /* + * Find the minimum of all CTL + * edge powers that apply to + * this channel + */ + twiceMaxEdgePower = + min(twiceMaxEdgePower, + twiceMinEdgePower); + else { + /* specific */ + twiceMaxEdgePower = twiceMinEdgePower; + break; } } + } - minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); + minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); - ath_dbg(common, REGULATORY, - "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", - ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, - scaledPower, minCtlPower); - - /* Apply ctl mode to correct target power set */ - switch (pCtlMode[ctlMode]) { - case CTL_11B: - for (i = ALL_TARGET_LEGACY_1L_5L; - i <= ALL_TARGET_LEGACY_11S; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - case CTL_11A: - case CTL_11G: - for (i = ALL_TARGET_LEGACY_6_24; - i <= ALL_TARGET_LEGACY_54; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - case CTL_5GHT20: - case CTL_2GHT20: - for (i = ALL_TARGET_HT20_0_8_16; - i <= ALL_TARGET_HT20_21; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - pPwrArray[ALL_TARGET_HT20_22] = - (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], - minCtlPower); - pPwrArray[ALL_TARGET_HT20_23] = - (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], - minCtlPower); - break; - case CTL_5GHT40: - case CTL_2GHT40: - for (i = ALL_TARGET_HT40_0_8_16; - i <= ALL_TARGET_HT40_23; i++) - pPwrArray[i] = - (u8)min((u16)pPwrArray[i], - minCtlPower); - break; - default: - break; - } + ath_dbg(common, REGULATORY, + "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, + scaledPower, minCtlPower); + + /* Apply ctl mode to correct target power set */ + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = ALL_TARGET_LEGACY_1L_5L; + i <= ALL_TARGET_LEGACY_11S; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case 
CTL_11A: + case CTL_11G: + for (i = ALL_TARGET_LEGACY_6_24; + i <= ALL_TARGET_LEGACY_54; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = ALL_TARGET_HT20_0_8_16; + i <= ALL_TARGET_HT20_23; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = ALL_TARGET_HT40_0_8_16; + i <= ALL_TARGET_HT40_23; i++) + pPwrArray[i] = (u8)min((u16)pPwrArray[i], + minCtlPower); + break; + default: + break; + } } /* end ctl mode checking */ } diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index b09285c36c4a..7373e4b92c92 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -280,6 +280,7 @@ struct ath_tx_control { struct ath_txq *txq; struct ath_node *an; u8 paprd; + struct ieee80211_sta *sta; }; #define ATH_TX_ERROR 0x01 diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 936e920fb88e..b30596fcf73a 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -542,6 +542,7 @@ void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv); int ath9k_tx_init(struct ath9k_htc_priv *priv); int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, + struct ieee80211_sta *sta, struct sk_buff *skb, u8 slot, bool is_cab); void ath9k_tx_cleanup(struct ath9k_htc_priv *priv); bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 77d541feb910..f42d2eb6af99 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -326,7 +326,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv, goto next; } - ret = ath9k_htc_tx_start(priv, skb, tx_slot, true); + ret = ath9k_htc_tx_start(priv, NULL, skb, tx_slot, true); if (ret != 0) { ath9k_htc_tx_clear_slot(priv, tx_slot); dev_kfree_skb_any(skb); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index c785129692ff..c32f6e3ffb18 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -856,7 +856,9 @@ set_timer: /* mac80211 Callbacks */ /**********************/ -static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void ath9k_htc_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; struct ath9k_htc_priv *priv = hw->priv; @@ -883,7 +885,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) goto fail_tx; } - ret = ath9k_htc_tx_start(priv, skb, slot, false); + ret = ath9k_htc_tx_start(priv, control->sta, skb, slot, false); if (ret != 0) { ath_dbg(common, XMIT, "Tx failed\n"); goto clear_slot; @@ -1331,6 +1333,34 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw, return ret; } +static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed) +{ + struct ath9k_htc_priv *priv = hw->priv; + struct ath_common *common = ath9k_hw_common(priv->ah); + struct ath9k_htc_target_rate trate; + + mutex_lock(&priv->mutex); + ath9k_htc_ps_wakeup(priv); + + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { + memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); + ath9k_htc_setup_rate(priv, sta, &trate); + if 
(!ath9k_htc_send_rate_cmd(priv, &trate)) + ath_dbg(common, CONFIG, + "Supported rates for sta: %pM updated, rate caps: 0x%X\n", + sta->addr, be32_to_cpu(trate.capflags)); + else + ath_dbg(common, CONFIG, + "Unable to update supported rates for sta: %pM\n", + sta->addr); + } + + ath9k_htc_ps_restore(priv); + mutex_unlock(&priv->mutex); +} + static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params) @@ -1758,6 +1788,7 @@ struct ieee80211_ops ath9k_htc_ops = { .sta_add = ath9k_htc_sta_add, .sta_remove = ath9k_htc_sta_remove, .conf_tx = ath9k_htc_conf_tx, + .sta_rc_update = ath9k_htc_sta_rc_update, .bss_info_changed = ath9k_htc_bss_info_changed, .set_key = ath9k_htc_set_key, .get_tsf = ath9k_htc_get_tsf, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 47e61d0da33b..06cdcb772d78 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -333,12 +333,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv, } int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, + struct ieee80211_sta *sta, struct sk_buff *skb, u8 slot, bool is_cab) { struct ieee80211_hdr *hdr; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = tx_info->control.sta; struct ieee80211_vif *vif = tx_info->control.vif; struct ath9k_htc_sta *ista; struct ath9k_htc_vif *avp = NULL; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a22df749b8db..8a2b04d5922f 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -696,7 +696,9 @@ mutex_unlock: return r; } -static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void ath9k_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -756,6 +758,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) memset(&txctl, 0, sizeof(struct ath_tx_control)); txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; + txctl.sta = control->sta; ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb); diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index e034add9cd5a..4b12c347d188 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -25,141 +25,141 @@ static const struct ath_rate_table ar5416_11na_ratetable = { 8, /* MCS start */ { [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, - 5400, 0, 12, 0, 0, 0, 0 }, /* 6 Mb */ + 5400, 0, 12 }, /* 6 Mb */ [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, - 7800, 1, 18, 0, 1, 1, 1 }, /* 9 Mb */ + 7800, 1, 18 }, /* 9 Mb */ [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, - 10000, 2, 24, 2, 2, 2, 2 }, /* 12 Mb */ + 10000, 2, 24 }, /* 12 Mb */ [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, - 13900, 3, 36, 2, 3, 3, 3 }, /* 18 Mb */ + 13900, 3, 36 }, /* 18 Mb */ [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, - 17300, 4, 48, 4, 4, 4, 4 }, /* 24 Mb */ + 17300, 4, 48 }, /* 24 Mb */ [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, - 23000, 5, 72, 4, 5, 5, 5 }, /* 36 Mb */ + 23000, 5, 72 }, /* 36 Mb */ [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, - 27400, 6, 96, 4, 6, 6, 6 }, /* 48 Mb */ + 27400, 6, 96 }, /* 48 Mb */ [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, - 29300, 7, 108, 4, 7, 7, 7 }, /* 54 Mb */ + 29300, 7, 108 }, /* 54 Mb */ [8] = { 
RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500, - 6400, 0, 0, 0, 38, 8, 38 }, /* 6.5 Mb */ + 6400, 0, 0 }, /* 6.5 Mb */ [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, - 12700, 1, 1, 2, 39, 9, 39 }, /* 13 Mb */ + 12700, 1, 1 }, /* 13 Mb */ [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, - 18800, 2, 2, 2, 40, 10, 40 }, /* 19.5 Mb */ + 18800, 2, 2 }, /* 19.5 Mb */ [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, - 25000, 3, 3, 4, 41, 11, 41 }, /* 26 Mb */ + 25000, 3, 3 }, /* 26 Mb */ [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, - 36700, 4, 4, 4, 42, 12, 42 }, /* 39 Mb */ + 36700, 4, 4 }, /* 39 Mb */ [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, - 48100, 5, 5, 4, 43, 13, 43 }, /* 52 Mb */ + 48100, 5, 5 }, /* 52 Mb */ [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, - 53500, 6, 6, 4, 44, 14, 44 }, /* 58.5 Mb */ + 53500, 6, 6 }, /* 58.5 Mb */ [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, - 59000, 7, 7, 4, 45, 16, 46 }, /* 65 Mb */ + 59000, 7, 7 }, /* 65 Mb */ [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, - 65400, 7, 7, 4, 45, 16, 46 }, /* 75 Mb */ + 65400, 7, 7 }, /* 75 Mb */ [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, - 12700, 8, 8, 0, 47, 17, 47 }, /* 13 Mb */ + 12700, 8, 8 }, /* 13 Mb */ [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, - 24800, 9, 9, 2, 48, 18, 48 }, /* 26 Mb */ + 24800, 9, 9 }, /* 26 Mb */ [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, - 36600, 10, 10, 2, 49, 19, 49 }, /* 39 Mb */ + 36600, 10, 10 }, /* 39 Mb */ [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, - 48100, 11, 11, 4, 50, 20, 50 }, /* 52 Mb */ + 48100, 11, 11 }, /* 52 Mb */ [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, - 69500, 12, 12, 4, 51, 21, 51 }, /* 78 Mb */ + 69500, 12, 12 }, /* 78 Mb */ [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, - 89500, 13, 13, 4, 52, 22, 52 }, /* 104 Mb */ + 89500, 13, 13 }, /* 104 Mb */ [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, - 98900, 14, 14, 4, 53, 23, 53 }, /* 117 Mb */ + 98900, 14, 14 }, /* 117 Mb */ [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, - 108300, 15, 15, 4, 54, 25, 55 }, /* 130 Mb */ + 108300, 15, 15 }, /* 130 Mb */ [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, - 120000, 15, 15, 4, 54, 25, 55 }, /* 144.4 Mb */ + 120000, 15, 15 }, /* 144.4 Mb */ [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, - 17400, 16, 16, 0, 56, 26, 56 }, /* 19.5 Mb */ + 17400, 16, 16 }, /* 19.5 Mb */ [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, - 35100, 17, 17, 2, 57, 27, 57 }, /* 39 Mb */ + 35100, 17, 17 }, /* 39 Mb */ [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, - 52600, 18, 18, 2, 58, 28, 58 }, /* 58.5 Mb */ + 52600, 18, 18 }, /* 58.5 Mb */ [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, - 70400, 19, 19, 4, 59, 29, 59 }, /* 78 Mb */ + 70400, 19, 19 }, /* 78 Mb */ [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, - 104900, 20, 20, 4, 60, 31, 61 }, /* 117 Mb */ + 104900, 20, 20 }, /* 117 Mb */ [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, - 115800, 20, 20, 4, 60, 31, 61 }, /* 130 Mb*/ + 115800, 20, 20 }, /* 130 Mb*/ [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, - 137200, 21, 21, 4, 62, 33, 63 }, /* 156 Mb */ + 137200, 21, 21 }, /* 156 Mb */ [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, - 151100, 21, 21, 4, 62, 33, 63 }, /* 173.3 Mb */ + 151100, 21, 21 }, /* 173.3 Mb */ [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, - 152800, 22, 22, 4, 64, 35, 65 }, /* 175.5 Mb */ + 152800, 22, 22 }, /* 175.5 Mb */ [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, - 168400, 22, 22, 4, 64, 35, 
65 }, /* 195 Mb*/ + 168400, 22, 22 }, /* 195 Mb*/ [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, - 168400, 23, 23, 4, 66, 37, 67 }, /* 195 Mb */ + 168400, 23, 23 }, /* 195 Mb */ [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, - 185000, 23, 23, 4, 66, 37, 67 }, /* 216.7 Mb */ + 185000, 23, 23 }, /* 216.7 Mb */ [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, - 13200, 0, 0, 0, 38, 38, 38 }, /* 13.5 Mb*/ + 13200, 0, 0 }, /* 13.5 Mb*/ [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, - 25900, 1, 1, 2, 39, 39, 39 }, /* 27.0 Mb*/ + 25900, 1, 1 }, /* 27.0 Mb*/ [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, - 38600, 2, 2, 2, 40, 40, 40 }, /* 40.5 Mb*/ + 38600, 2, 2 }, /* 40.5 Mb*/ [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, - 49800, 3, 3, 4, 41, 41, 41 }, /* 54 Mb */ + 49800, 3, 3 }, /* 54 Mb */ [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, - 72200, 4, 4, 4, 42, 42, 42 }, /* 81 Mb */ + 72200, 4, 4 }, /* 81 Mb */ [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000, - 92900, 5, 5, 4, 43, 43, 43 }, /* 108 Mb */ + 92900, 5, 5 }, /* 108 Mb */ [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, - 102700, 6, 6, 4, 44, 44, 44 }, /* 121.5 Mb*/ + 102700, 6, 6 }, /* 121.5 Mb*/ [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, - 112000, 7, 7, 4, 45, 46, 46 }, /* 135 Mb */ + 112000, 7, 7 }, /* 135 Mb */ [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, - 122000, 7, 7, 4, 45, 46, 46 }, /* 150 Mb */ + 122000, 7, 7 }, /* 150 Mb */ [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, - 25800, 8, 8, 0, 47, 47, 47 }, /* 27 Mb */ + 25800, 8, 8 }, /* 27 Mb */ [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, - 49800, 9, 9, 2, 48, 48, 48 }, /* 54 Mb */ + 49800, 9, 9 }, /* 54 Mb */ [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, - 71900, 10, 10, 2, 49, 49, 49 }, /* 81 Mb */ + 71900, 10, 10 }, /* 81 Mb */ [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, - 92500, 11, 11, 4, 50, 50, 50 }, /* 108 Mb */ + 92500, 11, 11 }, /* 108 Mb */ [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, - 130300, 12, 12, 4, 51, 51, 51 }, /* 162 Mb */ + 130300, 12, 12 }, /* 162 Mb */ [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, - 162800, 13, 13, 4, 52, 52, 52 }, /* 216 Mb */ + 162800, 13, 13 }, /* 216 Mb */ [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, - 178200, 14, 14, 4, 53, 53, 53 }, /* 243 Mb */ + 178200, 14, 14 }, /* 243 Mb */ [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, - 192100, 15, 15, 4, 54, 55, 55 }, /* 270 Mb */ + 192100, 15, 15 }, /* 270 Mb */ [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, - 207000, 15, 15, 4, 54, 55, 55 }, /* 300 Mb */ + 207000, 15, 15 }, /* 300 Mb */ [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, - 36100, 16, 16, 0, 56, 56, 56 }, /* 40.5 Mb */ + 36100, 16, 16 }, /* 40.5 Mb */ [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, - 72900, 17, 17, 2, 57, 57, 57 }, /* 81 Mb */ + 72900, 17, 17 }, /* 81 Mb */ [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, - 108300, 18, 18, 2, 58, 58, 58 }, /* 121.5 Mb */ + 108300, 18, 18 }, /* 121.5 Mb */ [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, - 142000, 19, 19, 4, 59, 59, 59 }, /* 162 Mb */ + 142000, 19, 19 }, /* 162 Mb */ [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, - 205100, 20, 20, 4, 60, 61, 61 }, /* 243 Mb */ + 205100, 20, 20 }, /* 243 Mb */ [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, - 224700, 20, 20, 4, 60, 61, 61 }, /* 270 Mb */ + 224700, 20, 20 }, /* 270 Mb */ [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, - 263100, 21, 21, 4, 62, 63, 63 }, /* 324 Mb */ + 263100, 21, 21 }, 
/* 324 Mb */ [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, - 288000, 21, 21, 4, 62, 63, 63 }, /* 360 Mb */ + 288000, 21, 21 }, /* 360 Mb */ [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, - 290700, 22, 22, 4, 64, 65, 65 }, /* 364.5 Mb */ + 290700, 22, 22 }, /* 364.5 Mb */ [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, - 317200, 22, 22, 4, 64, 65, 65 }, /* 405 Mb */ + 317200, 22, 22 }, /* 405 Mb */ [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, - 317200, 23, 23, 4, 66, 67, 67 }, /* 405 Mb */ + 317200, 23, 23 }, /* 405 Mb */ [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, - 346400, 23, 23, 4, 66, 67, 67 }, /* 450 Mb */ + 346400, 23, 23 }, /* 450 Mb */ }, 50, /* probe interval */ WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ @@ -173,149 +173,149 @@ static const struct ath_rate_table ar5416_11ng_ratetable = { 12, /* MCS start */ { [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000, - 900, 0, 2, 0, 0, 0, 0 }, /* 1 Mb */ + 900, 0, 2 }, /* 1 Mb */ [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000, - 1900, 1, 4, 1, 1, 1, 1 }, /* 2 Mb */ + 1900, 1, 4 }, /* 2 Mb */ [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500, - 4900, 2, 11, 2, 2, 2, 2 }, /* 5.5 Mb */ + 4900, 2, 11 }, /* 5.5 Mb */ [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000, - 8100, 3, 22, 3, 3, 3, 3 }, /* 11 Mb */ + 8100, 3, 22 }, /* 11 Mb */ [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, - 5400, 4, 12, 4, 4, 4, 4 }, /* 6 Mb */ + 5400, 4, 12 }, /* 6 Mb */ [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, - 7800, 5, 18, 4, 5, 5, 5 }, /* 9 Mb */ + 7800, 5, 18 }, /* 9 Mb */ [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, - 10100, 6, 24, 6, 6, 6, 6 }, /* 12 Mb */ + 10100, 6, 24 }, /* 12 Mb */ [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, - 14100, 7, 36, 6, 7, 7, 7 }, /* 18 Mb */ + 14100, 7, 36 }, /* 18 Mb */ [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, - 17700, 8, 48, 8, 8, 8, 8 }, /* 24 Mb */ + 17700, 8, 48 }, /* 24 Mb */ [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, - 23700, 9, 72, 8, 9, 9, 9 }, /* 36 Mb */ + 23700, 9, 72 }, /* 36 Mb */ [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, - 27400, 10, 96, 8, 10, 10, 10 }, /* 48 Mb */ + 27400, 10, 96 }, /* 48 Mb */ [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, - 30900, 11, 108, 8, 11, 11, 11 }, /* 54 Mb */ + 30900, 11, 108 }, /* 54 Mb */ [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500, - 6400, 0, 0, 4, 42, 12, 42 }, /* 6.5 Mb */ + 6400, 0, 0 }, /* 6.5 Mb */ [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000, - 12700, 1, 1, 6, 43, 13, 43 }, /* 13 Mb */ + 12700, 1, 1 }, /* 13 Mb */ [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500, - 18800, 2, 2, 6, 44, 14, 44 }, /* 19.5 Mb*/ + 18800, 2, 2 }, /* 19.5 Mb*/ [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000, - 25000, 3, 3, 8, 45, 15, 45 }, /* 26 Mb */ + 25000, 3, 3 }, /* 26 Mb */ [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000, - 36700, 4, 4, 8, 46, 16, 46 }, /* 39 Mb */ + 36700, 4, 4 }, /* 39 Mb */ [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000, - 48100, 5, 5, 8, 47, 17, 47 }, /* 52 Mb */ + 48100, 5, 5 }, /* 52 Mb */ [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500, - 53500, 6, 6, 8, 48, 18, 48 }, /* 58.5 Mb */ + 53500, 6, 6 }, /* 58.5 Mb */ [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000, - 59000, 7, 7, 8, 49, 20, 50 }, /* 65 Mb */ + 59000, 7, 7 }, /* 65 Mb */ [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200, - 65400, 7, 7, 8, 49, 20, 50 }, /* 65 Mb*/ + 65400, 7, 7 }, /* 65 Mb*/ [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000, - 12700, 8, 8, 4, 51, 21, 51 }, /* 13 Mb */ + 12700, 8, 8 }, /* 13 Mb */ [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000, - 24800, 9, 9, 6, 52, 22, 52 
}, /* 26 Mb */ + 24800, 9, 9 }, /* 26 Mb */ [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000, - 36600, 10, 10, 6, 53, 23, 53 }, /* 39 Mb */ + 36600, 10, 10 }, /* 39 Mb */ [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000, - 48100, 11, 11, 8, 54, 24, 54 }, /* 52 Mb */ + 48100, 11, 11 }, /* 52 Mb */ [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000, - 69500, 12, 12, 8, 55, 25, 55 }, /* 78 Mb */ + 69500, 12, 12 }, /* 78 Mb */ [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000, - 89500, 13, 13, 8, 56, 26, 56 }, /* 104 Mb */ + 89500, 13, 13 }, /* 104 Mb */ [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000, - 98900, 14, 14, 8, 57, 27, 57 }, /* 117 Mb */ + 98900, 14, 14 }, /* 117 Mb */ [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000, - 108300, 15, 15, 8, 58, 29, 59 }, /* 130 Mb */ + 108300, 15, 15 }, /* 130 Mb */ [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400, - 120000, 15, 15, 8, 58, 29, 59 }, /* 144.4 Mb */ + 120000, 15, 15 }, /* 144.4 Mb */ [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500, - 17400, 16, 16, 4, 60, 30, 60 }, /* 19.5 Mb */ + 17400, 16, 16 }, /* 19.5 Mb */ [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000, - 35100, 17, 17, 6, 61, 31, 61 }, /* 39 Mb */ + 35100, 17, 17 }, /* 39 Mb */ [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500, - 52600, 18, 18, 6, 62, 32, 62 }, /* 58.5 Mb */ + 52600, 18, 18 }, /* 58.5 Mb */ [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000, - 70400, 19, 19, 8, 63, 33, 63 }, /* 78 Mb */ + 70400, 19, 19 }, /* 78 Mb */ [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000, - 104900, 20, 20, 8, 64, 35, 65 }, /* 117 Mb */ + 104900, 20, 20 }, /* 117 Mb */ [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000, - 115800, 20, 20, 8, 64, 35, 65 }, /* 130 Mb */ + 115800, 20, 20 }, /* 130 Mb */ [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000, - 137200, 21, 21, 8, 66, 37, 67 }, /* 156 Mb */ + 137200, 21, 21 }, /* 156 Mb */ [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300, - 151100, 21, 21, 8, 66, 37, 67 }, /* 173.3 Mb */ + 151100, 21, 21 }, /* 173.3 Mb */ [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500, - 152800, 22, 22, 8, 68, 39, 69 }, /* 175.5 Mb */ + 152800, 22, 22 }, /* 175.5 Mb */ [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000, - 168400, 22, 22, 8, 68, 39, 69 }, /* 195 Mb */ + 168400, 22, 22 }, /* 195 Mb */ [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000, - 168400, 23, 23, 8, 70, 41, 71 }, /* 195 Mb */ + 168400, 23, 23 }, /* 195 Mb */ [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700, - 185000, 23, 23, 8, 70, 41, 71 }, /* 216.7 Mb */ + 185000, 23, 23 }, /* 216.7 Mb */ [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500, - 13200, 0, 0, 8, 42, 42, 42 }, /* 13.5 Mb */ + 13200, 0, 0 }, /* 13.5 Mb */ [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500, - 25900, 1, 1, 8, 43, 43, 43 }, /* 27.0 Mb */ + 25900, 1, 1 }, /* 27.0 Mb */ [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500, - 38600, 2, 2, 8, 44, 44, 44 }, /* 40.5 Mb */ + 38600, 2, 2 }, /* 40.5 Mb */ [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000, - 49800, 3, 3, 8, 45, 45, 45 }, /* 54 Mb */ + 49800, 3, 3 }, /* 54 Mb */ [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500, - 72200, 4, 4, 8, 46, 46, 46 }, /* 81 Mb */ + 72200, 4, 4 }, /* 81 Mb */ [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000, - 92900, 5, 5, 8, 47, 47, 47 }, /* 108 Mb */ + 92900, 5, 5 }, /* 108 Mb */ [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500, - 102700, 6, 6, 8, 48, 48, 48 }, /* 121.5 Mb */ + 102700, 6, 6 }, /* 121.5 Mb */ [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000, - 112000, 7, 7, 8, 49, 50, 50 }, /* 135 Mb */ 
+ 112000, 7, 7 }, /* 135 Mb */ [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, - 122000, 7, 7, 8, 49, 50, 50 }, /* 150 Mb */ + 122000, 7, 7 }, /* 150 Mb */ [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000, - 25800, 8, 8, 8, 51, 51, 51 }, /* 27 Mb */ + 25800, 8, 8 }, /* 27 Mb */ [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000, - 49800, 9, 9, 8, 52, 52, 52 }, /* 54 Mb */ + 49800, 9, 9 }, /* 54 Mb */ [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000, - 71900, 10, 10, 8, 53, 53, 53 }, /* 81 Mb */ + 71900, 10, 10 }, /* 81 Mb */ [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000, - 92500, 11, 11, 8, 54, 54, 54 }, /* 108 Mb */ + 92500, 11, 11 }, /* 108 Mb */ [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000, - 130300, 12, 12, 8, 55, 55, 55 }, /* 162 Mb */ + 130300, 12, 12 }, /* 162 Mb */ [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000, - 162800, 13, 13, 8, 56, 56, 56 }, /* 216 Mb */ + 162800, 13, 13 }, /* 216 Mb */ [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000, - 178200, 14, 14, 8, 57, 57, 57 }, /* 243 Mb */ + 178200, 14, 14 }, /* 243 Mb */ [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000, - 192100, 15, 15, 8, 58, 59, 59 }, /* 270 Mb */ + 192100, 15, 15 }, /* 270 Mb */ [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000, - 207000, 15, 15, 8, 58, 59, 59 }, /* 300 Mb */ + 207000, 15, 15 }, /* 300 Mb */ [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500, - 36100, 16, 16, 8, 60, 60, 60 }, /* 40.5 Mb */ + 36100, 16, 16 }, /* 40.5 Mb */ [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000, - 72900, 17, 17, 8, 61, 61, 61 }, /* 81 Mb */ + 72900, 17, 17 }, /* 81 Mb */ [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500, - 108300, 18, 18, 8, 62, 62, 62 }, /* 121.5 Mb */ + 108300, 18, 18 }, /* 121.5 Mb */ [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000, - 142000, 19, 19, 8, 63, 63, 63 }, /* 162 Mb */ + 142000, 19, 19 }, /* 162 Mb */ [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, - 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */ + 205100, 20, 20 }, /* 243 Mb */ [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, - 224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */ + 224700, 20, 20 }, /* 270 Mb */ [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, - 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */ + 263100, 21, 21 }, /* 324 Mb */ [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, - 288000, 21, 21, 8, 66, 67, 67 }, /* 360 Mb */ + 288000, 21, 21 }, /* 360 Mb */ [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500, - 290700, 22, 22, 8, 68, 69, 69 }, /* 364.5 Mb */ + 290700, 22, 22 }, /* 364.5 Mb */ [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000, - 317200, 22, 22, 8, 68, 69, 69 }, /* 405 Mb */ + 317200, 22, 22 }, /* 405 Mb */ [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000, - 317200, 23, 23, 8, 70, 71, 71 }, /* 405 Mb */ + 317200, 23, 23 }, /* 405 Mb */ [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000, - 346400, 23, 23, 8, 70, 71, 71 }, /* 450 Mb */ + 346400, 23, 23 }, /* 450 Mb */ }, 50, /* probe interval */ WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ @@ -326,21 +326,21 @@ static const struct ath_rate_table ar5416_11a_ratetable = { 0, { { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ - 5400, 0, 12, 0}, + 5400, 0, 12}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ - 7800, 1, 18, 0}, + 7800, 1, 18}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ - 10000, 2, 24, 2}, + 10000, 2, 24}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ - 13900, 3, 36, 2}, + 13900, 3, 36}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ - 17300, 4, 48, 4}, + 17300, 4, 48}, { RC_L_SDT, 
WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ - 23000, 5, 72, 4}, + 23000, 5, 72}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ - 27400, 6, 96, 4}, + 27400, 6, 96}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ - 29300, 7, 108, 4}, + 29300, 7, 108}, }, 50, /* probe interval */ 0, /* Phy rates allowed initially */ @@ -351,63 +351,62 @@ static const struct ath_rate_table ar5416_11g_ratetable = { 0, { { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */ - 900, 0, 2, 0}, + 900, 0, 2}, { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */ - 1900, 1, 4, 1}, + 1900, 1, 4}, { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */ - 4900, 2, 11, 2}, + 4900, 2, 11}, { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */ - 8100, 3, 22, 3}, + 8100, 3, 22}, { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */ - 5400, 4, 12, 4}, + 5400, 4, 12}, { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */ - 7800, 5, 18, 4}, + 7800, 5, 18}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */ - 10000, 6, 24, 6}, + 10000, 6, 24}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */ - 13900, 7, 36, 6}, + 13900, 7, 36}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */ - 17300, 8, 48, 8}, + 17300, 8, 48}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */ - 23000, 9, 72, 8}, + 23000, 9, 72}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */ - 27400, 10, 96, 8}, + 27400, 10, 96}, { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */ - 29300, 11, 108, 8}, + 29300, 11, 108}, }, 50, /* probe interval */ 0, /* Phy rates allowed initially */ }; -static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, +static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv, struct ieee80211_tx_rate *rate) { - int rix = 0, i = 0; - static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 }; + const struct ath_rate_table *rate_table = ath_rc_priv->rate_table; + int rix, i, idx = 0; if (!(rate->flags & IEEE80211_TX_RC_MCS)) return rate->idx; - while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) { - rix++; i++; + for (i = 0; i < ath_rc_priv->max_valid_rate; i++) { + idx = ath_rc_priv->valid_rate_index[i]; + + if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) && + rate_table->info[idx].ratecode == rate->idx) + break; } - rix += rate->idx + rate_table->mcs_start; + rix = idx; - if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && - (rate->flags & IEEE80211_TX_RC_SHORT_GI)) - rix = rate_table->info[rix].ht_index; - else if (rate->flags & IEEE80211_TX_RC_SHORT_GI) - rix = rate_table->info[rix].sgi_index; - else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - rix = rate_table->info[rix].cw40index; + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rix++; return rix; } -static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, - struct ath_rate_priv *ath_rc_priv) +static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv) { + const struct ath_rate_table *rate_table = ath_rc_priv->rate_table; u8 i, j, idx, idx_next; for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) { @@ -424,21 +423,6 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, } } -static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv) -{ - u8 i; - - for (i = 0; i < ath_rc_priv->rate_table_size; i++) - ath_rc_priv->valid_rate_index[i] = 0; -} - -static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv, - u8 index, int valid_tx_rate) -{ - BUG_ON(index > ath_rc_priv->rate_table_size); - ath_rc_priv->valid_rate_index[index] = !!valid_tx_rate; -} - static inline int ath_rc_get_nextvalid_txrate(const struct 
ath_rate_table *rate_table, struct ath_rate_priv *ath_rc_priv, @@ -479,8 +463,7 @@ static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) } static inline int -ath_rc_get_lower_rix(const struct ath_rate_table *rate_table, - struct ath_rate_priv *ath_rc_priv, +ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv, u8 cur_valid_txrate, u8 *next_idx) { int8_t i; @@ -495,10 +478,9 @@ ath_rc_get_lower_rix(const struct ath_rate_table *rate_table, return 0; } -static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, - const struct ath_rate_table *rate_table, - u32 capflag) +static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv) { + const struct ath_rate_table *rate_table = ath_rc_priv->rate_table; u8 i, hi = 0; for (i = 0; i < rate_table->rate_cnt; i++) { @@ -506,14 +488,14 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, u32 phy = rate_table->info[i].phy; u8 valid_rate_count = 0; - if (!ath_rc_valid_phyrate(phy, capflag, 0)) + if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0)) continue; valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy]; ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; ath_rc_priv->valid_phy_ratecnt[phy] += 1; - ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1); + ath_rc_priv->valid_rate_index[i] = true; hi = i; } } @@ -521,76 +503,73 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv, return hi; } -static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, - const struct ath_rate_table *rate_table, - struct ath_rateset *rateset, - u32 capflag) +static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags, + u32 phy, u32 capflag) { - u8 i, j, hi = 0; + if (rate != dot11rate || WLAN_RC_PHY_HT(phy)) + return false; - /* Use intersection of working rates and valid rates */ - for (i = 0; i < rateset->rs_nrates; i++) { - for (j = 0; j < rate_table->rate_cnt; j++) { - u32 phy = rate_table->info[j].phy; - u16 rate_flags = rate_table->info[j].rate_flags; - u8 rate = rateset->rs_rates[i]; - u8 dot11rate = rate_table->info[j].dot11rate; - - /* We allow a rate only if its valid and the - * capflag matches one of the validity - * (VALID/VALID_20/VALID_40) flags */ - - if ((rate == dot11rate) && - (rate_flags & WLAN_RC_CAP_MODE(capflag)) == - WLAN_RC_CAP_MODE(capflag) && - (rate_flags & WLAN_RC_CAP_STREAM(capflag)) && - !WLAN_RC_PHY_HT(phy)) { - u8 valid_rate_count = 0; - - if (!ath_rc_valid_phyrate(phy, capflag, 0)) - continue; - - valid_rate_count = - ath_rc_priv->valid_phy_ratecnt[phy]; - - ath_rc_priv->valid_phy_rateidx[phy] - [valid_rate_count] = j; - ath_rc_priv->valid_phy_ratecnt[phy] += 1; - ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1); - hi = max(hi, j); - } - } - } + if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag)) + return false; - return hi; + if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag))) + return false; + + return true; } -static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv, - const struct ath_rate_table *rate_table, - struct ath_rateset *rateset, u32 capflag) +static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags, + u32 phy, u32 capflag) { - u8 i, j, hi = 0; + if (rate != dot11rate || !WLAN_RC_PHY_HT(phy)) + return false; + + if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag)) + return false; + + if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag))) + return false; + + return true; +} + +static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy) +{ + const struct ath_rate_table 
*rate_table = ath_rc_priv->rate_table; + struct ath_rateset *rateset; + u32 phy, capflag = ath_rc_priv->ht_cap; + u16 rate_flags; + u8 i, j, hi = 0, rate, dot11rate, valid_rate_count; + + if (legacy) + rateset = &ath_rc_priv->neg_rates; + else + rateset = &ath_rc_priv->neg_ht_rates; - /* Use intersection of working rates and valid rates */ for (i = 0; i < rateset->rs_nrates; i++) { for (j = 0; j < rate_table->rate_cnt; j++) { - u32 phy = rate_table->info[j].phy; - u16 rate_flags = rate_table->info[j].rate_flags; - u8 rate = rateset->rs_rates[i]; - u8 dot11rate = rate_table->info[j].dot11rate; - - if ((rate != dot11rate) || !WLAN_RC_PHY_HT(phy) || - !(rate_flags & WLAN_RC_CAP_STREAM(capflag)) || - !WLAN_RC_PHY_HT_VALID(rate_flags, capflag)) + phy = rate_table->info[j].phy; + rate_flags = rate_table->info[j].rate_flags; + rate = rateset->rs_rates[i]; + dot11rate = rate_table->info[j].dot11rate; + + if (legacy && + !ath_rc_check_legacy(rate, dot11rate, + rate_flags, phy, capflag)) + continue; + + if (!legacy && + !ath_rc_check_ht(rate, dot11rate, + rate_flags, phy, capflag)) continue; if (!ath_rc_valid_phyrate(phy, capflag, 0)) continue; - ath_rc_priv->valid_phy_rateidx[phy] - [ath_rc_priv->valid_phy_ratecnt[phy]] = j; + valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy]; + ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j; ath_rc_priv->valid_phy_ratecnt[phy] += 1; - ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1); + ath_rc_priv->valid_rate_index[j] = true; hi = max(hi, j); } } @@ -598,13 +577,10 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv, return hi; } -/* Finds the highest rate index we can use */ -static u8 ath_rc_get_highest_rix(struct ath_softc *sc, - struct ath_rate_priv *ath_rc_priv, - const struct ath_rate_table *rate_table, - int *is_probing, - bool legacy) +static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv, + int *is_probing) { + const struct ath_rate_table *rate_table = ath_rc_priv->rate_table; u32 best_thruput, this_thruput, now_msec; u8 rate, next_rate, best_rate, maxindex, minindex; int8_t index = 0; @@ -624,8 +600,6 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc, u8 per_thres; rate = ath_rc_priv->valid_rate_index[index]; - if (legacy && !(rate_table->info[rate].rate_flags & RC_LEGACY)) - continue; if (rate > ath_rc_priv->rate_max_phy) continue; @@ -707,8 +681,6 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table, rate->count = tries; rate->idx = rate_table->info[rix].ratecode; - if (txrc->short_preamble) - rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; if (txrc->rts || rtsctsenable) rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; @@ -726,37 +698,25 @@ static void ath_rc_rate_set_rtscts(struct ath_softc *sc, const struct ath_rate_table *rate_table, struct ieee80211_tx_info *tx_info) { - struct ieee80211_tx_rate *rates = tx_info->control.rates; - int i = 0, rix = 0, cix, enable_g_protection = 0; + struct ieee80211_bss_conf *bss_conf; - /* get the cix for the lowest valid rix */ - for (i = 3; i >= 0; i--) { - if (rates[i].count && (rates[i].idx >= 0)) { - rix = ath_rc_get_rateindex(rate_table, &rates[i]); - break; - } - } - cix = rate_table->info[rix].ctrl_rate; + if (!tx_info->control.vif) + return; + /* + * For legacy frames, mac80211 takes care of CTS protection. 
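As a rough standalone illustration (an editor's sketch under simplified assumptions, not part of the patch): the rts_cts_rate_idx assignment a few lines below simply picks the lowest set bit of the negotiated basic-rates bitmap, i.e. the lowest basic rate. A plain-C stand-in for the kernel's __ffs(), valid only for a non-zero word, which is why the driver code bails out first when basic_rates is 0:

static unsigned int lowest_basic_rate_idx(unsigned long basic_rates)
{
	unsigned int idx = 0;

	/* caller must guarantee basic_rates != 0, as the driver does */
	while (!(basic_rates & 1UL)) {
		basic_rates >>= 1;
		idx++;
	}
	return idx;
}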
+ */ + if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)) + return; - /* All protection frames are transmited at 2Mb/s for 802.11g, - * otherwise we transmit them at 1Mb/s */ - if (sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ && - !conf_is_ht(&sc->hw->conf)) - enable_g_protection = 1; + bss_conf = &tx_info->control.vif->bss_conf; + + if (!bss_conf->basic_rates) + return; /* - * If 802.11g protection is enabled, determine whether to use RTS/CTS or - * just CTS. Note that this is only done for OFDM/HT unicast frames. + * For now, use the lowest allowed basic rate for HT frames. */ - if ((tx_info->control.vif && - tx_info->control.vif->bss_conf.use_cts_prot) && - (rate_table->info[rix].phy == WLAN_RC_PHY_OFDM || - WLAN_RC_PHY_HT(rate_table->info[rix].phy))) { - rates[0].flags |= IEEE80211_TX_RC_USE_CTS_PROTECT; - cix = rate_table->info[enable_g_protection].ctrl_rate; - } - - tx_info->control.rts_cts_rate_idx = cix; + tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates); } static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, @@ -789,14 +749,8 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, try_per_rate = 4; rate_table = ath_rc_priv->rate_table; - rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, - &is_probe, false); + rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe); - /* - * If we're in HT mode and both us and our peer supports LDPC. - * We don't need to check our own device's capabilities as our own - * ht capabilities would have already been intersected with our peer's. - */ if (conf_is_ht(&sc->hw->conf) && (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) tx_info->flags |= IEEE80211_TX_CTL_LDPC; @@ -806,52 +760,45 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT); if (is_probe) { - /* set one try for probe rates. For the - * probes don't enable rts */ + /* + * Set one try for probe rates. For the + * probes don't enable RTS. + */ ath_rc_rate_set_series(rate_table, &rates[i++], txrc, 1, rix, 0); - - /* Get the next tried/allowed rate. No RTS for the next series - * after the probe rate + /* + * Get the next tried/allowed rate. + * No RTS for the next series after the probe rate. */ - ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); + ath_rc_get_lower_rix(ath_rc_priv, rix, &rix); ath_rc_rate_set_series(rate_table, &rates[i++], txrc, try_per_rate, rix, 0); tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { - /* Set the chosen rate. No RTS for first series entry. */ + /* + * Set the chosen rate. No RTS for first series entry. + */ ath_rc_rate_set_series(rate_table, &rates[i++], txrc, try_per_rate, rix, 0); } - /* Fill in the other rates for multirate retry */ - for ( ; i < 3; i++) { + for ( ; i < 4; i++) { + /* + * Use twice the number of tries for the last MRR segment. + */ + if (i + 1 == 4) + try_per_rate = 8; + + ath_rc_get_lower_rix(ath_rc_priv, rix, &rix); - ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); - /* All other rates in the series have RTS enabled */ + /* + * All other rates in the series have RTS enabled. + */ ath_rc_rate_set_series(rate_table, &rates[i], txrc, try_per_rate, rix, 1); } - /* Use twice the number of tries for the last MRR segment. 
*/ - try_per_rate = 8; - - /* - * If the last rate in the rate series is MCS and has - * more than 80% of per thresh, then use a legacy rate - * as last retry to ensure that the frame is tried in both - * MCS and legacy rate. - */ - ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); - if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) && - (ath_rc_priv->per[rix] > 45)) - rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, - &is_probe, true); - - /* All other rates in the series have RTS enabled */ - ath_rc_rate_set_series(rate_table, &rates[i], txrc, - try_per_rate, rix, 1); /* * NB:Change rate series to enable aggregation when operating * at lower MCS rates. When first rate in series is MCS2 @@ -893,7 +840,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, rates[0].count = ATH_TXMAXTRY; } - /* Setup RTS/CTS */ ath_rc_rate_set_rtscts(sc, rate_table, tx_info); } @@ -1046,9 +992,6 @@ static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix, stats->per = per; } -/* Update PER, RSSI and whatever else that the code thinks it is doing. - If you can make sense of all this, you really need to go out more. */ - static void ath_rc_update_ht(struct ath_softc *sc, struct ath_rate_priv *ath_rc_priv, struct ieee80211_tx_info *tx_info, @@ -1077,8 +1020,8 @@ static void ath_rc_update_ht(struct ath_softc *sc, if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 && rate_table->info[tx_rate].ratekbps <= rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) { - ath_rc_get_lower_rix(rate_table, ath_rc_priv, - (u8)tx_rate, &ath_rc_priv->rate_max_phy); + ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate, + &ath_rc_priv->rate_max_phy); /* Don't probe for a little while. */ ath_rc_priv->probe_time = now_msec; @@ -1122,25 +1065,42 @@ static void ath_rc_update_ht(struct ath_softc *sc, } +static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate) +{ + struct ath_rc_stats *stats; + + stats = &rc->rcstats[final_rate]; + stats->success++; +} static void ath_rc_tx_status(struct ath_softc *sc, struct ath_rate_priv *ath_rc_priv, - struct ieee80211_tx_info *tx_info, - int final_ts_idx, int xretries, int long_retry) + struct sk_buff *skb) { - const struct ath_rate_table *rate_table; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rates = tx_info->status.rates; + struct ieee80211_tx_rate *rate; + int final_ts_idx = 0, xretries = 0, long_retry = 0; u8 flags; u32 i = 0, rix; - rate_table = ath_rc_priv->rate_table; + for (i = 0; i < sc->hw->max_rates; i++) { + rate = &tx_info->status.rates[i]; + if (rate->idx < 0 || !rate->count) + break; + + final_ts_idx = i; + long_retry = rate->count - 1; + } + + if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) + xretries = 1; /* * If the first rate is not the final index, there * are intermediate rate failures to be processed. */ if (final_ts_idx != 0) { - /* Process intermediate rates that failed.*/ for (i = 0; i < final_ts_idx ; i++) { if (rates[i].count != 0 && (rates[i].idx >= 0)) { flags = rates[i].flags; @@ -1152,32 +1112,24 @@ static void ath_rc_tx_status(struct ath_softc *sc, !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) return; - rix = ath_rc_get_rateindex(rate_table, &rates[i]); + rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]); ath_rc_update_ht(sc, ath_rc_priv, tx_info, - rix, xretries ? 1 : 2, - rates[i].count); + rix, xretries ? 
1 : 2, + rates[i].count); } } - } else { - /* - * Handle the special case of MIMO PS burst, where the second - * aggregate is sent out with only one rate and one try. - * Treating it as an excessive retry penalizes the rate - * inordinately. - */ - if (rates[0].count == 1 && xretries == 1) - xretries = 2; } - flags = rates[i].flags; + flags = rates[final_ts_idx].flags; /* If HT40 and we have switched mode from 40 to 20 => don't update */ if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) && !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG)) return; - rix = ath_rc_get_rateindex(rate_table, &rates[i]); + rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]); ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry); + ath_debug_stat_rc(ath_rc_priv, rix); } static const @@ -1185,8 +1137,6 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc, enum ieee80211_band band, bool is_ht) { - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - switch(band) { case IEEE80211_BAND_2GHZ: if (is_ht) @@ -1197,34 +1147,25 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc, return &ar5416_11na_ratetable; return &ar5416_11a_ratetable; default: - ath_dbg(common, CONFIG, "Invalid band\n"); return NULL; } } static void ath_rc_init(struct ath_softc *sc, - struct ath_rate_priv *ath_rc_priv, - struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, - const struct ath_rate_table *rate_table) + struct ath_rate_priv *ath_rc_priv) { + const struct ath_rate_table *rate_table = ath_rc_priv->rate_table; struct ath_rateset *rateset = &ath_rc_priv->neg_rates; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_rateset *ht_mcs = &ath_rc_priv->neg_ht_rates; u8 i, j, k, hi = 0, hthi = 0; - /* Initial rate table size. 
Will change depending - * on the working rate set */ ath_rc_priv->rate_table_size = RATE_TABLE_SIZE; - /* Initialize thresholds according to the global rate table */ for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) { ath_rc_priv->per[i] = 0; + ath_rc_priv->valid_rate_index[i] = 0; } - /* Determine the valid rates */ - ath_rc_init_valid_rate_idx(ath_rc_priv); - for (i = 0; i < WLAN_RC_PHY_MAX; i++) { for (j = 0; j < RATE_TABLE_SIZE; j++) ath_rc_priv->valid_phy_rateidx[i][j] = 0; @@ -1232,25 +1173,19 @@ static void ath_rc_init(struct ath_softc *sc, } if (!rateset->rs_nrates) { - /* No working rate, just initialize valid rates */ - hi = ath_rc_init_validrates(ath_rc_priv, rate_table, - ath_rc_priv->ht_cap); + hi = ath_rc_init_validrates(ath_rc_priv); } else { - /* Use intersection of working rates and valid rates */ - hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table, - rateset, ath_rc_priv->ht_cap); - if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) { - hthi = ath_rc_setvalid_htrates(ath_rc_priv, - rate_table, - ht_mcs, - ath_rc_priv->ht_cap); - } + hi = ath_rc_setvalid_rates(ath_rc_priv, true); + + if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) + hthi = ath_rc_setvalid_rates(ath_rc_priv, false); + hi = max(hi, hthi); } ath_rc_priv->rate_table_size = hi + 1; ath_rc_priv->rate_max_phy = 0; - BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); + WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) { @@ -1258,28 +1193,26 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->valid_phy_rateidx[i][j]; } - if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) - || !ath_rc_priv->valid_phy_ratecnt[i]) + if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) || + !ath_rc_priv->valid_phy_ratecnt[i]) continue; ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1]; } - BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); - BUG_ON(k > RATE_TABLE_SIZE); + WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE); + WARN_ON(k > RATE_TABLE_SIZE); ath_rc_priv->max_valid_rate = k; - ath_rc_sort_validrates(rate_table, ath_rc_priv); + ath_rc_sort_validrates(ath_rc_priv); ath_rc_priv->rate_max_phy = (k > 4) ? 
- ath_rc_priv->valid_rate_index[k-4] : - ath_rc_priv->valid_rate_index[k-1]; - ath_rc_priv->rate_table = rate_table; + ath_rc_priv->valid_rate_index[k-4] : + ath_rc_priv->valid_rate_index[k-1]; ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n", ath_rc_priv->ht_cap); } -static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, - bool is_cw40, bool is_sgi) +static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta) { u8 caps = 0; @@ -1289,9 +1222,10 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG; else if (sta->ht_cap.mcs.rx_mask[1]) caps |= WLAN_RC_DS_FLAG; - if (is_cw40) + if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) caps |= WLAN_RC_40_FLAG; - if (is_sgi) + if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40 || + sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) caps |= WLAN_RC_SGI_FLAG; } @@ -1319,15 +1253,6 @@ static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta, /* mac80211 Rate Control callbacks */ /***********************************/ -static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate) -{ - struct ath_rc_stats *stats; - - stats = &rc->rcstats[final_rate]; - stats->success++; -} - - static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb) @@ -1335,22 +1260,8 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, struct ath_softc *sc = priv; struct ath_rate_priv *ath_rc_priv = priv_sta; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr; - int final_ts_idx = 0, tx_status = 0; - int long_retry = 0; - __le16 fc; - int i; - - hdr = (struct ieee80211_hdr *)skb->data; - fc = hdr->frame_control; - for (i = 0; i < sc->hw->max_rates; i++) { - struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; - if (rate->idx < 0 || !rate->count) - break; - - final_ts_idx = i; - long_retry = rate->count - 1; - } + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + __le16 fc = hdr->frame_control; if (!priv_sta || !ieee80211_is_data(fc)) return; @@ -1363,11 +1274,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) return; - if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) - tx_status = 1; - - ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status, - long_retry); + ath_rc_tx_status(sc, ath_rc_priv, skb); /* Check if aggregation has to be enabled for this tid */ if (conf_is_ht(&sc->hw->conf) && @@ -1383,19 +1290,14 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, ieee80211_start_tx_ba_session(sta, tid, 0); } } - - ath_debug_stat_rc(ath_rc_priv, - ath_rc_get_rateindex(ath_rc_priv->rate_table, - &tx_info->status.rates[final_ts_idx])); } static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { struct ath_softc *sc = priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_rate_priv *ath_rc_priv = priv_sta; - const struct ath_rate_table *rate_table; - bool is_cw40, is_sgi = false; int i, j = 0; for (i = 0; i < sband->n_bitrates; i++) { @@ -1417,20 +1319,15 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, ath_rc_priv->neg_ht_rates.rs_nrates = j; } - is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40); - - if (is_cw40) - is_sgi 
= !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); - else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) - is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); - - /* Choose rate table first */ - - rate_table = ath_choose_rate_table(sc, sband->band, - sta->ht_cap.ht_supported); + ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band, + sta->ht_cap.ht_supported); + if (!ath_rc_priv->rate_table) { + ath_err(common, "No rate table chosen\n"); + return; + } - ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi); - ath_rc_init(sc, priv_sta, sband, sta, rate_table); + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta); + ath_rc_init(sc, priv_sta); } static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, @@ -1439,40 +1336,14 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, { struct ath_softc *sc = priv; struct ath_rate_priv *ath_rc_priv = priv_sta; - const struct ath_rate_table *rate_table = NULL; - bool oper_cw40 = false, oper_sgi; - bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG); - bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG); - - /* FIXME: Handle AP mode later when we support CWM */ if (changed & IEEE80211_RC_BW_CHANGED) { - if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) - return; + ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta); + ath_rc_init(sc, priv_sta); - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) - oper_cw40 = true; - - if (oper_cw40) - oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - true : false; - else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) - oper_sgi = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? - true : false; - else - oper_sgi = false; - - if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) { - rate_table = ath_choose_rate_table(sc, sband->band, - sta->ht_cap.ht_supported); - ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, - oper_cw40, oper_sgi); - ath_rc_init(sc, priv_sta, sband, sta, rate_table); - - ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, - "Operating HT Bandwidth changed to: %d\n", - sc->hw->conf.channel_type); - } + ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, + "Operating HT Bandwidth changed to: %d\n", + sc->hw->conf.channel_type); } } @@ -1484,7 +1355,7 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, struct ath_rate_priv *rc = file->private_data; char *buf; unsigned int len = 0, max; - int i = 0; + int rix; ssize_t retval; if (rc->rate_table == NULL) @@ -1500,7 +1371,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, "HT", "MCS", "Rate", "Success", "Retries", "XRetries", "PER"); - for (i = 0; i < rc->rate_table_size; i++) { + for (rix = 0; rix < rc->max_valid_rate; rix++) { + u8 i = rc->valid_rate_index[rix]; u32 ratekbps = rc->rate_table->info[i].ratekbps; struct ath_rc_stats *stats = &rc->rcstats[i]; char mcs[5]; diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h index 75f8e9b06b28..268e67dc5fb2 100644 --- a/drivers/net/wireless/ath/ath9k/rc.h +++ b/drivers/net/wireless/ath/ath9k/rc.h @@ -160,10 +160,6 @@ struct ath_rate_table { u32 user_ratekbps; u8 ratecode; u8 dot11rate; - u8 ctrl_rate; - u8 cw40index; - u8 sgi_index; - u8 ht_index; } info[RATE_TABLE_SIZE]; u32 probe_interval; u8 initial_ratemax; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 0d4155aec48d..e0230a43bc5b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ 
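With the ctrl_rate/cw40index/sgi_index/ht_index columns dropped from struct ath_rate_table in the rc.h hunk above, the MCS-to-table-index mapping in rc.c becomes a search over the station's valid HT entries plus a one-slot bump for short GI. A simplified sketch of that lookup, using hypothetical types rather than the driver's own:

struct rate_entry_sketch {
	int is_ht;		/* HT (MCS) entry? */
	unsigned char ratecode;	/* MCS index for HT entries */
};

/*
 * Find the table index whose HT ratecode matches 'mcs'; the short-GI
 * variant is assumed to sit in the slot right after the normal-GI one.
 */
static int mcs_to_table_index(const struct rate_entry_sketch *tbl,
			      const unsigned char *valid, int nvalid,
			      unsigned char mcs, int short_gi)
{
	int i, idx = 0;

	for (i = 0; i < nvalid; i++) {
		idx = valid[i];
		if (tbl[idx].is_ht && tbl[idx].ratecode == mcs)
			break;
	}
	return short_gi ? idx + 1 : idx;
}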
b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1773,11 +1773,12 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, TX_STAT_INC(txq->axq_qnum, queued); } -static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, +static void setup_frame_info(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, int framelen) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = tx_info->control.sta; struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; const struct ieee80211_rate *rate; @@ -1935,7 +1936,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; + struct ieee80211_sta *sta = txctl->sta; struct ieee80211_vif *vif = info->control.vif; struct ath_softc *sc = hw->priv; struct ath_txq *txq = txctl->txq; @@ -1979,7 +1980,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, !ieee80211_is_data(hdr->frame_control)) info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - setup_frame_info(hw, skb, frmlen); + setup_frame_info(hw, sta, skb, frmlen); /* * At this point, the vif, hw_key and sta pointers in the tx control diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index 376be11161c0..2aa4a59c72c8 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -425,6 +425,7 @@ struct ar9170 { bool rx_has_plcp; struct sk_buff *rx_failover; int rx_failover_missing; + u32 ampdu_ref; /* FIFO for collecting outstanding BlockAckRequest */ struct list_head bar_list[__AR9170_NUM_TXQ]; @@ -577,7 +578,9 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len); void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); /* TX */ -void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void carl9170_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb); void carl9170_tx_janitor(struct work_struct *work); void carl9170_tx_process_status(struct ar9170 *ar, const struct carl9170_rsp *cmd); diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index c5ca6f1f5836..24ac2876a733 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -341,6 +341,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) if (SUPP(CARL9170FW_WLANTX_CAB)) { if_comb_types |= BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_P2P_GO); } } diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c index 53415bfd8bef..f8676280dc36 100644 --- a/drivers/net/wireless/ath/carl9170/mac.c +++ b/drivers/net/wireless/ath/carl9170/mac.c @@ -318,10 +318,10 @@ int carl9170_set_operating_mode(struct ar9170 *ar) bssid = common->curbssid; switch (vif->type) { - case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_ADHOC: cam_mode |= AR9170_MAC_CAM_IBSS; break; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: cam_mode |= AR9170_MAC_CAM_AP; diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 858e58dfc4dc..18554ab76733 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ 
b/drivers/net/wireless/ath/carl9170/main.c @@ -616,10 +616,12 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, goto unlock; + case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: if ((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_WDS) || - (vif->type == NL80211_IFTYPE_AP)) + (vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) break; err = -EBUSY; diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 6f6a34155667..a0b723078547 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -206,6 +206,7 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: carl9170_update_beacon(ar, true); break; @@ -623,7 +624,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len) #undef TID_CHECK } -static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms) +static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms, + struct ieee80211_rx_status *rx_status) { __le16 fc; @@ -636,6 +638,9 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms) return true; } + rx_status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; + rx_status->ampdu_reference = ar->ampdu_ref; + /* * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts * certain frame types can be part of an aMPDU. @@ -684,12 +689,15 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) if (unlikely(len < sizeof(*mac))) goto drop; + memset(&status, 0, sizeof(status)); + mpdu_len = len - sizeof(*mac); mac = (void *)(buf + mpdu_len); mac_status = mac->status; switch (mac_status & AR9170_RX_STATUS_MPDU) { case AR9170_RX_STATUS_MPDU_FIRST: + ar->ampdu_ref++; /* Aggregated MPDUs start with an PLCP header */ if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) { head = (void *) buf; @@ -720,12 +728,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) break; case AR9170_RX_STATUS_MPDU_LAST: + status.flag |= RX_FLAG_AMPDU_IS_LAST; + /* * The last frame of an A-MPDU has an extra tail * which does contain the phy status of the whole * aggregate. 
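The carl9170 RX changes in this area thread a per-aggregate counter (ar->ampdu_ref) into every subframe's RX status so mac80211 can tell which MPDUs belonged to the same A-MPDU. A minimal sketch of the idea, with made-up flag names and types standing in for the mac80211 ones:

struct rx_status_sketch {
	unsigned int flags;
	unsigned int ampdu_reference;
};

#define SKETCH_AMPDU_DETAILS	(1U << 0)
#define SKETCH_AMPDU_IS_LAST	(1U << 1)

static void tag_ampdu_subframe(struct rx_status_sketch *status,
			       unsigned int *ampdu_ref, int first, int last)
{
	if (first)			/* first MPDU: a new aggregate starts */
		(*ampdu_ref)++;

	status->flags |= SKETCH_AMPDU_DETAILS;
	status->ampdu_reference = *ampdu_ref;

	if (last)			/* last MPDU carries the PHY-status tail */
		status->flags |= SKETCH_AMPDU_IS_LAST;
}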
*/ - if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) { mpdu_len -= sizeof(struct ar9170_rx_phystatus); phy = (void *)(buf + mpdu_len); @@ -773,11 +782,10 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN))) goto drop; - memset(&status, 0, sizeof(status)); if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) goto drop; - if (!carl9170_ampdu_check(ar, buf, mac_status)) + if (!carl9170_ampdu_check(ar, buf, mac_status, &status)) goto drop; if (phy) diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 6a8681407a1d..84377cf580e0 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -867,14 +867,15 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar, return false; } -static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) +static int carl9170_tx_prepare(struct ar9170 *ar, + struct ieee80211_sta *sta, + struct sk_buff *skb) { struct ieee80211_hdr *hdr; struct _carl9170_tx_superframe *txc; struct carl9170_vif_info *cvif; struct ieee80211_tx_info *info; struct ieee80211_tx_rate *txrate; - struct ieee80211_sta *sta; struct carl9170_tx_info *arinfo; unsigned int hw_queue; int i; @@ -910,8 +911,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) else cvif = NULL; - sta = info->control.sta; - txc = (void *)skb_push(skb, sizeof(*txc)); memset(txc, 0, sizeof(*txc)); @@ -1457,20 +1456,21 @@ err_unlock_rcu: return false; } -void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void carl9170_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ar9170 *ar = hw->priv; struct ieee80211_tx_info *info; - struct ieee80211_sta *sta; + struct ieee80211_sta *sta = control->sta; bool run; if (unlikely(!IS_STARTED(ar))) goto err_free; info = IEEE80211_SKB_CB(skb); - sta = info->control.sta; - if (unlikely(carl9170_tx_prepare(ar, skb))) + if (unlikely(carl9170_tx_prepare(ar, sta, skb))) goto err_free; carl9170_tx_accounting(ar, skb); diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile index 4648bbf76abc..098fe9ee7096 100644 --- a/drivers/net/wireless/b43/Makefile +++ b/drivers/net/wireless/b43/Makefile @@ -4,6 +4,7 @@ b43-y += tables.o b43-$(CONFIG_B43_PHY_N) += tables_nphy.o b43-$(CONFIG_B43_PHY_N) += radio_2055.o b43-$(CONFIG_B43_PHY_N) += radio_2056.o +b43-$(CONFIG_B43_PHY_N) += radio_2057.o b43-y += phy_common.o b43-y += phy_g.o b43-y += phy_a.o diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 7c899fc7ddd0..b298e5d68be2 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -241,16 +241,18 @@ enum { #define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ #define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ #define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ -#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ -#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */ -#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */ +#define B43_SHM_SH_HOSTF1 0x005E /* Hostflags 1 for ucode options */ +#define B43_SHM_SH_HOSTF2 0x0060 /* Hostflags 2 for ucode options */ +#define B43_SHM_SH_HOSTF3 0x0062 /* Hostflags 3 for ucode options */ #define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ #define B43_SHM_SH_RADAR 0x0066 /* Radar register */ #define 
B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ #define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ +#define B43_SHM_SH_HOSTF4 0x0078 /* Hostflags 4 for ucode options */ #define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ #define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */ #define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */ +#define B43_SHM_SH_HOSTF5 0x00D4 /* Hostflags 5 for ucode options */ #define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ /* TSSI information */ #define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */ @@ -415,6 +417,8 @@ enum { #define B43_PHYTYPE_HT 0x07 #define B43_PHYTYPE_LCN 0x08 #define B43_PHYTYPE_LCNXN 0x09 +#define B43_PHYTYPE_LCN40 0x0a +#define B43_PHYTYPE_AC 0x0b /* PHYRegisters */ #define B43_PHY_ILT_A_CTRL 0x0072 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index a140165dfee0..73730e94e0ac 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -533,11 +533,11 @@ u64 b43_hf_read(struct b43_wldev *dev) { u64 ret; - ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); + ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3); ret <<= 16; - ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI); + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2); ret <<= 16; - ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1); return ret; } @@ -550,9 +550,9 @@ void b43_hf_write(struct b43_wldev *dev, u64 value) lo = (value & 0x00000000FFFFULL); mi = (value & 0x0000FFFF0000ULL) >> 16; hi = (value & 0xFFFF00000000ULL) >> 32; - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi); - b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1, lo); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF2, mi); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF3, hi); } /* Read the firmware capabilities bitmask (Opensource firmware only) */ @@ -3412,7 +3412,8 @@ static void b43_tx_work(struct work_struct *work) } static void b43_op_tx(struct ieee80211_hw *hw, - struct sk_buff *skb) + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct b43_wl *wl = hw_to_b43_wl(hw); @@ -4282,6 +4283,35 @@ out: return err; } +static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type) +{ + switch (phy_type) { + case B43_PHYTYPE_A: + return "A"; + case B43_PHYTYPE_B: + return "B"; + case B43_PHYTYPE_G: + return "G"; + case B43_PHYTYPE_N: + return "N"; + case B43_PHYTYPE_LP: + return "LP"; + case B43_PHYTYPE_SSLPN: + return "SSLPN"; + case B43_PHYTYPE_HT: + return "HT"; + case B43_PHYTYPE_LCN: + return "LCN"; + case B43_PHYTYPE_LCNXN: + return "LCNXN"; + case B43_PHYTYPE_LCN40: + return "LCN40"; + case B43_PHYTYPE_AC: + return "AC"; + } + return "UNKNOWN"; +} + /* Get PHY and RADIO versioning numbers */ static int b43_phy_versioning(struct b43_wldev *dev) { @@ -4342,13 +4372,13 @@ static int b43_phy_versioning(struct b43_wldev *dev) unsupported = 1; } if (unsupported) { - b43err(dev->wl, "FOUND UNSUPPORTED PHY " - "(Analog %u, Type %u, Revision %u)\n", - analog_type, phy_type, phy_rev); + b43err(dev->wl, "FOUND UNSUPPORTED PHY (Analog %u, Type %d (%s), Revision %u)\n", + analog_type, phy_type, b43_phy_name(dev, phy_type), + 
phy_rev); return -EOPNOTSUPP; } - b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", - analog_type, phy_type, phy_rev); + b43info(dev->wl, "Found PHY: Analog %u, Type %d (%s), Revision %u\n", + analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev); /* Get RADIO versioning */ if (dev->dev->core_rev >= 24) { diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 3f8883b14d9c..f01676ac481b 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -240,6 +240,21 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set) (b43_radio_read16(dev, offset) & mask) | set); } +bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask, + u16 value, int delay, int timeout) +{ + u16 val; + int i; + + for (i = 0; i < timeout; i += delay) { + val = b43_radio_read(dev, offset); + if ((val & mask) == value) + return true; + udelay(delay); + } + return false; +} + u16 b43_phy_read(struct b43_wldev *dev, u16 reg) { assert_mac_suspended(dev); @@ -428,7 +443,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset) average = (a + b + c + d + 2) / 4; if (is_ofdm) { /* Adjust for CCK-boost */ - if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO) + if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTF1) & B43_HF_CCKBOOST) average = (average >= 13) ? (average - 13) : 0; } diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h index 9233b13fc16d..f1b999349876 100644 --- a/drivers/net/wireless/b43/phy_common.h +++ b/drivers/net/wireless/b43/phy_common.h @@ -365,6 +365,12 @@ void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set); void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set); /** + * b43_radio_wait_value - Waits for a given value in masked register read + */ +bool b43_radio_wait_value(struct b43_wldev *dev, u16 offset, u16 mask, + u16 value, int delay, int timeout); + +/** * b43_radio_lock - Lock firmware radio register access */ void b43_radio_lock(struct b43_wldev *dev); diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index b92bb9c92ad1..3c35382ee6c2 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -32,6 +32,7 @@ #include "tables_nphy.h" #include "radio_2055.h" #include "radio_2056.h" +#include "radio_2057.h" #include "main.h" struct nphy_txgains { @@ -126,6 +127,46 @@ ok: b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode); } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */ +static void b43_nphy_rf_control_override_rev7(struct b43_wldev *dev, u16 field, + u16 value, u8 core, bool off, + u8 override) +{ + const struct nphy_rf_control_override_rev7 *e; + u16 en_addrs[3][2] = { + { 0x0E7, 0x0EC }, { 0x342, 0x343 }, { 0x346, 0x347 } + }; + u16 en_addr; + u16 en_mask = field; + u16 val_addr; + u8 i; + + /* Remember: we can get NULL! */ + e = b43_nphy_get_rf_ctl_over_rev7(dev, field, override); + + for (i = 0; i < 2; i++) { + if (override >= ARRAY_SIZE(en_addrs)) { + b43err(dev->wl, "Invalid override value %d\n", override); + return; + } + en_addr = en_addrs[override][i]; + + val_addr = (i == 0) ? 
e->val_addr_core0 : e->val_addr_core1; + + if (off) { + b43_phy_mask(dev, en_addr, ~en_mask); + if (e) /* Do it safer, better than wl */ + b43_phy_mask(dev, val_addr, ~e->val_mask); + } else { + if (!core || (core & (1 << i))) { + b43_phy_set(dev, en_addr, en_mask); + if (e) + b43_phy_maskset(dev, val_addr, ~e->val_mask, (value << e->val_shift)); + } + } + } +} + /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field, u16 value, u8 core, bool off) @@ -459,6 +500,137 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, } /************************************************** + * Radio 0x2057 + **************************************************/ + +/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */ +static u8 b43_radio_2057_rcal(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + + if (phy->radio_rev == 5) { + b43_phy_mask(dev, 0x342, ~0x2); + udelay(10); + b43_radio_set(dev, R2057_IQTEST_SEL_PU, 0x1); + b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1); + } + + b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1); + udelay(10); + b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3); + if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) { + b43err(dev->wl, "Radio 0x2057 rcal timeout\n"); + return 0; + } + b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2); + tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E; + b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1); + + if (phy->radio_rev == 5) { + b43_radio_mask(dev, R2057_IPA2G_CASCONV_CORE0, ~0x1); + b43_radio_mask(dev, 0x1ca, ~0x2); + } + if (phy->radio_rev <= 4 || phy->radio_rev == 6) { + b43_radio_maskset(dev, R2057_TEMPSENSE_CONFIG, ~0x3C, tmp); + b43_radio_maskset(dev, R2057_BANDGAP_RCAL_TRIM, ~0xF0, + tmp << 2); + } + + return tmp & 0x3e; +} + +/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */ +static u16 b43_radio_2057_rccal(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + bool special = (phy->radio_rev == 3 || phy->radio_rev == 4 || + phy->radio_rev == 6); + u16 tmp; + + if (special) { + b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61); + b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0); + } else { + b43_radio_write(dev, 0x1AE, 0x61); + b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1); + } + b43_radio_write(dev, R2057_RCCAL_X1, 0x6E); + b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55); + if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500, + 5000000)) + b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n"); + b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15); + if (special) { + b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69); + b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0); + } else { + b43_radio_write(dev, 0x1AE, 0x69); + b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5); + } + b43_radio_write(dev, R2057_RCCAL_X1, 0x6E); + b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55); + if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500, + 5000000)) + b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n"); + b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15); + if (special) { + b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73); + b43_radio_write(dev, R2057_RCCAL_X1, 0x28); + b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0); + } else { + b43_radio_write(dev, 0x1AE, 0x73); + b43_radio_write(dev, R2057_RCCAL_X1, 0x6E); + b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99); + } + b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55); + if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500, + 5000000)) { + 
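For the wait calls above and below: the new b43_radio_wait_value() helper added in the phy_common.c hunk polls a masked radio register every 'delay' microseconds and gives up once roughly 'timeout' microseconds have passed, so the (500, 5000000) pair here means "check every 500 us, time out after about 5 s". A hedged restatement of that loop outside the driver, with a stubbed delay and the register modelled as plain memory:

static void delay_us_sketch(int usec)
{
	(void)usec;			/* placeholder for udelay() */
}

static int wait_masked_value(const volatile unsigned short *reg,
			     unsigned short mask, unsigned short value,
			     int delay_us, int timeout_us)
{
	int elapsed;

	for (elapsed = 0; elapsed < timeout_us; elapsed += delay_us) {
		if ((*reg & mask) == value)
			return 1;	/* matched within the time budget */
		delay_us_sketch(delay_us);
	}
	return 0;			/* timed out */
}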
b43err(dev->wl, "Radio 0x2057 rcal timeout\n"); + return 0; + } + tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP); + b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15); + return tmp; +} + +static void b43_radio_2057_init_pre(struct b43_wldev *dev) +{ + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU); + /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */ + b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_OEPORFORCE); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_OEPORFORCE); + b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_CHIP0PU); +} + +static void b43_radio_2057_init_post(struct b43_wldev *dev) +{ + b43_radio_set(dev, R2057_XTALPUOVR_PINCTRL, 0x1); + + b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78); + b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80); + mdelay(2); + b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78); + b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80); + + if (dev->phy.n->init_por) { + b43_radio_2057_rcal(dev); + b43_radio_2057_rccal(dev); + } + b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8); + + dev->phy.n->init_por = false; +} + +/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */ +static void b43_radio_2057_init(struct b43_wldev *dev) +{ + b43_radio_2057_init_pre(dev); + r2057_upload_inittabs(dev); + b43_radio_2057_init_post(dev); +} + +/************************************************** * Radio 0x2056 **************************************************/ @@ -545,7 +717,9 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, enum ieee80211_band band = b43_current_band(dev->wl); u16 offset; u8 i; - u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost; + u16 bias, cbias; + u16 pag_boost, padg_boost, pgag_boost, mixg_boost; + u16 paa_boost, pada_boost, pgaa_boost, mixa_boost; B43_WARN_ON(dev->phy.rev < 3); @@ -630,7 +804,56 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee); } } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) { - /* TODO */ + u16 freq = dev->phy.channel_freq; + if (freq < 5100) { + paa_boost = 0xA; + pada_boost = 0x77; + pgaa_boost = 0xF; + mixa_boost = 0xF; + } else if (freq < 5340) { + paa_boost = 0x8; + pada_boost = 0x77; + pgaa_boost = 0xFB; + mixa_boost = 0xF; + } else if (freq < 5650) { + paa_boost = 0x0; + pada_boost = 0x77; + pgaa_boost = 0xB; + mixa_boost = 0xF; + } else { + paa_boost = 0x0; + pada_boost = 0x77; + if (freq != 5825) + pgaa_boost = -(freq - 18) / 36 + 168; + else + pgaa_boost = 6; + mixa_boost = 0xF; + } + + for (i = 0; i < 2; i++) { + offset = i ? 
B2056_TX1 : B2056_TX0; + + b43_radio_write(dev, + offset | B2056_TX_INTPAA_BOOST_TUNE, paa_boost); + b43_radio_write(dev, + offset | B2056_TX_PADA_BOOST_TUNE, pada_boost); + b43_radio_write(dev, + offset | B2056_TX_PGAA_BOOST_TUNE, pgaa_boost); + b43_radio_write(dev, + offset | B2056_TX_MIXA_BOOST_TUNE, mixa_boost); + b43_radio_write(dev, + offset | B2056_TX_TXSPARE1, 0x30); + b43_radio_write(dev, + offset | B2056_TX_PA_SPARE2, 0xee); + b43_radio_write(dev, + offset | B2056_TX_PADA_CASCBIAS, 0x03); + b43_radio_write(dev, + offset | B2056_TX_INTPAA_IAUX_STAT, 0x50); + b43_radio_write(dev, + offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50); + b43_radio_write(dev, + offset | B2056_TX_INTPAA_CASCBIAS, 0x30); + } } udelay(50); @@ -643,6 +866,37 @@ static void b43_radio_2056_setup(struct b43_wldev *dev, udelay(300); } +static u8 b43_radio_2056_rcal(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 mast2, tmp; + + if (phy->rev != 3) + return 0; + + mast2 = b43_radio_read(dev, B2056_SYN_PLL_MAST2); + b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2 | 0x7); + + udelay(10); + b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01); + udelay(10); + b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x09); + + if (!b43_radio_wait_value(dev, B2056_SYN_RCAL_CODE_OUT, 0x80, 0x80, 100, + 1000000)) { + b43err(dev->wl, "Radio recalibration timeout\n"); + return 0; + } + + b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x01); + tmp = b43_radio_read(dev, B2056_SYN_RCAL_CODE_OUT); + b43_radio_write(dev, B2056_SYN_RCAL_MASTER, 0x00); + + b43_radio_write(dev, B2056_SYN_PLL_MAST2, mast2); + + return tmp & 0x1f; +} + static void b43_radio_init2056_pre(struct b43_wldev *dev) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, @@ -665,10 +919,8 @@ static void b43_radio_init2056_post(struct b43_wldev *dev) b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2); b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC); b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1); - /* - if (nphy->init_por) - Call Radio 2056 Recalibrate - */ + if (dev->phy.n->init_por) + b43_radio_2056_rcal(dev); } /* @@ -680,6 +932,8 @@ static void b43_radio_init2056(struct b43_wldev *dev) b43_radio_init2056_pre(dev); b2056_upload_inittabs(dev, 0, 0); b43_radio_init2056_post(dev); + + dev->phy.n->init_por = false; } /************************************************** @@ -753,8 +1007,6 @@ static void b43_radio_init2055_post(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; struct ssb_sprom *sprom = dev->dev->bus_sprom; - int i; - u16 val; bool workaround = false; if (sprom->revision < 4) @@ -777,15 +1029,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev) b43_radio_set(dev, B2055_CAL_MISC, 0x1); msleep(1); b43_radio_set(dev, B2055_CAL_MISC, 0x40); - for (i = 0; i < 200; i++) { - val = b43_radio_read(dev, B2055_CAL_COUT2); - if (val & 0x80) { - i = 0; - break; - } - udelay(10); - } - if (i) + if (!b43_radio_wait_value(dev, B2055_CAL_COUT2, 0x80, 0x80, 10, 2000)) b43err(dev->wl, "radio post init timeout\n"); b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); b43_switch_channel(dev, dev->phy.channel); @@ -1860,12 +2104,334 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) { - if (dev->phy.rev >= 3) + if (dev->phy.rev >= 7) + ; /* TODO */ + else if (dev->phy.rev >= 3) b43_nphy_gain_ctl_workarounds_rev3plus(dev); else b43_nphy_gain_ctl_workarounds_rev1_2(dev); } +/* 
http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */ +static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset) +{ + if (!offset) + offset = (dev->phy.is_40mhz) ? 0x159 : 0x154; + return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7; +} + +static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = dev->dev->bus_sprom; + struct b43_phy *phy = &dev->phy; + + u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, + 0x1F }; + u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; + + u16 ntab7_15e_16e[] = { 0x10f, 0x10f }; + u8 ntab7_138_146[] = { 0x11, 0x11 }; + u8 ntab7_133[] = { 0x77, 0x11, 0x11 }; + + u16 lpf_20, lpf_40, lpf_11b; + u16 bcap_val, bcap_val_11b, bcap_val_11n_20, bcap_val_11n_40; + u16 scap_val, scap_val_11b, scap_val_11n_20, scap_val_11n_40; + bool rccal_ovrd = false; + + u16 rx2tx_lut_20_11b, rx2tx_lut_20_11n, rx2tx_lut_40_11n; + u16 bias, conv, filt; + + u32 tmp32; + u8 core; + + if (phy->rev == 7) { + b43_phy_set(dev, B43_NPHY_FINERX2_CGC, 0x10); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0xFF80, 0x0020); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN0, 0x80FF, 0x2700); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0xFF80, 0x002E); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN1, 0x80FF, 0x3300); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0xFF80, 0x0037); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN2, 0x80FF, 0x3A00); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0xFF80, 0x003C); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN3, 0x80FF, 0x3E00); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0xFF80, 0x003E); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN4, 0x80FF, 0x3F00); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0xFF80, 0x0040); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN5, 0x80FF, 0x4000); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0xFF80, 0x0040); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN6, 0x80FF, 0x4000); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0xFF80, 0x0040); + b43_phy_maskset(dev, B43_NPHY_FREQGAIN7, 0x80FF, 0x4000); + } + if (phy->rev <= 8) { + b43_phy_write(dev, 0x23F, 0x1B0); + b43_phy_write(dev, 0x240, 0x1B0); + } + if (phy->rev >= 8) + b43_phy_maskset(dev, B43_NPHY_TXTAILCNT, ~0xFF, 0x72); + + b43_ntab_write(dev, B43_NTAB16(8, 0x00), 2); + b43_ntab_write(dev, B43_NTAB16(8, 0x10), 2); + tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); + tmp32 &= 0xffffff; + b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x15e), 2, ntab7_15e_16e); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x16e), 2, ntab7_15e_16e); + + if (b43_nphy_ipa(dev)) + b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa, + rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa)); + + b43_phy_maskset(dev, 0x299, 0x3FFF, 0x4000); + b43_phy_maskset(dev, 0x29D, 0x3FFF, 0x4000); + + lpf_20 = b43_nphy_read_lpf_ctl(dev, 0x154); + lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159); + lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152); + if (b43_nphy_ipa(dev)) { + if ((phy->radio_rev == 5 && phy->is_40mhz) || + phy->radio_rev == 7 || phy->radio_rev == 8) { + bcap_val = b43_radio_read(dev, 0x16b); + scap_val = b43_radio_read(dev, 0x16a); + scap_val_11b = scap_val; + bcap_val_11b = bcap_val; + if (phy->radio_rev == 5 && phy->is_40mhz) { + scap_val_11n_20 = scap_val; + bcap_val_11n_20 = bcap_val; + scap_val_11n_40 = bcap_val_11n_40 = 0xc; + rccal_ovrd = true; + } else { /* Rev 7/8 */ + lpf_20 = 4; + lpf_11b = 1; + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + scap_val_11n_20 = 0xc; + bcap_val_11n_20 = 0xc; + scap_val_11n_40 = 0xa; + bcap_val_11n_40 = 0xa; + } else 
{ + scap_val_11n_20 = 0x14; + bcap_val_11n_20 = 0x14; + scap_val_11n_40 = 0xf; + bcap_val_11n_40 = 0xf; + } + rccal_ovrd = true; + } + } + } else { + if (phy->radio_rev == 5) { + lpf_20 = 1; + lpf_40 = 3; + bcap_val = b43_radio_read(dev, 0x16b); + scap_val = b43_radio_read(dev, 0x16a); + scap_val_11b = scap_val; + bcap_val_11b = bcap_val; + scap_val_11n_20 = 0x11; + scap_val_11n_40 = 0x11; + bcap_val_11n_20 = 0x13; + bcap_val_11n_40 = 0x13; + rccal_ovrd = true; + } + } + if (rccal_ovrd) { + rx2tx_lut_20_11b = (bcap_val_11b << 8) | + (scap_val_11b << 3) | + lpf_11b; + rx2tx_lut_20_11n = (bcap_val_11n_20 << 8) | + (scap_val_11n_20 << 3) | + lpf_20; + rx2tx_lut_40_11n = (bcap_val_11n_40 << 8) | + (scap_val_11n_40 << 3) | + lpf_40; + for (core = 0; core < 2; core++) { + b43_ntab_write(dev, B43_NTAB16(7, 0x152 + core * 16), + rx2tx_lut_20_11b); + b43_ntab_write(dev, B43_NTAB16(7, 0x153 + core * 16), + rx2tx_lut_20_11n); + b43_ntab_write(dev, B43_NTAB16(7, 0x154 + core * 16), + rx2tx_lut_20_11n); + b43_ntab_write(dev, B43_NTAB16(7, 0x155 + core * 16), + rx2tx_lut_40_11n); + b43_ntab_write(dev, B43_NTAB16(7, 0x156 + core * 16), + rx2tx_lut_40_11n); + b43_ntab_write(dev, B43_NTAB16(7, 0x157 + core * 16), + rx2tx_lut_40_11n); + b43_ntab_write(dev, B43_NTAB16(7, 0x158 + core * 16), + rx2tx_lut_40_11n); + b43_ntab_write(dev, B43_NTAB16(7, 0x159 + core * 16), + rx2tx_lut_40_11n); + } + b43_nphy_rf_control_override_rev7(dev, 16, 1, 3, false, 2); + } + b43_phy_write(dev, 0x32F, 0x3); + if (phy->radio_rev == 4 || phy->radio_rev == 6) + b43_nphy_rf_control_override_rev7(dev, 4, 1, 3, false, 0); + + if (phy->radio_rev == 3 || phy->radio_rev == 4 || phy->radio_rev == 6) { + if (sprom->revision && + sprom->boardflags2_hi & B43_BFH2_IPALVLSHIFT_3P3) { + b43_radio_write(dev, 0x5, 0x05); + b43_radio_write(dev, 0x6, 0x30); + b43_radio_write(dev, 0x7, 0x00); + b43_radio_set(dev, 0x4f, 0x1); + b43_radio_set(dev, 0xd4, 0x1); + bias = 0x1f; + conv = 0x6f; + filt = 0xaa; + } else { + bias = 0x2b; + conv = 0x7f; + filt = 0xee; + } + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + for (core = 0; core < 2; core++) { + if (core == 0) { + b43_radio_write(dev, 0x5F, bias); + b43_radio_write(dev, 0x64, conv); + b43_radio_write(dev, 0x66, filt); + } else { + b43_radio_write(dev, 0xE8, bias); + b43_radio_write(dev, 0xE9, conv); + b43_radio_write(dev, 0xEB, filt); + } + } + } + } + + if (b43_nphy_ipa(dev)) { + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + if (phy->radio_rev == 3 || phy->radio_rev == 4 || + phy->radio_rev == 6) { + for (core = 0; core < 2; core++) { + if (core == 0) + b43_radio_write(dev, 0x51, + 0x7f); + else + b43_radio_write(dev, 0xd6, + 0x7f); + } + } + if (phy->radio_rev == 3) { + for (core = 0; core < 2; core++) { + if (core == 0) { + b43_radio_write(dev, 0x64, + 0x13); + b43_radio_write(dev, 0x5F, + 0x1F); + b43_radio_write(dev, 0x66, + 0xEE); + b43_radio_write(dev, 0x59, + 0x8A); + b43_radio_write(dev, 0x80, + 0x3E); + } else { + b43_radio_write(dev, 0x69, + 0x13); + b43_radio_write(dev, 0xE8, + 0x1F); + b43_radio_write(dev, 0xEB, + 0xEE); + b43_radio_write(dev, 0xDE, + 0x8A); + b43_radio_write(dev, 0x105, + 0x3E); + } + } + } else if (phy->radio_rev == 7 || phy->radio_rev == 8) { + if (!phy->is_40mhz) { + b43_radio_write(dev, 0x5F, 0x14); + b43_radio_write(dev, 0xE8, 0x12); + } else { + b43_radio_write(dev, 0x5F, 0x16); + b43_radio_write(dev, 0xE8, 0x16); + } + } + } else { + u16 freq = phy->channel_freq; + if ((freq >= 5180 && freq <= 5230) || + (freq >= 5745 && freq <= 5805)) { 
+ b43_radio_write(dev, 0x7D, 0xFF); + b43_radio_write(dev, 0xFE, 0xFF); + } + } + } else { + if (phy->radio_rev != 5) { + for (core = 0; core < 2; core++) { + if (core == 0) { + b43_radio_write(dev, 0x5c, 0x61); + b43_radio_write(dev, 0x51, 0x70); + } else { + b43_radio_write(dev, 0xe1, 0x61); + b43_radio_write(dev, 0xd6, 0x70); + } + } + } + } + + if (phy->radio_rev == 4) { + b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20); + b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20); + for (core = 0; core < 2; core++) { + if (core == 0) { + b43_radio_write(dev, 0x1a1, 0x00); + b43_radio_write(dev, 0x1a2, 0x3f); + b43_radio_write(dev, 0x1a6, 0x3f); + } else { + b43_radio_write(dev, 0x1a7, 0x00); + b43_radio_write(dev, 0x1ab, 0x3f); + b43_radio_write(dev, 0x1ac, 0x3f); + } + } + } else { + b43_phy_set(dev, B43_NPHY_AFECTL_C1, 0x4); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x4); + b43_phy_set(dev, B43_NPHY_AFECTL_C2, 0x4); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4); + + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x1); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x1); + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x1); + b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x1); + b43_ntab_write(dev, B43_NTAB16(8, 0x05), 0x20); + b43_ntab_write(dev, B43_NTAB16(8, 0x15), 0x20); + + b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x4); + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x4); + b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x4); + b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4); + } + + b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, 0x2); + + b43_ntab_write(dev, B43_NTAB32(16, 0x100), 20); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x138), 2, ntab7_138_146); + b43_ntab_write(dev, B43_NTAB16(7, 0x141), 0x77); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x133), 3, ntab7_133); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x146), 2, ntab7_138_146); + b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77); + b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77); + + if (!phy->is_40mhz) { + b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D); + b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D); + } else { + b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x14D); + b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x14D); + } + + b43_nphy_gain_ctl_workarounds(dev); + + /* TODO + b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, + aux_adc_vmid_rev7_core0); + b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, + aux_adc_vmid_rev7_core1); + b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0C), 4, + aux_adc_gain_rev7); + b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1C), 4, + aux_adc_gain_rev7); + */ +} + static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -1916,7 +2482,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) rx2tx_delays[6] = 1; rx2tx_events[7] = 0x1F; } - b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, + b43_nphy_set_rf_sequence(dev, 0, rx2tx_events, rx2tx_delays, ARRAY_SIZE(rx2tx_events)); } @@ -1926,8 +2492,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); - b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); - b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); + if (!dev->phy.is_40mhz) { + b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); + b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); + } else { + b43_ntab_write(dev, B43_NTAB32(16, 3), 0x14D); + b43_ntab_write(dev, B43_NTAB32(16, 127), 0x14D); + } b43_nphy_gain_ctl_workarounds(dev); @@ -1963,13 +2534,14 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_ntab_write(dev, 
B43_NTAB32(30, 3), tmp32); if (dev->phy.rev == 4 && - b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, 0x70); b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, 0x70); } + /* Dropped probably-always-true condition */ b43_phy_write(dev, 0x224, 0x03eb); b43_phy_write(dev, 0x225, 0x03eb); b43_phy_write(dev, 0x226, 0x0341); @@ -1982,6 +2554,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) b43_phy_write(dev, 0x22d, 0x042b); b43_phy_write(dev, 0x22e, 0x0381); b43_phy_write(dev, 0x22f, 0x0381); + + if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK) + ; /* TODO: 0x0080000000000000 HF */ } static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) @@ -1996,6 +2571,12 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; + if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD || + dev->dev->board_type == 0x8B) { + delays1[0] = 0x1; + delays1[5] = 0x14; + } + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && nphy->band5g_pwrgain) { b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8); @@ -2007,8 +2588,10 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A); b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A); - b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); - b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA); + if (dev->phy.rev < 3) { + b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA); + b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA); + } if (dev->phy.rev < 2) { b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000); @@ -2024,11 +2607,6 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8); b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301); - if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD && - dev->dev->board_type == 0x8B) { - delays1[0] = 0x1; - delays1[5] = 0x14; - } b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); @@ -2055,11 +2633,13 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD); b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20); - b43_phy_mask(dev, B43_NPHY_PIL_DW1, - ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); - b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); - b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); - b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); + if (dev->phy.rev < 3) { + b43_phy_mask(dev, B43_NPHY_PIL_DW1, + ~B43_NPHY_PIL_DW_64QAM & 0xFFFF); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4); + b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00); + } if (dev->phy.rev == 2) b43_phy_set(dev, B43_NPHY_FINERX2_CGC, @@ -2083,7 +2663,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) b43_phy_set(dev, B43_NPHY_IQFLIP, B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); - if (dev->phy.rev >= 3) + if (dev->phy.rev >= 7) + b43_nphy_workarounds_rev7plus(dev); + else if (dev->phy.rev >= 3) b43_nphy_workarounds_rev3plus(dev); else b43_nphy_workarounds_rev1_2(dev); @@ -2542,7 +3124,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) b43_nphy_ipa_internal_tssi_setup(dev); if (phy->rev >= 7) - ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */ + b43_nphy_rf_control_override_rev7(dev, 0x2000, 
0, 3, false, 0); else if (phy->rev >= 3) b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false); @@ -2554,7 +3136,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) b43_nphy_rssi_select(dev, 0, 0); if (phy->rev >= 7) - ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */ + b43_nphy_rf_control_override_rev7(dev, 0x2000, 0, 3, true, 0); else if (phy->rev >= 3) b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true); @@ -4761,6 +5343,7 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); nphy->spur_avoid = (phy->rev >= 3) ? B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE; + nphy->init_por = true; nphy->gain_boost = true; /* this way we follow wl, assume it is true */ nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ @@ -4801,6 +5384,8 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2; nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2; } + + nphy->init_por = true; } static void b43_nphy_op_free(struct b43_wldev *dev) @@ -4887,7 +5472,9 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, if (blocked) { b43_phy_mask(dev, B43_NPHY_RFCTL_CMD, ~B43_NPHY_RFCTL_CMD_CHIP0PU); - if (dev->phy.rev >= 3) { + if (dev->phy.rev >= 7) { + /* TODO */ + } else if (dev->phy.rev >= 3) { b43_radio_mask(dev, 0x09, ~0x2); b43_radio_write(dev, 0x204D, 0); @@ -4905,7 +5492,10 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, b43_radio_write(dev, 0x3064, 0); } } else { - if (dev->phy.rev >= 3) { + if (dev->phy.rev >= 7) { + b43_radio_2057_init(dev); + b43_switch_channel(dev, dev->phy.channel); + } else if (dev->phy.rev >= 3) { b43_radio_init2056(dev); b43_switch_channel(dev, dev->phy.channel); } else { diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h index fd12b386fea1..092c0140c249 100644 --- a/drivers/net/wireless/b43/phy_n.h +++ b/drivers/net/wireless/b43/phy_n.h @@ -785,6 +785,7 @@ struct b43_phy_n { u16 papd_epsilon_offset[2]; s32 preamble_override; u32 bb_mult_save; + bool init_por; bool gain_boost; bool elna_gain_config; diff --git a/drivers/net/wireless/b43/radio_2057.c b/drivers/net/wireless/b43/radio_2057.c new file mode 100644 index 000000000000..d61d6830c5c7 --- /dev/null +++ b/drivers/net/wireless/b43/radio_2057.c @@ -0,0 +1,141 @@ +/* + + Broadcom B43 wireless driver + IEEE 802.11n 2057 radio device data tables + + Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "radio_2057.h" +#include "phy_common.h" + +static u16 r2057_rev4_init[42][2] = { + { 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, + { 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff }, + { 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 }, + { 0x8C, 0xf0 }, { 0x91, 0x3f }, { 0x92, 0x36 }, { 0xA4, 0x8c }, + { 0xA8, 0x55 }, { 0xAF, 0x01 }, { 0x10F, 0xf0 }, { 0x110, 0x10 }, + { 0x111, 0xf0 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x129, 0x8c }, + { 0x12D, 0x55 }, { 0x134, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, + { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, + { 0x169, 0x02 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, + { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, + { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, +}; + +static u16 r2057_rev5_init[44][2] = { + { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 }, + { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, + { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, + { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 }, + { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 }, + { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f }, + { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, + { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, + { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, + { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, + { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 }, +}; + +static u16 r2057_rev5a_init[45][2] = { + { 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 }, + { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, + { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, + { 0x64, 0x0f }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 }, + { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, { 0xE1, 0x20 }, + { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0x106, 0x01 }, { 0x116, 0x3f }, + { 0x117, 0x36 }, { 0x126, 0x20 }, { 0x14E, 0x01 }, { 0x15E, 0x00 }, + { 0x15F, 0x00 }, { 0x160, 0x00 }, { 0x161, 0x00 }, { 0x162, 0x00 }, + { 0x163, 0x00 }, { 0x16A, 0x00 }, { 0x16B, 0x00 }, { 0x16C, 0x00 }, + { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, + { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, + { 0x1C2, 0x80 }, +}; + +static u16 r2057_rev7_init[54][2] = { + { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 }, + { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 }, + { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 }, + { 0x66, 0xee }, { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, + { 0x7C, 0x14 }, { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, + { 0x92, 0x36 }, { 0xA1, 0x20 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, + { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x13 }, { 0xEB, 0xee }, + { 0xF3, 0x58 }, { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x14 }, + { 0x102, 0xee }, { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, + { 0x126, 0x20 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 }, + { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 }, + { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, + { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, + { 0x1B7, 0x05 }, { 0x1C2, 0xa0 }, +}; + +static u16 r2057_rev8_init[54][2] = { + { 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 }, + { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 
0x70 }, { 0x59, 0x88 }, + { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f }, + { 0x6E, 0x58 }, { 0x75, 0x13 }, { 0x7B, 0x13 }, { 0x7C, 0x0f }, + { 0x7D, 0xee }, { 0x81, 0x01 }, { 0x91, 0x3f }, { 0x92, 0x36 }, + { 0xA1, 0x20 }, { 0xC9, 0x01 }, { 0xD6, 0x70 }, { 0xDE, 0x88 }, + { 0xE1, 0x20 }, { 0xE8, 0x0f }, { 0xE9, 0x0f }, { 0xF3, 0x58 }, + { 0xFA, 0x13 }, { 0x100, 0x13 }, { 0x101, 0x0f }, { 0x102, 0xee }, + { 0x106, 0x01 }, { 0x116, 0x3f }, { 0x117, 0x36 }, { 0x126, 0x20 }, + { 0x14E, 0x01 }, { 0x15E, 0x00 }, { 0x15F, 0x00 }, { 0x160, 0x00 }, + { 0x161, 0x00 }, { 0x162, 0x00 }, { 0x163, 0x00 }, { 0x16A, 0x00 }, + { 0x16B, 0x00 }, { 0x16C, 0x00 }, { 0x1A4, 0x00 }, { 0x1A5, 0x00 }, + { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 }, + { 0x1B7, 0x05 }, { 0x1C2, 0xa0 }, +}; + +void r2057_upload_inittabs(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 *table = NULL; + u16 size, i; + + if (phy->rev == 7) { + table = r2057_rev4_init[0]; + size = ARRAY_SIZE(r2057_rev4_init); + } else if (phy->rev == 8 || phy->rev == 9) { + if (phy->radio_rev == 5) { + if (phy->radio_rev == 8) { + table = r2057_rev5_init[0]; + size = ARRAY_SIZE(r2057_rev5_init); + } else { + table = r2057_rev5a_init[0]; + size = ARRAY_SIZE(r2057_rev5a_init); + } + } else if (phy->radio_rev == 7) { + table = r2057_rev7_init[0]; + size = ARRAY_SIZE(r2057_rev7_init); + } else if (phy->radio_rev == 9) { + table = r2057_rev8_init[0]; + size = ARRAY_SIZE(r2057_rev8_init); + } + } + + if (table) { + for (i = 0; i < size; i++) { + pr_info("radio_write 0x%X ", *table); + table++; + pr_info("0x%X\n", *table); + table++; + } + } +} diff --git a/drivers/net/wireless/b43/radio_2057.h b/drivers/net/wireless/b43/radio_2057.h new file mode 100644 index 000000000000..eeebd8fbeb0d --- /dev/null +++ b/drivers/net/wireless/b43/radio_2057.h @@ -0,0 +1,430 @@ +#ifndef B43_RADIO_2057_H_ +#define B43_RADIO_2057_H_ + +#include <linux/types.h> + +#include "tables_nphy.h" + +#define R2057_DACBUF_VINCM_CORE0 0x000 +#define R2057_IDCODE 0x001 +#define R2057_RCCAL_MASTER 0x002 +#define R2057_RCCAL_CAP_SIZE 0x003 +#define R2057_RCAL_CONFIG 0x004 +#define R2057_GPAIO_CONFIG 0x005 +#define R2057_GPAIO_SEL1 0x006 +#define R2057_GPAIO_SEL0 0x007 +#define R2057_CLPO_CONFIG 0x008 +#define R2057_BANDGAP_CONFIG 0x009 +#define R2057_BANDGAP_RCAL_TRIM 0x00a +#define R2057_AFEREG_CONFIG 0x00b +#define R2057_TEMPSENSE_CONFIG 0x00c +#define R2057_XTAL_CONFIG1 0x00d +#define R2057_XTAL_ICORE_SIZE 0x00e +#define R2057_XTAL_BUF_SIZE 0x00f +#define R2057_XTAL_PULLCAP_SIZE 0x010 +#define R2057_RFPLL_MASTER 0x011 +#define R2057_VCOMONITOR_VTH_L 0x012 +#define R2057_VCOMONITOR_VTH_H 0x013 +#define R2057_VCOCAL_BIASRESET_RFPLLREG_VOUT 0x014 +#define R2057_VCO_VARCSIZE_IDAC 0x015 +#define R2057_VCOCAL_COUNTVAL0 0x016 +#define R2057_VCOCAL_COUNTVAL1 0x017 +#define R2057_VCOCAL_INTCLK_COUNT 0x018 +#define R2057_VCOCAL_MASTER 0x019 +#define R2057_VCOCAL_NUMCAPCHANGE 0x01a +#define R2057_VCOCAL_WINSIZE 0x01b +#define R2057_VCOCAL_DELAY_AFTER_REFRESH 0x01c +#define R2057_VCOCAL_DELAY_AFTER_CLOSELOOP 0x01d +#define R2057_VCOCAL_DELAY_AFTER_OPENLOOP 0x01e +#define R2057_VCOCAL_DELAY_BEFORE_OPENLOOP 0x01f +#define R2057_VCO_FORCECAPEN_FORCECAP1 0x020 +#define R2057_VCO_FORCECAP0 0x021 +#define R2057_RFPLL_REFMASTER_SPAREXTALSIZE 0x022 +#define R2057_RFPLL_PFD_RESET_PW 0x023 +#define R2057_RFPLL_LOOPFILTER_R2 0x024 +#define R2057_RFPLL_LOOPFILTER_R1 0x025 +#define R2057_RFPLL_LOOPFILTER_C3 0x026 +#define R2057_RFPLL_LOOPFILTER_C2 0x027 +#define
R2057_RFPLL_LOOPFILTER_C1 0x028 +#define R2057_CP_KPD_IDAC 0x029 +#define R2057_RFPLL_IDACS 0x02a +#define R2057_RFPLL_MISC_EN 0x02b +#define R2057_RFPLL_MMD0 0x02c +#define R2057_RFPLL_MMD1 0x02d +#define R2057_RFPLL_MISC_CAL_RESETN 0x02e +#define R2057_JTAGXTAL_SIZE_CPBIAS_FILTRES 0x02f +#define R2057_VCO_ALCREF_BBPLLXTAL_SIZE 0x030 +#define R2057_VCOCAL_READCAP0 0x031 +#define R2057_VCOCAL_READCAP1 0x032 +#define R2057_VCOCAL_STATUS 0x033 +#define R2057_LOGEN_PUS 0x034 +#define R2057_LOGEN_PTAT_RESETS 0x035 +#define R2057_VCOBUF_IDACS 0x036 +#define R2057_VCOBUF_TUNE 0x037 +#define R2057_CMOSBUF_TX2GQ_IDACS 0x038 +#define R2057_CMOSBUF_TX2GI_IDACS 0x039 +#define R2057_CMOSBUF_TX5GQ_IDACS 0x03a +#define R2057_CMOSBUF_TX5GI_IDACS 0x03b +#define R2057_CMOSBUF_RX2GQ_IDACS 0x03c +#define R2057_CMOSBUF_RX2GI_IDACS 0x03d +#define R2057_CMOSBUF_RX5GQ_IDACS 0x03e +#define R2057_CMOSBUF_RX5GI_IDACS 0x03f +#define R2057_LOGEN_MX2G_IDACS 0x040 +#define R2057_LOGEN_MX2G_TUNE 0x041 +#define R2057_LOGEN_MX5G_IDACS 0x042 +#define R2057_LOGEN_MX5G_TUNE 0x043 +#define R2057_LOGEN_MX5G_RCCR 0x044 +#define R2057_LOGEN_INDBUF2G_IDAC 0x045 +#define R2057_LOGEN_INDBUF2G_IBOOST 0x046 +#define R2057_LOGEN_INDBUF2G_TUNE 0x047 +#define R2057_LOGEN_INDBUF5G_IDAC 0x048 +#define R2057_LOGEN_INDBUF5G_IBOOST 0x049 +#define R2057_LOGEN_INDBUF5G_TUNE 0x04a +#define R2057_CMOSBUF_TX_RCCR 0x04b +#define R2057_CMOSBUF_RX_RCCR 0x04c +#define R2057_LOGEN_SEL_PKDET 0x04d +#define R2057_CMOSBUF_SHAREIQ_PTAT 0x04e +#define R2057_RXTXBIAS_CONFIG_CORE0 0x04f +#define R2057_TXGM_TXRF_PUS_CORE0 0x050 +#define R2057_TXGM_IDAC_BLEED_CORE0 0x051 +#define R2057_TXGM_GAIN_CORE0 0x056 +#define R2057_TXGM2G_PKDET_PUS_CORE0 0x057 +#define R2057_PAD2G_PTATS_CORE0 0x058 +#define R2057_PAD2G_IDACS_CORE0 0x059 +#define R2057_PAD2G_BOOST_PU_CORE0 0x05a +#define R2057_PAD2G_CASCV_GAIN_CORE0 0x05b +#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE0 0x05c +#define R2057_TXMIX2G_LODC_CORE0 0x05d +#define R2057_PAD2G_TUNE_PUS_CORE0 0x05e +#define R2057_IPA2G_GAIN_CORE0 0x05f +#define R2057_TSSI2G_SPARE1_CORE0 0x060 +#define R2057_TSSI2G_SPARE2_CORE0 0x061 +#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE0 0x062 +#define R2057_IPA2G_IMAIN_CORE0 0x063 +#define R2057_IPA2G_CASCONV_CORE0 0x064 +#define R2057_IPA2G_CASCOFFV_CORE0 0x065 +#define R2057_IPA2G_BIAS_FILTER_CORE0 0x066 +#define R2057_TX5G_PKDET_CORE0 0x069 +#define R2057_PGA_PTAT_TXGM5G_PU_CORE0 0x06a +#define R2057_PAD5G_PTATS1_CORE0 0x06b +#define R2057_PAD5G_CLASS_PTATS2_CORE0 0x06c +#define R2057_PGA_BOOSTPTAT_IMAIN_CORE0 0x06d +#define R2057_PAD5G_CASCV_IMAIN_CORE0 0x06e +#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE0 0x06f +#define R2057_PGA_BOOST_TUNE_CORE0 0x070 +#define R2057_PGA_GAIN_CORE0 0x071 +#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE0 0x072 +#define R2057_TXMIX5G_BOOST_TUNE_CORE0 0x073 +#define R2057_PAD5G_TUNE_MISC_PUS_CORE0 0x074 +#define R2057_IPA5G_IAUX_CORE0 0x075 +#define R2057_IPA5G_GAIN_CORE0 0x076 +#define R2057_TSSI5G_SPARE1_CORE0 0x077 +#define R2057_TSSI5G_SPARE2_CORE0 0x078 +#define R2057_IPA5G_CASCOFFV_PU_CORE0 0x079 +#define R2057_IPA5G_PTAT_CORE0 0x07a +#define R2057_IPA5G_IMAIN_CORE0 0x07b +#define R2057_IPA5G_CASCONV_CORE0 0x07c +#define R2057_IPA5G_BIAS_FILTER_CORE0 0x07d +#define R2057_PAD_BIAS_FILTER_BWS_CORE0 0x080 +#define R2057_TR2G_CONFIG1_CORE0_NU 0x081 +#define R2057_TR2G_CONFIG2_CORE0_NU 0x082 +#define R2057_LNA5G_RFEN_CORE0 0x083 +#define R2057_TR5G_CONFIG2_CORE0_NU 0x084 +#define R2057_RXRFBIAS_IBOOST_PU_CORE0 0x085 +#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE0 
0x086 +#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE0 0x087 +#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE0 0x088 +#define R2057_RXMIX_CMFBITAIL_PU_CORE0 0x089 +#define R2057_LNA2_IMAIN_PTAT_PU_CORE0 0x08a +#define R2057_LNA2_IAUX_PTAT_CORE0 0x08b +#define R2057_LNA1_IMAIN_PTAT_PU_CORE0 0x08c +#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE0 0x08d +#define R2057_RXRFBIAS_BANDSEL_CORE0 0x08e +#define R2057_TIA_CONFIG_CORE0 0x08f +#define R2057_TIA_IQGAIN_CORE0 0x090 +#define R2057_TIA_IBIAS2_CORE0 0x091 +#define R2057_TIA_IBIAS1_CORE0 0x092 +#define R2057_TIA_SPARE_Q_CORE0 0x093 +#define R2057_TIA_SPARE_I_CORE0 0x094 +#define R2057_RXMIX2G_PUS_CORE0 0x095 +#define R2057_RXMIX2G_VCMREFS_CORE0 0x096 +#define R2057_RXMIX2G_LODC_QI_CORE0 0x097 +#define R2057_W12G_BW_LNA2G_PUS_CORE0 0x098 +#define R2057_LNA2G_GAIN_CORE0 0x099 +#define R2057_LNA2G_TUNE_CORE0 0x09a +#define R2057_RXMIX5G_PUS_CORE0 0x09b +#define R2057_RXMIX5G_VCMREFS_CORE0 0x09c +#define R2057_RXMIX5G_LODC_QI_CORE0 0x09d +#define R2057_W15G_BW_LNA5G_PUS_CORE0 0x09e +#define R2057_LNA5G_GAIN_CORE0 0x09f +#define R2057_LNA5G_TUNE_CORE0 0x0a0 +#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE0 0x0a1 +#define R2057_RXBB_BIAS_MASTER_CORE0 0x0a2 +#define R2057_RXBB_VGABUF_IDACS_CORE0 0x0a3 +#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE0 0x0a4 +#define R2057_TXBUF_VINCM_CORE0 0x0a5 +#define R2057_TXBUF_IDACS_CORE0 0x0a6 +#define R2057_LPF_RESP_RXBUF_BW_CORE0 0x0a7 +#define R2057_RXBB_CC_CORE0 0x0a8 +#define R2057_RXBB_SPARE3_CORE0 0x0a9 +#define R2057_RXBB_RCCAL_HPC_CORE0 0x0aa +#define R2057_LPF_IDACS_CORE0 0x0ab +#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE0 0x0ac +#define R2057_TXBUF_GAIN_CORE0 0x0ad +#define R2057_AFELOOPBACK_AACI_RESP_CORE0 0x0ae +#define R2057_RXBUF_DEGEN_CORE0 0x0af +#define R2057_RXBB_SPARE2_CORE0 0x0b0 +#define R2057_RXBB_SPARE1_CORE0 0x0b1 +#define R2057_RSSI_MASTER_CORE0 0x0b2 +#define R2057_W2_MASTER_CORE0 0x0b3 +#define R2057_NB_MASTER_CORE0 0x0b4 +#define R2057_W2_IDACS0_Q_CORE0 0x0b5 +#define R2057_W2_IDACS1_Q_CORE0 0x0b6 +#define R2057_W2_IDACS0_I_CORE0 0x0b7 +#define R2057_W2_IDACS1_I_CORE0 0x0b8 +#define R2057_RSSI_GPAIOSEL_W1_IDACS_CORE0 0x0b9 +#define R2057_NB_IDACS_Q_CORE0 0x0ba +#define R2057_NB_IDACS_I_CORE0 0x0bb +#define R2057_BACKUP4_CORE0 0x0c1 +#define R2057_BACKUP3_CORE0 0x0c2 +#define R2057_BACKUP2_CORE0 0x0c3 +#define R2057_BACKUP1_CORE0 0x0c4 +#define R2057_SPARE16_CORE0 0x0c5 +#define R2057_SPARE15_CORE0 0x0c6 +#define R2057_SPARE14_CORE0 0x0c7 +#define R2057_SPARE13_CORE0 0x0c8 +#define R2057_SPARE12_CORE0 0x0c9 +#define R2057_SPARE11_CORE0 0x0ca +#define R2057_TX2G_BIAS_RESETS_CORE0 0x0cb +#define R2057_TX5G_BIAS_RESETS_CORE0 0x0cc +#define R2057_IQTEST_SEL_PU 0x0cd +#define R2057_XTAL_CONFIG2 0x0ce +#define R2057_BUFS_MISC_LPFBW_CORE0 0x0cf +#define R2057_TXLPF_RCCAL_CORE0 0x0d0 +#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE0 0x0d1 +#define R2057_LPF_GAIN_CORE0 0x0d2 +#define R2057_DACBUF_IDACS_BW_CORE0 0x0d3 +#define R2057_RXTXBIAS_CONFIG_CORE1 0x0d4 +#define R2057_TXGM_TXRF_PUS_CORE1 0x0d5 +#define R2057_TXGM_IDAC_BLEED_CORE1 0x0d6 +#define R2057_TXGM_GAIN_CORE1 0x0db +#define R2057_TXGM2G_PKDET_PUS_CORE1 0x0dc +#define R2057_PAD2G_PTATS_CORE1 0x0dd +#define R2057_PAD2G_IDACS_CORE1 0x0de +#define R2057_PAD2G_BOOST_PU_CORE1 0x0df +#define R2057_PAD2G_CASCV_GAIN_CORE1 0x0e0 +#define R2057_TXMIX2G_TUNE_BOOST_PU_CORE1 0x0e1 +#define R2057_TXMIX2G_LODC_CORE1 0x0e2 +#define R2057_PAD2G_TUNE_PUS_CORE1 0x0e3 +#define R2057_IPA2G_GAIN_CORE1 0x0e4 +#define R2057_TSSI2G_SPARE1_CORE1 0x0e5 +#define R2057_TSSI2G_SPARE2_CORE1 
0x0e6 +#define R2057_IPA2G_TUNEV_CASCV_PTAT_CORE1 0x0e7 +#define R2057_IPA2G_IMAIN_CORE1 0x0e8 +#define R2057_IPA2G_CASCONV_CORE1 0x0e9 +#define R2057_IPA2G_CASCOFFV_CORE1 0x0ea +#define R2057_IPA2G_BIAS_FILTER_CORE1 0x0eb +#define R2057_TX5G_PKDET_CORE1 0x0ee +#define R2057_PGA_PTAT_TXGM5G_PU_CORE1 0x0ef +#define R2057_PAD5G_PTATS1_CORE1 0x0f0 +#define R2057_PAD5G_CLASS_PTATS2_CORE1 0x0f1 +#define R2057_PGA_BOOSTPTAT_IMAIN_CORE1 0x0f2 +#define R2057_PAD5G_CASCV_IMAIN_CORE1 0x0f3 +#define R2057_TXMIX5G_IBOOST_PAD_IAUX_CORE1 0x0f4 +#define R2057_PGA_BOOST_TUNE_CORE1 0x0f5 +#define R2057_PGA_GAIN_CORE1 0x0f6 +#define R2057_PAD5G_CASCOFFV_GAIN_PUS_CORE1 0x0f7 +#define R2057_TXMIX5G_BOOST_TUNE_CORE1 0x0f8 +#define R2057_PAD5G_TUNE_MISC_PUS_CORE1 0x0f9 +#define R2057_IPA5G_IAUX_CORE1 0x0fa +#define R2057_IPA5G_GAIN_CORE1 0x0fb +#define R2057_TSSI5G_SPARE1_CORE1 0x0fc +#define R2057_TSSI5G_SPARE2_CORE1 0x0fd +#define R2057_IPA5G_CASCOFFV_PU_CORE1 0x0fe +#define R2057_IPA5G_PTAT_CORE1 0x0ff +#define R2057_IPA5G_IMAIN_CORE1 0x100 +#define R2057_IPA5G_CASCONV_CORE1 0x101 +#define R2057_IPA5G_BIAS_FILTER_CORE1 0x102 +#define R2057_PAD_BIAS_FILTER_BWS_CORE1 0x105 +#define R2057_TR2G_CONFIG1_CORE1_NU 0x106 +#define R2057_TR2G_CONFIG2_CORE1_NU 0x107 +#define R2057_LNA5G_RFEN_CORE1 0x108 +#define R2057_TR5G_CONFIG2_CORE1_NU 0x109 +#define R2057_RXRFBIAS_IBOOST_PU_CORE1 0x10a +#define R2057_RXRF_IABAND_RXGM_IMAIN_PTAT_CORE1 0x10b +#define R2057_RXGM_CMFBITAIL_AUXPTAT_CORE1 0x10c +#define R2057_RXMIX_ICORE_RXGM_IAUX_CORE1 0x10d +#define R2057_RXMIX_CMFBITAIL_PU_CORE1 0x10e +#define R2057_LNA2_IMAIN_PTAT_PU_CORE1 0x10f +#define R2057_LNA2_IAUX_PTAT_CORE1 0x110 +#define R2057_LNA1_IMAIN_PTAT_PU_CORE1 0x111 +#define R2057_LNA15G_INPUT_MATCH_TUNE_CORE1 0x112 +#define R2057_RXRFBIAS_BANDSEL_CORE1 0x113 +#define R2057_TIA_CONFIG_CORE1 0x114 +#define R2057_TIA_IQGAIN_CORE1 0x115 +#define R2057_TIA_IBIAS2_CORE1 0x116 +#define R2057_TIA_IBIAS1_CORE1 0x117 +#define R2057_TIA_SPARE_Q_CORE1 0x118 +#define R2057_TIA_SPARE_I_CORE1 0x119 +#define R2057_RXMIX2G_PUS_CORE1 0x11a +#define R2057_RXMIX2G_VCMREFS_CORE1 0x11b +#define R2057_RXMIX2G_LODC_QI_CORE1 0x11c +#define R2057_W12G_BW_LNA2G_PUS_CORE1 0x11d +#define R2057_LNA2G_GAIN_CORE1 0x11e +#define R2057_LNA2G_TUNE_CORE1 0x11f +#define R2057_RXMIX5G_PUS_CORE1 0x120 +#define R2057_RXMIX5G_VCMREFS_CORE1 0x121 +#define R2057_RXMIX5G_LODC_QI_CORE1 0x122 +#define R2057_W15G_BW_LNA5G_PUS_CORE1 0x123 +#define R2057_LNA5G_GAIN_CORE1 0x124 +#define R2057_LNA5G_TUNE_CORE1 0x125 +#define R2057_LPFSEL_TXRX_RXBB_PUS_CORE1 0x126 +#define R2057_RXBB_BIAS_MASTER_CORE1 0x127 +#define R2057_RXBB_VGABUF_IDACS_CORE1 0x128 +#define R2057_LPF_VCMREF_TXBUF_VCMREF_CORE1 0x129 +#define R2057_TXBUF_VINCM_CORE1 0x12a +#define R2057_TXBUF_IDACS_CORE1 0x12b +#define R2057_LPF_RESP_RXBUF_BW_CORE1 0x12c +#define R2057_RXBB_CC_CORE1 0x12d +#define R2057_RXBB_SPARE3_CORE1 0x12e +#define R2057_RXBB_RCCAL_HPC_CORE1 0x12f +#define R2057_LPF_IDACS_CORE1 0x130 +#define R2057_LPFBYP_DCLOOP_BYP_IDAC_CORE1 0x131 +#define R2057_TXBUF_GAIN_CORE1 0x132 +#define R2057_AFELOOPBACK_AACI_RESP_CORE1 0x133 +#define R2057_RXBUF_DEGEN_CORE1 0x134 +#define R2057_RXBB_SPARE2_CORE1 0x135 +#define R2057_RXBB_SPARE1_CORE1 0x136 +#define R2057_RSSI_MASTER_CORE1 0x137 +#define R2057_W2_MASTER_CORE1 0x138 +#define R2057_NB_MASTER_CORE1 0x139 +#define R2057_W2_IDACS0_Q_CORE1 0x13a +#define R2057_W2_IDACS1_Q_CORE1 0x13b +#define R2057_W2_IDACS0_I_CORE1 0x13c +#define R2057_W2_IDACS1_I_CORE1 0x13d +#define 
R2057_RSSI_GPAIOSEL_W1_IDACS_CORE1 0x13e +#define R2057_NB_IDACS_Q_CORE1 0x13f +#define R2057_NB_IDACS_I_CORE1 0x140 +#define R2057_BACKUP4_CORE1 0x146 +#define R2057_BACKUP3_CORE1 0x147 +#define R2057_BACKUP2_CORE1 0x148 +#define R2057_BACKUP1_CORE1 0x149 +#define R2057_SPARE16_CORE1 0x14a +#define R2057_SPARE15_CORE1 0x14b +#define R2057_SPARE14_CORE1 0x14c +#define R2057_SPARE13_CORE1 0x14d +#define R2057_SPARE12_CORE1 0x14e +#define R2057_SPARE11_CORE1 0x14f +#define R2057_TX2G_BIAS_RESETS_CORE1 0x150 +#define R2057_TX5G_BIAS_RESETS_CORE1 0x151 +#define R2057_SPARE8_CORE1 0x152 +#define R2057_SPARE7_CORE1 0x153 +#define R2057_BUFS_MISC_LPFBW_CORE1 0x154 +#define R2057_TXLPF_RCCAL_CORE1 0x155 +#define R2057_RXBB_GPAIOSEL_RXLPF_RCCAL_CORE1 0x156 +#define R2057_LPF_GAIN_CORE1 0x157 +#define R2057_DACBUF_IDACS_BW_CORE1 0x158 +#define R2057_DACBUF_VINCM_CORE1 0x159 +#define R2057_RCCAL_START_R1_Q1_P1 0x15a +#define R2057_RCCAL_X1 0x15b +#define R2057_RCCAL_TRC0 0x15c +#define R2057_RCCAL_TRC1 0x15d +#define R2057_RCCAL_DONE_OSCCAP 0x15e +#define R2057_RCCAL_N0_0 0x15f +#define R2057_RCCAL_N0_1 0x160 +#define R2057_RCCAL_N1_0 0x161 +#define R2057_RCCAL_N1_1 0x162 +#define R2057_RCAL_STATUS 0x163 +#define R2057_XTALPUOVR_PINCTRL 0x164 +#define R2057_OVR_REG0 0x165 +#define R2057_OVR_REG1 0x166 +#define R2057_OVR_REG2 0x167 +#define R2057_OVR_REG3 0x168 +#define R2057_OVR_REG4 0x169 +#define R2057_RCCAL_SCAP_VAL 0x16a +#define R2057_RCCAL_BCAP_VAL 0x16b +#define R2057_RCCAL_HPC_VAL 0x16c +#define R2057_RCCAL_OVERRIDES 0x16d +#define R2057_TX0_IQCAL_GAIN_BW 0x170 +#define R2057_TX0_LOFT_FINE_I 0x171 +#define R2057_TX0_LOFT_FINE_Q 0x172 +#define R2057_TX0_LOFT_COARSE_I 0x173 +#define R2057_TX0_LOFT_COARSE_Q 0x174 +#define R2057_TX0_TX_SSI_MASTER 0x175 +#define R2057_TX0_IQCAL_VCM_HG 0x176 +#define R2057_TX0_IQCAL_IDAC 0x177 +#define R2057_TX0_TSSI_VCM 0x178 +#define R2057_TX0_TX_SSI_MUX 0x179 +#define R2057_TX0_TSSIA 0x17a +#define R2057_TX0_TSSIG 0x17b +#define R2057_TX0_TSSI_MISC1 0x17c +#define R2057_TX0_TXRXCOUPLE_2G_ATTEN 0x17d +#define R2057_TX0_TXRXCOUPLE_2G_PWRUP 0x17e +#define R2057_TX0_TXRXCOUPLE_5G_ATTEN 0x17f +#define R2057_TX0_TXRXCOUPLE_5G_PWRUP 0x180 +#define R2057_TX1_IQCAL_GAIN_BW 0x190 +#define R2057_TX1_LOFT_FINE_I 0x191 +#define R2057_TX1_LOFT_FINE_Q 0x192 +#define R2057_TX1_LOFT_COARSE_I 0x193 +#define R2057_TX1_LOFT_COARSE_Q 0x194 +#define R2057_TX1_TX_SSI_MASTER 0x195 +#define R2057_TX1_IQCAL_VCM_HG 0x196 +#define R2057_TX1_IQCAL_IDAC 0x197 +#define R2057_TX1_TSSI_VCM 0x198 +#define R2057_TX1_TX_SSI_MUX 0x199 +#define R2057_TX1_TSSIA 0x19a +#define R2057_TX1_TSSIG 0x19b +#define R2057_TX1_TSSI_MISC1 0x19c +#define R2057_TX1_TXRXCOUPLE_2G_ATTEN 0x19d +#define R2057_TX1_TXRXCOUPLE_2G_PWRUP 0x19e +#define R2057_TX1_TXRXCOUPLE_5G_ATTEN 0x19f +#define R2057_TX1_TXRXCOUPLE_5G_PWRUP 0x1a0 +#define R2057_AFE_VCM_CAL_MASTER_CORE0 0x1a1 +#define R2057_AFE_SET_VCM_I_CORE0 0x1a2 +#define R2057_AFE_SET_VCM_Q_CORE0 0x1a3 +#define R2057_AFE_STATUS_VCM_IQADC_CORE0 0x1a4 +#define R2057_AFE_STATUS_VCM_I_CORE0 0x1a5 +#define R2057_AFE_STATUS_VCM_Q_CORE0 0x1a6 +#define R2057_AFE_VCM_CAL_MASTER_CORE1 0x1a7 +#define R2057_AFE_SET_VCM_I_CORE1 0x1a8 +#define R2057_AFE_SET_VCM_Q_CORE1 0x1a9 +#define R2057_AFE_STATUS_VCM_IQADC_CORE1 0x1aa +#define R2057_AFE_STATUS_VCM_I_CORE1 0x1ab +#define R2057_AFE_STATUS_VCM_Q_CORE1 0x1ac + +#define R2057v7_DACBUF_VINCM_CORE0 0x1ad +#define R2057v7_RCCAL_MASTER 0x1ae +#define R2057v7_TR2G_CONFIG3_CORE0_NU 0x1af +#define R2057v7_TR2G_CONFIG3_CORE1_NU 0x1b0 
+#define R2057v7_LOGEN_PUS1 0x1b1 +#define R2057v7_OVR_REG5 0x1b2 +#define R2057v7_OVR_REG6 0x1b3 +#define R2057v7_OVR_REG7 0x1b4 +#define R2057v7_OVR_REG8 0x1b5 +#define R2057v7_OVR_REG9 0x1b6 +#define R2057v7_OVR_REG10 0x1b7 +#define R2057v7_OVR_REG11 0x1b8 +#define R2057v7_OVR_REG12 0x1b9 +#define R2057v7_OVR_REG13 0x1ba +#define R2057v7_OVR_REG14 0x1bb +#define R2057v7_OVR_REG15 0x1bc +#define R2057v7_OVR_REG16 0x1bd +#define R2057v7_OVR_REG1 0x1be +#define R2057v7_OVR_REG18 0x1bf +#define R2057v7_OVR_REG19 0x1c0 +#define R2057v7_OVR_REG20 0x1c1 +#define R2057v7_OVR_REG21 0x1c2 +#define R2057v7_OVR_REG2 0x1c3 +#define R2057v7_OVR_REG23 0x1c4 +#define R2057v7_OVR_REG24 0x1c5 +#define R2057v7_OVR_REG25 0x1c6 +#define R2057v7_OVR_REG26 0x1c7 +#define R2057v7_OVR_REG27 0x1c8 +#define R2057v7_OVR_REG28 0x1c9 +#define R2057v7_IQTEST_SEL_PU2 0x1ca + +#define R2057_VCM_MASK 0x7 + +void r2057_upload_inittabs(struct b43_wldev *dev); + +#endif /* B43_RADIO_2057_H_ */ diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index f0d8377429c6..97d4e27bf36f 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -2757,6 +2757,49 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = { { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ }; +/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */ +static const struct nphy_rf_control_override_rev7 + tbl_rf_control_override_rev7_over0[] = { + { 0x0004, 0x07A, 0x07D, 0x0002, 1 }, + { 0x0008, 0x07A, 0x07D, 0x0004, 2 }, + { 0x0010, 0x07A, 0x07D, 0x0010, 4 }, + { 0x0020, 0x07A, 0x07D, 0x0020, 5 }, + { 0x0040, 0x07A, 0x07D, 0x0040, 6 }, + { 0x0080, 0x0F8, 0x0FA, 0x0080, 7 }, + { 0x0400, 0x0F8, 0x0FA, 0x0070, 4 }, + { 0x0800, 0x07B, 0x07E, 0xFFFF, 0 }, + { 0x1000, 0x07C, 0x07F, 0xFFFF, 0 }, + { 0x6000, 0x348, 0x349, 0xFFFF, 0 }, + { 0x2000, 0x348, 0x349, 0x000F, 0 }, +}; + +/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */ +static const struct nphy_rf_control_override_rev7 + tbl_rf_control_override_rev7_over1[] = { + { 0x0002, 0x340, 0x341, 0x0002, 1 }, + { 0x0008, 0x340, 0x341, 0x0008, 3 }, + { 0x0020, 0x340, 0x341, 0x0020, 5 }, + { 0x0010, 0x340, 0x341, 0x0010, 4 }, + { 0x0004, 0x340, 0x341, 0x0004, 2 }, + { 0x0080, 0x340, 0x341, 0x0700, 8 }, + { 0x0800, 0x340, 0x341, 0x4000, 14 }, + { 0x0400, 0x340, 0x341, 0x2000, 13 }, + { 0x0200, 0x340, 0x341, 0x0800, 12 }, + { 0x0100, 0x340, 0x341, 0x0100, 11 }, + { 0x0040, 0x340, 0x341, 0x0040, 6 }, + { 0x0001, 0x340, 0x341, 0x0001, 0 }, +}; + +/* field, val_addr_core0, val_addr_core1, val_mask, val_shift */ +static const struct nphy_rf_control_override_rev7 + tbl_rf_control_override_rev7_over2[] = { + { 0x0008, 0x344, 0x345, 0x0008, 3 }, + { 0x0002, 0x344, 0x345, 0x0002, 1 }, + { 0x0001, 0x344, 0x345, 0x0001, 0 }, + { 0x0004, 0x344, 0x345, 0x0004, 2 }, + { 0x0010, 0x344, 0x345, 0x0010, 4 }, +}; + struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { { 10, 14, 19, 27 }, { -5, 6, 10, 15 }, @@ -3248,3 +3291,35 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( return e; } + +const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7( + struct b43_wldev *dev, u16 field, u8 override) +{ + const struct nphy_rf_control_override_rev7 *e; + u8 size, i; + + switch (override) { + case 0: + e = tbl_rf_control_override_rev7_over0; + size = ARRAY_SIZE(tbl_rf_control_override_rev7_over0); + break; + case 1: + e = 
tbl_rf_control_override_rev7_over1; + size = ARRAY_SIZE(tbl_rf_control_override_rev7_over1); + break; + case 2: + e = tbl_rf_control_override_rev7_over2; + size = ARRAY_SIZE(tbl_rf_control_override_rev7_over2); + break; + default: + b43err(dev->wl, "Invalid override value %d\n", override); + return NULL; + } + + for (i = 0; i < size; i++) { + if (e[i].field == field) + return &e[i]; + } + + return NULL; +} diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h index f348953c0230..c600700ceedc 100644 --- a/drivers/net/wireless/b43/tables_nphy.h +++ b/drivers/net/wireless/b43/tables_nphy.h @@ -35,6 +35,14 @@ struct nphy_rf_control_override_rev3 { u8 val_addr1; }; +struct nphy_rf_control_override_rev7 { + u16 field; + u16 val_addr_core0; + u16 val_addr_core1; + u16 val_mask; + u8 val_shift; +}; + struct nphy_gain_ctl_workaround_entry { s8 lna1_gain[4]; s8 lna2_gain[4]; @@ -202,5 +210,7 @@ extern const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[]; extern const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[]; +const struct nphy_rf_control_override_rev7 *b43_nphy_get_rf_ctl_over_rev7( + struct b43_wldev *dev, u16 field, u8 override); #endif /* B43_TABLES_NPHY_H_ */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 8156135a0590..291cdf654088 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -1920,7 +1920,7 @@ static int b43legacy_gpio_init(struct b43legacy_wldev *dev) return 0; ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL) - & mask) | set); + & ~mask) | set); return 0; } @@ -2492,6 +2492,7 @@ static void b43legacy_tx_work(struct work_struct *work) } static void b43legacy_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, struct sk_buff *skb) { struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index a5edebeb0b4f..718da8d6d658 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -86,7 +86,9 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver."); MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); - +/* This needs to be adjusted when brcms_firmwares changes */ +MODULE_FIRMWARE("brcm/bcm43xx-0.fw"); +MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw"); /* recognized BCMA Core IDs */ static struct bcma_device_id brcms_coreid_table[] = { @@ -265,7 +267,9 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br) } } -static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void brcms_ops_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct brcms_info *wl = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -277,7 +281,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) goto done; } brcms_c_sendpkt_mac80211(wl->wlc, skb, hw); - tx_info->rate_driver_data[0] = tx_info->control.sta; + tx_info->rate_driver_data[0] = control->sta; done: spin_unlock_bh(&wl->lock); } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 03ca65324845..75086b37c817 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ 
b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -7512,15 +7512,10 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh, channel = BRCMS_CHAN_CHANNEL(rxh->RxChan); - if (channel > 14) { - rx_status->band = IEEE80211_BAND_5GHZ; - rx_status->freq = ieee80211_ofdm_chan_to_freq( - WF_CHAN_FACTOR_5_G/2, channel); - - } else { - rx_status->band = IEEE80211_BAND_2GHZ; - rx_status->freq = ieee80211_dsss_chan_to_freq(channel); - } + rx_status->band = + channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ; + rx_status->freq = + ieee80211_channel_to_frequency(channel, rx_status->band); rx_status->signal = wlc_phy_rssi_compute(wlc->hw->band->pi, rxh); diff --git a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h index f10d30274c23..c11a290a1edf 100644 --- a/drivers/net/wireless/brcm80211/include/brcmu_wifi.h +++ b/drivers/net/wireless/brcm80211/include/brcmu_wifi.h @@ -67,11 +67,6 @@ #define WL_CHANSPEC_BAND_2G 0x2000 #define INVCHANSPEC 255 -/* used to calculate the chan_freq = chan_factor * 500Mhz + 5 * chan_number */ -#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */ -#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */ -#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */ - #define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK)) #define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index faec40467208..e252acb9c862 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c @@ -460,7 +460,9 @@ il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd, * start C_TX command process */ static int -il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) +il3945_tx_skb(struct il_priv *il, + struct ieee80211_sta *sta, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -512,7 +514,7 @@ il3945_tx_skb(struct il_priv *il, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); /* Find idx into station table for destination station */ - sta_id = il_sta_id_or_broadcast(il, info->control.sta); + sta_id = il_sta_id_or_broadcast(il, sta); if (sta_id == IL_INVALID_STATION) { D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); goto drop; @@ -2859,7 +2861,9 @@ il3945_mac_stop(struct ieee80211_hw *hw) } static void -il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +il3945_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct il_priv *il = hw->priv; @@ -2868,7 +2872,7 @@ il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - if (il3945_tx_skb(il, skb)) + if (il3945_tx_skb(il, control->sta, skb)) dev_kfree_skb_any(skb); D_MAC80211("leave\n"); diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index 34f61a0581a2..eac4dc8bc879 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c @@ -1526,8 +1526,11 @@ il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb, } static void -il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, - struct ieee80211_tx_info *info, __le16 fc) +il4965_tx_cmd_build_rate(struct il_priv *il, + struct il_tx_cmd *tx_cmd, + 
struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + __le16 fc) { const u8 rts_retry_limit = 60; u32 rate_flags; @@ -1561,9 +1564,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd, rate_idx = info->control.rates[0].idx; if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY) - rate_idx = - rate_lowest_index(&il->bands[info->band], - info->control.sta); + rate_idx = rate_lowest_index(&il->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ if (info->band == IEEE80211_BAND_5GHZ) rate_idx += IL_FIRST_OFDM_RATE; @@ -1630,11 +1631,12 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info, * start C_TX command process */ int -il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) +il4965_tx_skb(struct il_priv *il, + struct ieee80211_sta *sta, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; struct il_station_priv *sta_priv = NULL; struct il_tx_queue *txq; struct il_queue *q; @@ -1680,7 +1682,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) sta_id = il->hw_params.bcast_id; else { /* Find idx into station table for destination station */ - sta_id = il_sta_id_or_broadcast(il, info->control.sta); + sta_id = il_sta_id_or_broadcast(il, sta); if (sta_id == IL_INVALID_STATION) { D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1); @@ -1786,7 +1788,7 @@ il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) /* TODO need this for burst mode later on */ il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id); - il4965_tx_cmd_build_rate(il, tx_cmd, info, fc); + il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc); il_update_stats(il, true, fc, len); /* @@ -5828,7 +5830,9 @@ il4965_mac_stop(struct ieee80211_hw *hw) } void -il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +il4965_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct il_priv *il = hw->priv; @@ -5837,7 +5841,7 @@ il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - if (il4965_tx_skb(il, skb)) + if (il4965_tx_skb(il, control->sta, skb)) dev_kfree_skb_any(skb); D_MACDUMP("leave\n"); diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h index 1db677689cfe..2d092f328547 100644 --- a/drivers/net/wireless/iwlegacy/4965.h +++ b/drivers/net/wireless/iwlegacy/4965.h @@ -78,7 +78,9 @@ int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq); void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, struct ieee80211_tx_info *info); -int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb); +int il4965_tx_skb(struct il_priv *il, + struct ieee80211_sta *sta, + struct sk_buff *skb); int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 * ssn); int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, @@ -163,7 +165,9 @@ void il4965_eeprom_release_semaphore(struct il_priv *il); int il4965_eeprom_check_version(struct il_priv *il); /* mac80211 handlers (for 4965) */ -void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void 
il4965_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb); int il4965_mac_start(struct ieee80211_hw *hw); void il4965_mac_stop(struct ieee80211_hw *hw); void il4965_configure_filter(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 0370403fd0bd..eb9987520d61 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c @@ -4860,7 +4860,7 @@ EXPORT_SYMBOL(il_add_beacon_time); #ifdef CONFIG_PM -int +static int il_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); @@ -4877,9 +4877,8 @@ il_pci_suspend(struct device *device) return 0; } -EXPORT_SYMBOL(il_pci_suspend); -int +static int il_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); @@ -4906,16 +4905,8 @@ il_pci_resume(struct device *device) return 0; } -EXPORT_SYMBOL(il_pci_resume); -const struct dev_pm_ops il_pm_ops = { - .suspend = il_pci_suspend, - .resume = il_pci_resume, - .freeze = il_pci_suspend, - .thaw = il_pci_resume, - .poweroff = il_pci_suspend, - .restore = il_pci_resume, -}; +SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume); EXPORT_SYMBOL(il_pm_ops); #endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h index 5f5017767b99..3d3135ed62d7 100644 --- a/drivers/net/wireless/iwlegacy/common.h +++ b/drivers/net/wireless/iwlegacy/common.h @@ -1845,8 +1845,6 @@ __le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, u32 beacon_interval); #ifdef CONFIG_PM -int il_pci_suspend(struct device *device); -int il_pci_resume(struct device *device); extern const struct dev_pm_ops il_pm_ops; #define IL_LEGACY_PM_OPS (&il_pm_ops) diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index 9bb16bdf6d26..75e12f29d9eb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -201,7 +201,9 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); /* tx */ -int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); +int iwlagn_tx_skb(struct iwl_priv *priv, + struct ieee80211_sta *sta, + struct sk_buff *skb); int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif, @@ -485,16 +487,13 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) } #ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); -void iwl_dbgfs_unregister(struct iwl_priv *priv); +int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); #else -static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) +static inline int iwl_dbgfs_register(struct iwl_priv *priv, + struct dentry *dbgfs_dir) { return 0; } -static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) -{ -} #endif /* CONFIG_IWLWIFI_DEBUGFS */ #ifdef CONFIG_IWLWIFI_DEBUG diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index a47b306b522c..1a98fa3ab06d 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -2352,24 +2352,19 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); * Create the debugfs files and directories * */ -int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) +int iwl_dbgfs_register(struct 
iwl_priv *priv, struct dentry *dbgfs_dir) { - struct dentry *phyd = priv->hw->wiphy->debugfsdir; - struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + struct dentry *dir_data, *dir_rf, *dir_debug; - dir_drv = debugfs_create_dir(name, phyd); - if (!dir_drv) - return -ENOMEM; - - priv->debugfs_dir = dir_drv; + priv->debugfs_dir = dbgfs_dir; - dir_data = debugfs_create_dir("data", dir_drv); + dir_data = debugfs_create_dir("data", dbgfs_dir); if (!dir_data) goto err; - dir_rf = debugfs_create_dir("rf", dir_drv); + dir_rf = debugfs_create_dir("rf", dbgfs_dir); if (!dir_rf) goto err; - dir_debug = debugfs_create_dir("debug", dir_drv); + dir_debug = debugfs_create_dir("debug", dbgfs_dir); if (!dir_debug) goto err; @@ -2415,25 +2410,30 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) /* Calibrations disabled/enabled status*/ DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR); - if (iwl_trans_dbgfs_register(priv->trans, dir_debug)) - goto err; + /* + * Create a symlink with mac80211. This is not very robust, as it does + * not remove the symlink created. The implicit assumption is that + * when the opmode exits, mac80211 will also exit, and will remove + * this symlink as part of its cleanup. + */ + if (priv->mac80211_registered) { + char buf[100]; + struct dentry *mac80211_dir, *dev_dir, *root_dir; + + dev_dir = dbgfs_dir->d_parent; + root_dir = dev_dir->d_parent; + mac80211_dir = priv->hw->wiphy->debugfsdir; + + snprintf(buf, 100, "../../%s/%s", root_dir->d_name.name, + dev_dir->d_name.name); + + if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf)) + goto err; + } + return 0; err: - IWL_ERR(priv, "Can't create the debugfs directory\n"); - iwl_dbgfs_unregister(priv); + IWL_ERR(priv, "failed to create the dvm debugfs entries\n"); return -ENOMEM; } - -/** - * Remove the debugfs files and directories - * - */ -void iwl_dbgfs_unregister(struct iwl_priv *priv) -{ - if (!priv->debugfs_dir) - return; - - debugfs_remove_recursive(priv->debugfs_dir); - priv->debugfs_dir = NULL; -} diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index a5f7bce96325..ff8162d4c454 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -195,7 +195,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, ARRAY_SIZE(iwlagn_iface_combinations_dualmode); } - hw->wiphy->max_remain_on_channel_duration = 1000; + hw->wiphy->max_remain_on_channel_duration = 500; hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS | @@ -511,14 +511,16 @@ static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled) } #endif -static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void iwlagn_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); - if (iwlagn_tx_skb(priv, skb)) + if (iwlagn_tx_skb(priv, control->sta, skb)) dev_kfree_skb_any(skb); } diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 84d3db5aa506..7ff3f1430678 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -862,7 +862,8 @@ void iwl_down(struct iwl_priv *priv) * No race since we hold the mutex here and a new one * can't come in at this time. 
*/ - ieee80211_remain_on_channel_expired(priv->hw); + if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT) + ieee80211_remain_on_channel_expired(priv->hw); exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); @@ -994,7 +995,11 @@ static void iwl_bg_restart(struct work_struct *data) iwlagn_prepare_restart(priv); mutex_unlock(&priv->mutex); iwl_cancel_deferred_work(priv); - ieee80211_restart_hw(priv->hw); + if (priv->mac80211_registered) + ieee80211_restart_hw(priv->hw); + else + IWL_ERR(priv, + "Cannot request restart before registrating with mac80211"); } else { WARN_ON(1); } @@ -1222,7 +1227,8 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const struct iwl_fw *fw) + const struct iwl_fw *fw, + struct dentry *dbgfs_dir) { struct iwl_priv *priv; struct ieee80211_hw *hw; @@ -1466,13 +1472,17 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) goto out_destroy_workqueue; - if (iwl_dbgfs_register(priv, DRV_NAME)) - IWL_ERR(priv, - "failed to create debugfs files. Ignoring error\n"); + if (iwl_dbgfs_register(priv, dbgfs_dir)) + goto out_mac80211_unregister; return op_mode; +out_mac80211_unregister: + iwlagn_mac_unregister(priv); out_destroy_workqueue: + iwl_tt_exit(priv); + iwl_testmode_free(priv); + iwl_cancel_deferred_work(priv); destroy_workqueue(priv->workqueue); priv->workqueue = NULL; iwl_uninit_drv(priv); @@ -1493,8 +1503,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); - iwl_dbgfs_unregister(priv); - iwl_testmode_free(priv); iwlagn_mac_unregister(priv); diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index b29b798f7550..fe36a38f3505 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -150,7 +150,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, sta_id, sta->sta.addr, flags & CMD_ASYNC ? 
"a" : ""); if (!(flags & CMD_ASYNC)) { - cmd.flags |= CMD_WANT_SKB; + cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; might_sleep(); } diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 5971a23aa47d..f5ca73a89870 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -127,6 +127,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) { u32 rate_flags; @@ -187,8 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) rate_idx = rate_lowest_index( - &priv->eeprom_data->bands[info->band], - info->control.sta); + &priv->eeprom_data->bands[info->band], sta); /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ if (info->band == IEEE80211_BAND_5GHZ) rate_idx += IWL_FIRST_OFDM_RATE; @@ -291,7 +291,9 @@ static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context, /* * start REPLY_TX command process */ -int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +int iwlagn_tx_skb(struct iwl_priv *priv, + struct ieee80211_sta *sta, + struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -345,7 +347,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) sta_id = ctx->bcast_sta_id; else { /* Find index into station table for destination station */ - sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta); + sta_id = iwl_sta_id_or_broadcast(ctx, sta); if (sta_id == IWL_INVALID_STATION) { IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", hdr->addr1); @@ -355,8 +357,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); - if (info->control.sta) - sta_priv = (void *)info->control.sta->drv_priv; + if (sta) + sta_priv = (void *)sta->drv_priv; if (sta_priv && sta_priv->asleep && (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) { @@ -397,7 +399,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* TODO need this for burst mode later on */ iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); - iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); + iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc); memset(&info->status, 0, sizeof(info->status)); @@ -431,7 +433,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) * only. Check this here. 
*/ if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON && - tid_data->agg.state != IWL_AGG_OFF, + tid_data->agg.state != IWL_AGG_OFF, "Tx while agg.state = %d", tid_data->agg.state)) goto drop_unlock_sta; diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index cc41cfaedfbd..48d6d44c16d0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -101,6 +101,10 @@ MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); +#ifdef CONFIG_IWLWIFI_DEBUGFS +static struct dentry *iwl_dbgfs_root; +#endif + /** * struct iwl_drv - drv common data * @list: list of drv structures using this opmode @@ -126,6 +130,12 @@ struct iwl_drv { char firmware_name[25]; /* name of firmware file to load */ struct completion request_firmware_complete; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct dentry *dbgfs_drv; + struct dentry *dbgfs_trans; + struct dentry *dbgfs_op_mode; +#endif }; #define DVM_OP_MODE 0 @@ -194,7 +204,8 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, return 0; } -static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); +static void iwl_req_fw_callback(const struct firmware *ucode_raw, + void *context); #define UCODE_EXPERIMENTAL_INDEX 100 #define UCODE_EXPERIMENTAL_TAG "exp" @@ -231,7 +242,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, drv->trans->dev, - GFP_KERNEL, drv, iwl_ucode_callback); + GFP_KERNEL, drv, iwl_req_fw_callback); } struct fw_img_parsing { @@ -759,13 +770,57 @@ static int validate_sec_sizes(struct iwl_drv *drv, return 0; } +static struct iwl_op_mode * +_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) +{ + const struct iwl_op_mode_ops *ops = op->ops; + struct dentry *dbgfs_dir = NULL; + struct iwl_op_mode *op_mode = NULL; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + if (!drv->dbgfs_op_mode) { + IWL_ERR(drv, + "failed to create opmode debugfs directory\n"); + return op_mode; + } + dbgfs_dir = drv->dbgfs_op_mode; +#endif + + op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (!op_mode) { + debugfs_remove_recursive(drv->dbgfs_op_mode); + drv->dbgfs_op_mode = NULL; + } +#endif + + return op_mode; +} + +static void _iwl_op_mode_stop(struct iwl_drv *drv) +{ + /* op_mode can be NULL if its start failed */ + if (drv->op_mode) { + iwl_op_mode_stop(drv->op_mode); + drv->op_mode = NULL; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(drv->dbgfs_op_mode); + drv->dbgfs_op_mode = NULL; +#endif + } +} + /** - * iwl_ucode_callback - callback when firmware was loaded + * iwl_req_fw_callback - callback when firmware was loaded * * If loaded successfully, copies the firmware into buffers * for the card to fetch (via DMA). 
*/ -static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) +static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) { struct iwl_drv *drv = context; struct iwl_fw *fw = &drv->fw; @@ -908,8 +963,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) list_add_tail(&drv->list, &op->drv); if (op->ops) { - const struct iwl_op_mode_ops *ops = op->ops; - drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw); + drv->op_mode = _iwl_op_mode_start(drv, op); if (!drv->op_mode) { mutex_unlock(&iwlwifi_opmode_table_mtx); @@ -969,24 +1023,51 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, init_completion(&drv->request_firmware_complete); INIT_LIST_HEAD(&drv->list); +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* Create the device debugfs entries. */ + drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), + iwl_dbgfs_root); + + if (!drv->dbgfs_drv) { + IWL_ERR(drv, "failed to create debugfs directory\n"); + goto err_free_drv; + } + + /* Create transport layer debugfs dir */ + drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); + + if (!drv->trans->dbgfs_dir) { + IWL_ERR(drv, "failed to create transport debugfs directory\n"); + goto err_free_dbgfs; + } +#endif + ret = iwl_request_firmware(drv, true); if (ret) { IWL_ERR(trans, "Couldn't request the fw\n"); - kfree(drv); - drv = NULL; + goto err_fw; } return drv; + +err_fw: +#ifdef CONFIG_IWLWIFI_DEBUGFS +err_free_dbgfs: + debugfs_remove_recursive(drv->dbgfs_drv); +err_free_drv: +#endif + kfree(drv); + drv = NULL; + + return drv; } void iwl_drv_stop(struct iwl_drv *drv) { wait_for_completion(&drv->request_firmware_complete); - /* op_mode can be NULL if its start failed */ - if (drv->op_mode) - iwl_op_mode_stop(drv->op_mode); + _iwl_op_mode_stop(drv); iwl_dealloc_ucode(drv); @@ -1000,6 +1081,10 @@ void iwl_drv_stop(struct iwl_drv *drv) list_del(&drv->list); mutex_unlock(&iwlwifi_opmode_table_mtx); +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(drv->dbgfs_drv); +#endif + kfree(drv); } @@ -1022,15 +1107,18 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) { int i; struct iwl_drv *drv; + struct iwlwifi_opmode_table *op; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { - if (strcmp(iwlwifi_opmode_table[i].name, name)) + op = &iwlwifi_opmode_table[i]; + if (strcmp(op->name, name)) continue; - iwlwifi_opmode_table[i].ops = ops; - list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) - drv->op_mode = ops->start(drv->trans, drv->cfg, - &drv->fw); + op->ops = ops; + /* TODO: need to handle exceptional case */ + list_for_each_entry(drv, &op->drv, list) + drv->op_mode = _iwl_op_mode_start(drv, op); + mutex_unlock(&iwlwifi_opmode_table_mtx); return 0; } @@ -1051,12 +1139,9 @@ void iwl_opmode_deregister(const char *name) iwlwifi_opmode_table[i].ops = NULL; /* call the stop routine for all devices */ - list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) { - if (drv->op_mode) { - iwl_op_mode_stop(drv->op_mode); - drv->op_mode = NULL; - } - } + list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) + _iwl_op_mode_stop(drv); + mutex_unlock(&iwlwifi_opmode_table_mtx); return; } @@ -1076,6 +1161,14 @@ static int __init iwl_drv_init(void) pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); pr_info(DRV_COPYRIGHT "\n"); +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* Create the root of iwlwifi debugfs subsystem. 
*/ + iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); + + if (!iwl_dbgfs_root) + return -EFAULT; +#endif + return iwl_pci_register_driver(); } module_init(iwl_drv_init); @@ -1083,6 +1176,10 @@ module_init(iwl_drv_init); static void __exit iwl_drv_exit(void) { iwl_pci_unregister_driver(); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove_recursive(iwl_dbgfs_root); +#endif } module_exit(iwl_drv_exit); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 2cbf137b25bf..285de5f68c05 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h @@ -90,9 +90,9 @@ * 4) The bus specific component configures the bus * 5) The bus specific component calls to the drv bus agnostic part * (iwl_drv_start) - * 6) iwl_drv_start fetches the fw ASYNC, iwl_ucode_callback - * 7) iwl_ucode_callback parses the fw file - * 8) iwl_ucode_callback starts the wifi implementation to matches the fw + * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback + * 7) iwl_req_fw_callback parses the fw file + * 8) iwl_req_fw_callback starts the wifi implementation to match the fw */ struct iwl_drv; diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index 64886f95664f..c8d9b9517468 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -134,7 +134,8 @@ struct iwl_cfg; struct iwl_op_mode_ops { struct iwl_op_mode *(*start)(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const struct iwl_fw *fw); + const struct iwl_fw *fw, + struct dentry *dbgfs_dir); void (*stop)(struct iwl_op_mode *op_mode); int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 92576a3e84ef..ff1154232885 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -184,14 +184,20 @@ struct iwl_rx_packet { * @CMD_SYNC: The caller will be stalled until the fw responds to the command * @CMD_ASYNC: Return right away and don't want for the response * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the - * response. + * response. The caller needs to call iwl_free_resp when done. + * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the + * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be + * copied. The pointer passed to the response handler is in the transport + * ownership and doesn't need to be freed by the op_mode. This also means + * that the pointer is invalidated after the op_mode's handler returns. * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/ enum CMD_MODE { CMD_SYNC = 0, CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), - CMD_ON_DEMAND = BIT(2), + CMD_WANT_HCMD = BIT(2), + CMD_ON_DEMAND = BIT(3), }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -460,6 +466,8 @@ struct iwl_trans { size_t dev_cmd_headroom; char dev_cmd_pool_name[50]; + struct dentry *dbgfs_dir; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f4c3500b68c6..89bfb43f4946 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -282,8 +282,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!trans_pcie->drv) goto out_free_trans; + /* register transport layer debugfs here */ + if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir)) + goto out_free_drv; + return 0; +out_free_drv: + iwl_drv_stop(trans_pcie->drv); out_free_trans: iwl_trans_pcie_free(iwl_trans); pci_set_drvdata(pdev, NULL); diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 4ffc18dc3a57..71c79943e633 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -184,6 +184,7 @@ struct iwl_queue { struct iwl_pcie_tx_queue_entry { struct iwl_device_cmd *cmd; + struct iwl_device_cmd *copy_cmd; struct sk_buff *skb; struct iwl_cmd_meta meta; }; diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index d1a61ba6247a..498372008810 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -421,13 +421,23 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) - cmd = txq->entries[cmd_index].cmd; - else + if (reclaim) { + struct iwl_pcie_tx_queue_entry *ent; + ent = &txq->entries[cmd_index]; + cmd = ent->copy_cmd; + WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); + } else { cmd = NULL; + } err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); + if (reclaim) { + /* The original command isn't needed any more */ + kfree(txq->entries[cmd_index].copy_cmd); + txq->entries[cmd_index].copy_cmd = NULL; + } + /* * After here, we should always check rxcb._page_stolen, * if it is true then one of the handlers took the page. 
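The copy_cmd plumbing above (pcie/internal.h and pcie/rx.c here, plus the pcie/tx.c hunk further down) amounts to a copy-on-enqueue, free-after-handler ownership rule: the transport duplicates the host command only when the caller set CMD_WANT_HCMD, lends the duplicate to the op_mode's response handler, and frees it as soon as the handler returns. A minimal sketch of that pattern, with hypothetical names (echo_entry, stash_cmd_copy, deliver_response) standing in for the real transport structures:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical stand-in for the per-TFD bookkeeping entry. */
struct echo_entry {
	void *copy_cmd;		/* duplicate of the host command, transport-owned */
	size_t copy_len;
};

/* Enqueue side: duplicate the built command only if the caller asked for it. */
static int stash_cmd_copy(struct echo_entry *ent, const void *cmd, size_t len,
			  bool want_hcmd)
{
	if (!want_hcmd)
		return 0;

	ent->copy_cmd = kmemdup(cmd, len, GFP_ATOMIC);
	if (!ent->copy_cmd)
		return -ENOMEM;
	ent->copy_len = len;
	return 0;
}

/* Response side: the handler only borrows the pointer; it is freed here,
 * so the handler must not cache it past its return. */
static void deliver_response(struct echo_entry *ent,
			     void (*handler)(const void *cmd, size_t len))
{
	if (ent->copy_cmd)
		handler(ent->copy_cmd, ent->copy_len);

	kfree(ent->copy_cmd);
	ent->copy_cmd = NULL;
}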
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 1e86ea2266d4..848851177e7e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -492,10 +492,11 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) iwl_tx_queue_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) - for (i = 0; i < txq->q.n_window; i++) + for (i = 0; i < txq->q.n_window; i++) { kfree(txq->entries[i].cmd); + kfree(txq->entries[i].copy_cmd); + } /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) { @@ -896,6 +897,7 @@ static int iwl_set_hw_ready(struct iwl_trans *trans) static int iwl_prepare_card_hw(struct iwl_trans *trans) { int ret; + int t = 0; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); @@ -908,17 +910,15 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans) iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); - ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + do { + ret = iwl_set_hw_ready(trans); + if (ret >= 0) + return 0; - if (ret < 0) - return ret; + usleep_range(200, 1000); + t += 200; + } while (t < 150000); - /* HW should be ready by now, check again. */ - ret = iwl_set_hw_ready(trans); - if (ret >= 0) - return 0; return ret; } @@ -1771,7 +1771,7 @@ void iwl_dump_csr(struct iwl_trans *trans) #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ if (!debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops)) \ - return -ENOMEM; \ + goto err; \ } while (0) /* file operation */ @@ -2035,6 +2035,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); return 0; + +err: + IWL_ERR(trans, "failed to create the trans debugfs entry\n"); + return -ENOMEM; } #else static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 6baf8deef519..392d2bc5e357 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -521,7 +521,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) u16 copy_size, cmd_size; bool had_nocopy = false; int i; - u8 *cmd_dest; + u32 cmd_pos; #ifdef CONFIG_IWLWIFI_DEVICE_TRACING const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {}; int trace_lens[IWL_MAX_CMD_TFDS + 1] = {}; @@ -584,15 +584,31 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) INDEX_TO_SEQ(q->write_ptr)); /* and copy the data that needs to be copied */ - - cmd_dest = out_cmd->payload; + cmd_pos = offsetof(struct iwl_device_cmd, payload); for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { if (!cmd->len[i]) continue; if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) break; - memcpy(cmd_dest, cmd->data[i], cmd->len[i]); - cmd_dest += cmd->len[i]; + memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); + cmd_pos += cmd->len[i]; + } + + WARN_ON_ONCE(txq->entries[idx].copy_cmd); + + /* + * since out_cmd will be the source address of the FH, it will write + * the retry count there. So when the user needs to receive the HCMD + * that corresponds to the response in the response handler, it needs + * to set CMD_WANT_HCMD.
+ */ + if (cmd->flags & CMD_WANT_HCMD) { + txq->entries[idx].copy_cmd = + kmemdup(out_cmd, cmd_pos, GFP_ATOMIC); + if (unlikely(!txq->entries[idx].copy_cmd)) { + idx = -ENOMEM; + goto out; + } } IWL_DEBUG_HC(trans, diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index a03457292c88..7001856241e6 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -227,7 +227,9 @@ static void lbtf_free_adapter(struct lbtf_private *priv) lbtf_deb_leave(LBTF_DEB_MAIN); } -static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void lbtf_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct lbtf_private *priv = hw->priv; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 00838395778c..9d45b3bb974c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); -static u32 wmediumd_pid; +static u32 wmediumd_portid; static int radios = 2; module_param(radios, int, 0444); @@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, struct sk_buff *my_skb, - int dst_pid) + int dst_portid) { struct sk_buff *skb; struct mac80211_hwsim_data *data = hw->priv; @@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; genlmsg_end(skb, msg_head); - genlmsg_unicast(&init_net, skb, dst_pid); + genlmsg_unicast(&init_net, skb, dst_portid); /* Enqueue the packet */ skb_queue_tail(&data->pending, my_skb); @@ -709,11 +709,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, return ack; } -static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void mac80211_hwsim_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { bool ack; struct ieee80211_tx_info *txi; - u32 _pid; + u32 _portid; mac80211_hwsim_monitor_rx(hw, skb); @@ -724,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) } /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _portid = ACCESS_ONCE(wmediumd_portid); - if (_pid) - return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); + if (_portid) + return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); /* NO wmediumd detected, perfect medium simulation */ ack = mac80211_hwsim_tx_frame_no_nl(hw, skb); @@ -812,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_hw *hw = arg; struct sk_buff *skb; struct ieee80211_tx_info *info; - u32 _pid; + u32 _portid; hwsim_check_magic(vif); @@ -829,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, mac80211_hwsim_monitor_rx(hw, skb); /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _portid = ACCESS_ONCE(wmediumd_portid); - if (_pid) - return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); + if (_portid) + return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); mac80211_hwsim_tx_frame_no_nl(hw, skb); dev_kfree_skb(skb); @@ -1313,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_pspoll *pspoll; - u32 _pid; + u32 _portid; if (!vp->assoc) return; @@ 
-1334,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) memcpy(pspoll->ta, mac, ETH_ALEN); /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _portid = ACCESS_ONCE(wmediumd_portid); - if (_pid) - return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); + if (_portid) + return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid); if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb)) printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__); @@ -1351,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; - u32 _pid; + u32 _portid; if (!vp->assoc) return; @@ -1373,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr3, vp->bssid, ETH_ALEN); /* wmediumd mode check */ - _pid = ACCESS_ONCE(wmediumd_pid); + _portid = ACCESS_ONCE(wmediumd_portid); - if (_pid) - return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); + if (_portid) + return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid); if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb)) printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__); @@ -1630,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2, if (info == NULL) goto out; - wmediumd_pid = info->snd_pid; + wmediumd_portid = info->snd_portid; printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, " - "switching to wmediumd mode with pid %d\n", info->snd_pid); + "switching to wmediumd mode with pid %d\n", info->snd_portid); return 0; out: @@ -1670,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, if (state != NETLINK_URELEASE) return NOTIFY_DONE; - if (notify->pid == wmediumd_pid) { + if (notify->portid == wmediumd_portid) { printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" " socket, switching to perfect channel medium\n"); - wmediumd_pid = 0; + wmediumd_portid = 0; } return NOTIFY_DONE; @@ -1727,6 +1729,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = { #endif BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) }, + { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, }; static const struct ieee80211_iface_combination hwsim_if_comb = { @@ -1813,7 +1816,8 @@ static int __init init_mac80211_hwsim(void) BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_MESH_POINT); + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_P2P_DEVICE); hw->flags = IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_SIGNAL_DBM | diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index e535c937628b..d2732736f864 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -726,3 +726,29 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv, return count; } + +/* + * This function retrieves the entry for specific tx BA stream table by RA and + * deletes it. 
+ */ +void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra) +{ + struct mwifiex_tx_ba_stream_tbl *tbl, *tmp; + unsigned long flags; + + if (!ra) + return; + + spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); + list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) { + if (!memcmp(tbl->ra, ra, ETH_ALEN)) { + spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, + flags); + mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl); + spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); + } + } + spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags); + + return; +} diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h index 28366e9211fb..67c087cf9dc7 100644 --- a/drivers/net/wireless/mwifiex/11n.h +++ b/drivers/net/wireless/mwifiex/11n.h @@ -69,6 +69,7 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv, int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd, int cmd_action, struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl); +void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra); /* * This function checks whether AMPDU is allowed or not for a particular TID. @@ -157,4 +158,18 @@ mwifiex_is_ba_stream_setup(struct mwifiex_private *priv, return false; } + +/* + * This function checks whether associated station is 11n enabled + */ +static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv, + struct mwifiex_sta_node *node) +{ + + if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) || + !priv->ap_11n_enabled) + return 0; + + return node->is_11n_enabled; +} #endif /* !_MWIFIEX_11N_H_ */ diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index ab84eb943749..395f1bfd4102 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -62,9 +62,7 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, }; struct tx_packet_hdr *tx_header; - skb_put(skb_aggr, sizeof(*tx_header)); - - tx_header = (struct tx_packet_hdr *) skb_aggr->data; + tx_header = (void *)skb_put(skb_aggr, sizeof(*tx_header)); /* Copy DA and SA */ dt_offset = 2 * ETH_ALEN; @@ -82,12 +80,10 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr, tx_header->eth803_hdr.h_proto = htons(skb_src->len + LLC_SNAP_LEN); /* Add payload */ - skb_put(skb_aggr, skb_src->len); - memcpy(skb_aggr->data + sizeof(*tx_header), skb_src->data, - skb_src->len); - *pad = (((skb_src->len + LLC_SNAP_LEN) & 3)) ? 
(4 - (((skb_src->len + - LLC_SNAP_LEN)) & 3)) : 0; - skb_put(skb_aggr, *pad); + memcpy(skb_put(skb_aggr, skb_src->len), skb_src->data, skb_src->len); + + /* Add padding for new MSDU to start from 4 byte boundary */ + *pad = (4 - ((unsigned long)skb_aggr->tail & 0x3)) % 4; return skb_aggr->len + *pad; } diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 591ccd33f83c..24e2582b467c 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -54,8 +54,13 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, tbl->rx_reorder_ptr[i] = NULL; } spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); - if (rx_tmp_ptr) - mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); + if (rx_tmp_ptr) { + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) + mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr); + else + mwifiex_process_rx_packet(priv->adapter, + rx_tmp_ptr); + } } spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -97,7 +102,11 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, rx_tmp_ptr = tbl->rx_reorder_ptr[i]; tbl->rx_reorder_ptr[i] = NULL; spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); - mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); + + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) + mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr); + else + mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); } spin_lock_irqsave(&priv->rx_pkt_lock, flags); @@ -148,7 +157,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, * This function returns the pointer to an entry in Rx reordering * table which matches the given TA/TID pair. */ -static struct mwifiex_rx_reorder_tbl * +struct mwifiex_rx_reorder_tbl * mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) { struct mwifiex_rx_reorder_tbl *tbl; @@ -167,6 +176,31 @@ mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) return NULL; } +/* This function retrieves the pointer to an entry in Rx reordering + * table which matches the given TA and deletes it. + */ +void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta) +{ + struct mwifiex_rx_reorder_tbl *tbl, *tmp; + unsigned long flags; + + if (!ta) + return; + + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) { + if (!memcmp(tbl->ta, ta, ETH_ALEN)) { + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, + flags); + mwifiex_del_rx_reorder_entry(priv, tbl); + spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); + } + } + spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); + + return; +} + /* * This function finds the last sequence number used in the packets * buffered in Rx reordering table. 
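Both delete-by-address helpers added above (mwifiex_del_tx_ba_stream_tbl_by_ra and mwifiex_11n_del_rx_reorder_tbl_by_ta) follow the same pattern: walk the list with list_for_each_entry_safe under the spinlock, and drop the lock around the per-entry teardown because the teardown helper re-acquires that same lock. A condensed sketch of the pattern, using hypothetical names (foo_entry, foo_destroy, foo_del_by_addr) in place of the mwifiex structures; foo_destroy() is assumed to take foo_lock internally, as mwifiex_del_rx_reorder_entry() does with rx_reorder_tbl_lock:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct foo_entry {
	struct list_head list;
	u8 addr[6];
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Assumed to unlink and free the entry, taking foo_lock itself. */
void foo_destroy(struct foo_entry *entry);

void foo_del_by_addr(const u8 *addr)
{
	struct foo_entry *entry, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&foo_lock, flags);
	list_for_each_entry_safe(entry, tmp, &foo_list, list) {
		if (!memcmp(entry->addr, addr, sizeof(entry->addr))) {
			/* release the lock so foo_destroy() can take it */
			spin_unlock_irqrestore(&foo_lock, flags);
			foo_destroy(entry);
			spin_lock_irqsave(&foo_lock, flags);
		}
	}
	spin_unlock_irqrestore(&foo_lock, flags);
}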
@@ -226,6 +260,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, struct mwifiex_rx_reorder_tbl *tbl, *new_node; u16 last_seq = 0; unsigned long flags; + struct mwifiex_sta_node *node; /* * If we get a TID, ta pair which is already present dispatch all the @@ -248,13 +283,19 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, new_node->tid = tid; memcpy(new_node->ta, ta, ETH_ALEN); new_node->start_win = seq_num; - if (mwifiex_queuing_ra_based(priv)) - /* TODO for adhoc */ + + if (mwifiex_queuing_ra_based(priv)) { dev_dbg(priv->adapter->dev, - "info: ADHOC:last_seq=%d start_win=%d\n", + "info: AP/ADHOC:last_seq=%d start_win=%d\n", last_seq, new_node->start_win); - else + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) { + node = mwifiex_get_sta_entry(priv, ta); + if (node) + last_seq = node->rx_seq[tid]; + } + } else { last_seq = priv->rx_seq[tid]; + } if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && last_seq >= new_node->start_win) @@ -396,8 +437,13 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); if (!tbl) { - if (pkt_type != PKT_TYPE_BAR) - mwifiex_process_rx_packet(priv->adapter, payload); + if (pkt_type != PKT_TYPE_BAR) { + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) + mwifiex_handle_uap_rx_forward(priv, payload); + else + mwifiex_process_rx_packet(priv->adapter, + payload); + } return 0; } start_win = tbl->start_win; diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h index 6c9815a0f5d8..72848591691a 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.h +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h @@ -38,6 +38,8 @@ #define ADDBA_RSP_STATUS_ACCEPT 0 #define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff +#define BA_SETUP_MAX_PACKET_THRESHOLD 16 +#define BA_SETUP_PACKET_OFFSET 16 static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv) { @@ -68,5 +70,8 @@ struct mwifiex_rx_reorder_tbl *mwifiex_11n_get_rxreorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta); +struct mwifiex_rx_reorder_tbl * +mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta); +void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta); #endif /* _MWIFIEX_11N_RXREORDER_H_ */ diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile index 3f66ebb0a630..dd0410d2d465 100644 --- a/drivers/net/wireless/mwifiex/Makefile +++ b/drivers/net/wireless/mwifiex/Makefile @@ -33,8 +33,10 @@ mwifiex-y += uap_cmd.o mwifiex-y += ie.o mwifiex-y += sta_cmdresp.o mwifiex-y += sta_event.o +mwifiex-y += uap_event.o mwifiex-y += sta_tx.o mwifiex-y += sta_rx.o +mwifiex-y += uap_txrx.o mwifiex-y += cfg80211.o mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o obj-$(CONFIG_MWIFIEX) += mwifiex.o diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index fe42137384da..e57f543413de 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -99,7 +99,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; const u8 *peer_mac = pairwise ? 
mac_addr : bc_mac; - if (mwifiex_set_encode(priv, NULL, 0, key_index, peer_mac, 1)) { + if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) { wiphy_err(wiphy, "deleting the crypto keys\n"); return -EFAULT; } @@ -171,7 +171,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev, if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) { priv->wep_key_curr_index = key_index; - } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) { + } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, + NULL, 0)) { wiphy_err(wiphy, "set default Tx key index\n"); return -EFAULT; } @@ -207,7 +208,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev, return 0; } - if (mwifiex_set_encode(priv, params->key, params->key_len, + if (mwifiex_set_encode(priv, params, params->key, params->key_len, key_index, peer_mac, 0)) { wiphy_err(wiphy, "crypto keys added\n"); return -EFAULT; @@ -748,6 +749,7 @@ static const u32 mwifiex_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, }; /* @@ -906,6 +908,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) if (mwifiex_del_mgmt_ies(priv)) wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); + priv->ap_11n_enabled = 0; + if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP, HostCmd_ACT_GEN_SET, 0, NULL)) { wiphy_err(wiphy, "Failed to stop the BSS\n"); @@ -1159,7 +1163,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid, priv->wep_key_curr_index = 0; priv->sec_info.encryption_mode = 0; priv->sec_info.is_authtype_auto = 0; - ret = mwifiex_set_encode(priv, NULL, 0, 0, NULL, 1); + ret = mwifiex_set_encode(priv, NULL, NULL, 0, 0, NULL, 1); if (mode == NL80211_IFTYPE_ADHOC) { /* "privacy" is set only for ad-hoc mode */ @@ -1206,8 +1210,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid, "info: setting wep encryption" " with key len %d\n", sme->key_len); priv->wep_key_curr_index = sme->key_idx; - ret = mwifiex_set_encode(priv, sme->key, sme->key_len, - sme->key_idx, NULL, 0); + ret = mwifiex_set_encode(priv, NULL, sme->key, + sme->key_len, sme->key_idx, + NULL, 0); } } done: diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 565527aee0ea..225c1a4feeba 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -460,7 +460,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); } - ret = mwifiex_process_sta_event(priv); + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) + ret = mwifiex_process_uap_event(priv); + else + ret = mwifiex_process_sta_event(priv); adapter->event_cause = 0; adapter->event_skb = NULL; diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h index 070ef25f5186..400d360ac91f 100644 --- a/drivers/net/wireless/mwifiex/decl.h +++ b/drivers/net/wireless/mwifiex/decl.h @@ -60,6 +60,9 @@ #define MWIFIEX_SDIO_BLOCK_SIZE 256 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0) +#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1) + +#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024 enum mwifiex_bss_type { MWIFIEX_BSS_TYPE_STA = 0, diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index e831b440a24a..ae06f31c6838 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -65,10 +65,12 @@ enum KEY_TYPE_ID { 
KEY_TYPE_ID_TKIP, KEY_TYPE_ID_AES, KEY_TYPE_ID_WAPI, + KEY_TYPE_ID_AES_CMAC, }; #define KEY_MCAST BIT(0) #define KEY_UNICAST BIT(1) #define KEY_ENABLED BIT(2) +#define KEY_IGTK BIT(10) #define WAPI_KEY_LEN 50 @@ -424,10 +426,10 @@ struct txpd { struct rxpd { u8 bss_type; u8 bss_num; - u16 rx_pkt_length; - u16 rx_pkt_offset; - u16 rx_pkt_type; - u16 seq_num; + __le16 rx_pkt_length; + __le16 rx_pkt_offset; + __le16 rx_pkt_type; + __le16 seq_num; u8 priority; u8 rx_rate; s8 snr; @@ -439,6 +441,31 @@ struct rxpd { u8 reserved; } __packed; +struct uap_txpd { + u8 bss_type; + u8 bss_num; + __le16 tx_pkt_length; + __le16 tx_pkt_offset; + __le16 tx_pkt_type; + __le32 tx_control; + u8 priority; + u8 flags; + u8 pkt_delay_2ms; + u8 reserved1; + __le32 reserved2; +}; + +struct uap_rxpd { + u8 bss_type; + u8 bss_num; + __le16 rx_pkt_length; + __le16 rx_pkt_offset; + __le16 rx_pkt_type; + __le16 seq_num; + u8 priority; + u8 reserved1; +}; + enum mwifiex_chan_scan_mode_bitmasks { MWIFIEX_PASSIVE_SCAN = BIT(0), MWIFIEX_DISABLE_CHAN_FILT = BIT(1), @@ -558,6 +585,13 @@ struct mwifiex_ie_type_key_param_set { u8 key[50]; } __packed; +#define IGTK_PN_LEN 8 + +struct mwifiex_cmac_param { + u8 ipn[IGTK_PN_LEN]; + u8 key[WLAN_KEY_LEN_AES_CMAC]; +} __packed; + struct host_cmd_ds_802_11_key_material { __le16 action; struct mwifiex_ie_type_key_param_set key_param_set; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 21fdc6c02775..fad2c8d2bdde 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -64,60 +64,72 @@ static void scan_delay_timer_fn(unsigned long data) struct cmd_ctrl_node *cmd_node, *tmp_node; unsigned long flags; - if (!mwifiex_wmm_lists_empty(adapter)) { - if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { + if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) { + /* + * Abort scan operation by cancelling all pending scan + * commands + */ + spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); + list_for_each_entry_safe(cmd_node, tmp_node, + &adapter->scan_pending_q, list) { + list_del(&cmd_node->list); + cmd_node->wait_q_enabled = false; + mwifiex_insert_cmd_to_free_q(adapter, cmd_node); + } + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); + + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); + adapter->scan_processing = false; + adapter->scan_delay_cnt = 0; + adapter->empty_tx_q_cnt = 0; + spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + + if (priv->user_scan_cfg) { + dev_dbg(priv->adapter->dev, + "info: %s: scan aborted\n", __func__); + cfg80211_scan_done(priv->scan_request, 1); + priv->scan_request = NULL; + kfree(priv->user_scan_cfg); + priv->user_scan_cfg = NULL; + } + goto done; + } + + if (!atomic_read(&priv->adapter->is_tx_received)) { + adapter->empty_tx_q_cnt++; + if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) { /* - * Abort scan operation by cancelling all pending scan - * command + * No Tx traffic for 200msec. 
Get scan command from + * scan pending queue and put to cmd pending queue to + * resume scan operation */ + adapter->scan_delay_cnt = 0; + adapter->empty_tx_q_cnt = 0; spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - list_for_each_entry_safe(cmd_node, tmp_node, - &adapter->scan_pending_q, - list) { - list_del(&cmd_node->list); - cmd_node->wait_q_enabled = false; - mwifiex_insert_cmd_to_free_q(adapter, cmd_node); - } + cmd_node = list_first_entry(&adapter->scan_pending_q, + struct cmd_ctrl_node, list); + list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); - adapter->scan_processing = false; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, - flags); - - if (priv->user_scan_cfg) { - dev_dbg(priv->adapter->dev, - "info: %s: scan aborted\n", __func__); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - kfree(priv->user_scan_cfg); - priv->user_scan_cfg = NULL; - } - } else { - /* - * Tx data queue is still not empty, delay scan - * operation further by 20msec. - */ - mod_timer(&priv->scan_delay_timer, jiffies + - msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); - adapter->scan_delay_cnt++; + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, + true); + goto done; } - queue_work(priv->adapter->workqueue, &priv->adapter->main_work); } else { - /* - * Tx data queue is empty. Get scan command from scan_pending_q - * and put to cmd_pending_q to resume scan operation - */ - adapter->scan_delay_cnt = 0; - spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); - cmd_node = list_first_entry(&adapter->scan_pending_q, - struct cmd_ctrl_node, list); - list_del(&cmd_node->list); - spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + adapter->empty_tx_q_cnt = 0; } + + /* Delay scan operation further by 20msec */ + mod_timer(&priv->scan_delay_timer, jiffies + + msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); + adapter->scan_delay_cnt++; + +done: + if (atomic_read(&priv->adapter->is_tx_received)) + atomic_set(&priv->adapter->is_tx_received, false); + + return; } /* @@ -196,6 +208,7 @@ static int mwifiex_init_priv(struct mwifiex_private *priv) priv->curr_bcn_size = 0; priv->wps_ie = NULL; priv->wps_ie_len = 0; + priv->ap_11n_enabled = 0; priv->scan_block = false; @@ -345,6 +358,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter)); adapter->arp_filter_size = 0; adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; + adapter->empty_tx_q_cnt = 0; } /* @@ -410,6 +424,7 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) list_del(&priv->wmm.tid_tbl_ptr[j].ra_list); list_del(&priv->tx_ba_stream_tbl_ptr); list_del(&priv->rx_reorder_tbl_ptr); + list_del(&priv->sta_list); } } } @@ -472,6 +487,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) spin_lock_init(&priv->rx_pkt_lock); spin_lock_init(&priv->wmm.ra_list_spinlock); spin_lock_init(&priv->curr_bcn_buf_lock); + spin_lock_init(&priv->sta_list_spinlock); } } @@ -504,6 +520,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter) } INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr); INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); + INIT_LIST_HEAD(&priv->sta_list); spin_lock_init(&priv->tx_ba_stream_tbl_lock); spin_lock_init(&priv->rx_reorder_tbl_lock); diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index 50191539bb32..6a5eded3be10 
100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -213,7 +213,7 @@ struct mwifiex_debug_info { }; #define MWIFIEX_KEY_INDEX_UNICAST 0x40000000 -#define WAPI_RXPN_LEN 16 +#define PN_LEN 16 struct mwifiex_ds_encrypt_key { u32 key_disable; @@ -222,7 +222,8 @@ struct mwifiex_ds_encrypt_key { u8 key_material[WLAN_MAX_KEY_LEN]; u8 mac_addr[ETH_ALEN]; u32 is_wapi_key; - u8 wapi_rxpn[WAPI_RXPN_LEN]; + u8 pn[PN_LEN]; /* packet number */ + u8 is_igtk_key; }; struct mwifiex_power_cfg { diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 46803621d015..cb1155286e0f 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -520,6 +520,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) mwifiex_wmm_add_buf_txqueue(priv, skb); atomic_inc(&priv->adapter->tx_pending); + if (priv->adapter->scan_delay_cnt) + atomic_set(&priv->adapter->is_tx_received, true); + if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) { mwifiex_set_trans_start(dev); mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index e7c2a82fd610..994bc4fc263e 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -88,6 +88,7 @@ enum { #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) #define MWIFIEX_MAX_SCAN_DELAY_CNT 50 +#define MWIFIEX_MAX_EMPTY_TX_Q_CNT 10 #define MWIFIEX_SCAN_DELAY_MSEC 20 #define RSN_GTK_OUI_OFFSET 2 @@ -199,6 +200,9 @@ struct mwifiex_ra_list_tbl { u8 ra[ETH_ALEN]; u32 total_pkts_size; u32 is_11n_enabled; + u16 max_amsdu; + u16 pkt_count; + u8 ba_packet_thr; }; struct mwifiex_tid_tbl { @@ -431,6 +435,9 @@ struct mwifiex_private { u8 wmm_enabled; u8 wmm_qosinfo; struct mwifiex_wmm_desc wmm; + struct list_head sta_list; + /* spin lock for associated station list */ + spinlock_t sta_list_spinlock; struct list_head tx_ba_stream_tbl_ptr; /* spin lock for tx_ba_stream_tbl_ptr queue */ spinlock_t tx_ba_stream_tbl_lock; @@ -486,6 +493,7 @@ struct mwifiex_private { u16 assocresp_idx; u16 rsn_idx; struct timer_list scan_delay_timer; + u8 ap_11n_enabled; }; enum mwifiex_ba_status { @@ -550,6 +558,19 @@ struct mwifiex_bss_priv { u64 fw_tsf; }; +/* This is AP specific structure which stores information + * about associated STA + */ +struct mwifiex_sta_node { + struct list_head list; + u8 mac_addr[ETH_ALEN]; + u8 is_wmm_enabled; + u8 is_11n_enabled; + u8 ampdu_sta[MAX_NUM_TID]; + u16 rx_seq[MAX_NUM_TID]; + u16 max_amsdu; +}; + struct mwifiex_if_ops { int (*init_if) (struct mwifiex_adapter *); void (*cleanup_if) (struct mwifiex_adapter *); @@ -690,6 +711,9 @@ struct mwifiex_adapter { u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; u16 max_mgmt_ie_index; u8 scan_delay_cnt; + u8 empty_tx_q_cnt; + atomic_t is_tx_received; + atomic_t pending_bridged_pkts; }; int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); @@ -780,7 +804,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no, struct host_cmd_ds_command *resp); int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *, struct sk_buff *skb); +int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter, + struct sk_buff *skb); +int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, + struct sk_buff *skb); int mwifiex_process_sta_event(struct mwifiex_private *); +int mwifiex_process_uap_event(struct mwifiex_private *); +struct 
mwifiex_sta_node * +mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac); +void mwifiex_delete_all_station_list(struct mwifiex_private *priv); void *mwifiex_process_sta_txpd(struct mwifiex_private *, struct sk_buff *skb); int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta); int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, @@ -949,9 +981,9 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, const struct mwifiex_user_scan_cfg *user_scan_in); int mwifiex_set_radio(struct mwifiex_private *priv, u8 option); -int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key, - int key_len, u8 key_index, const u8 *mac_addr, - int disable); +int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, + const u8 *key, int key_len, u8 key_index, + const u8 *mac_addr, int disable); int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len); diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 04dc7ca4ac22..215d07e6c462 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -989,6 +989,8 @@ mwifiex_config_scan(struct mwifiex_private *priv, *max_chan_per_scan = 2; else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD) *max_chan_per_scan = 3; + else + *max_chan_per_scan = 4; } } @@ -1433,9 +1435,9 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, if (ret) dev_err(priv->adapter->dev, "cannot find ssid " "%s\n", bss_desc->ssid.ssid); - break; + break; default: - ret = 0; + ret = 0; } } diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index df3a33c530cf..0cc3406050dc 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -610,7 +610,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, memcpy(&key_material->key_param_set.key[2], enc_key->key_material, enc_key->key_len); memcpy(&key_material->key_param_set.key[2 + enc_key->key_len], - enc_key->wapi_rxpn, WAPI_RXPN_LEN); + enc_key->pn, PN_LEN); key_material->key_param_set.length = cpu_to_le16(WAPI_KEY_LEN + KEYPARAMSET_FIXED_LEN); @@ -621,23 +621,38 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, return ret; } if (enc_key->key_len == WLAN_KEY_LEN_CCMP) { - dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n"); - key_material->key_param_set.key_type_id = + if (enc_key->is_igtk_key) { + dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n"); + key_material->key_param_set.key_type_id = + cpu_to_le16(KEY_TYPE_ID_AES_CMAC); + if (cmd_oid == KEY_INFO_ENABLED) + key_material->key_param_set.key_info = + cpu_to_le16(KEY_ENABLED); + else + key_material->key_param_set.key_info = + cpu_to_le16(!KEY_ENABLED); + + key_material->key_param_set.key_info |= + cpu_to_le16(KEY_IGTK); + } else { + dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n"); + key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_AES); - if (cmd_oid == KEY_INFO_ENABLED) - key_material->key_param_set.key_info = + if (cmd_oid == KEY_INFO_ENABLED) + key_material->key_param_set.key_info = cpu_to_le16(KEY_ENABLED); - else - key_material->key_param_set.key_info = + else + key_material->key_param_set.key_info = cpu_to_le16(!KEY_ENABLED); - if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST) + if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST) /* AES pairwise key: unicast */ - key_material->key_param_set.key_info |= + key_material->key_param_set.key_info |= cpu_to_le16(KEY_UNICAST); - else /* AES group key: multicast */ - 
key_material->key_param_set.key_info |= + else /* AES group key: multicast */ + key_material->key_param_set.key_info |= cpu_to_le16(KEY_MCAST); + } } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n"); key_material->key_param_set.key_type_id = @@ -668,6 +683,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, key_param_len = (u16)(enc_key->key_len + KEYPARAMSET_FIXED_LEN) + sizeof(struct mwifiex_ie_types_header); + if (le16_to_cpu(key_material->key_param_set.key_type_id) == + KEY_TYPE_ID_AES_CMAC) { + struct mwifiex_cmac_param *param = + (void *)key_material->key_param_set.key; + + memcpy(param->ipn, enc_key->pn, IGTK_PN_LEN); + memcpy(param->key, enc_key->key_material, + WLAN_KEY_LEN_AES_CMAC); + + key_param_len = sizeof(struct mwifiex_cmac_param); + key_material->key_param_set.key_len = + cpu_to_le16(key_param_len); + key_param_len += KEYPARAMSET_FIXED_LEN; + key_material->key_param_set.length = + cpu_to_le16(key_param_len); + key_param_len += sizeof(struct mwifiex_ie_types_header); + } + cmd->size = cpu_to_le16(sizeof(key_material->action) + S_DS_GEN + key_param_len); diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index b8614a825460..dff51d55271c 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -184,10 +184,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv) int mwifiex_process_sta_event(struct mwifiex_private *priv) { struct mwifiex_adapter *adapter = priv->adapter; - int len, ret = 0; + int ret = 0; u32 eventcause = adapter->event_cause; - struct station_info sinfo; - struct mwifiex_assoc_event *event; + u16 ctrl; switch (eventcause) { case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: @@ -279,10 +278,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_MIC_ERR_UNICAST: dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n"); + cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid, + NL80211_KEYTYPE_PAIRWISE, + -1, NULL, GFP_KERNEL); break; case EVENT_MIC_ERR_MULTICAST: dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n"); + cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid, + NL80211_KEYTYPE_GROUP, + -1, NULL, GFP_KERNEL); break; case EVENT_MIB_CHANGED: case EVENT_INIT_DONE: @@ -384,11 +389,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) adapter->event_body); break; case EVENT_AMSDU_AGGR_CTRL: - dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", - *(u16 *) adapter->event_body); + ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); + dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); + adapter->tx_buf_size = - min(adapter->curr_tx_buf_size, - le16_to_cpu(*(__le16 *) adapter->event_body)); + min_t(u16, adapter->curr_tx_buf_size, ctrl); dev_dbg(adapter->dev, "event: tx_buf_size %d\n", adapter->tx_buf_size); break; @@ -405,51 +410,6 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause); break; - case EVENT_UAP_STA_ASSOC: - memset(&sinfo, 0, sizeof(sinfo)); - event = (struct mwifiex_assoc_event *) - (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER); - if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { - len = -1; - - if (ieee80211_is_assoc_req(event->frame_control)) - len = 0; - else if (ieee80211_is_reassoc_req(event->frame_control)) - /* There will be ETH_ALEN bytes of - * current_ap_addr before the re-assoc ies. 
- */ - len = ETH_ALEN; - - if (len != -1) { - sinfo.filled = STATION_INFO_ASSOC_REQ_IES; - sinfo.assoc_req_ies = &event->data[len]; - len = (u8 *)sinfo.assoc_req_ies - - (u8 *)&event->frame_control; - sinfo.assoc_req_ies_len = - le16_to_cpu(event->len) - (u16)len; - } - } - cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo, - GFP_KERNEL); - break; - case EVENT_UAP_STA_DEAUTH: - cfg80211_del_sta(priv->netdev, adapter->event_body + - MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL); - break; - case EVENT_UAP_BSS_IDLE: - priv->media_connected = false; - break; - case EVENT_UAP_BSS_ACTIVE: - priv->media_connected = true; - break; - case EVENT_UAP_BSS_START: - dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); - memcpy(priv->netdev->dev_addr, adapter->event_body+2, ETH_ALEN); - break; - case EVENT_UAP_MIC_COUNTERMEASURES: - /* For future development */ - dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); - break; default: dev_dbg(adapter->dev, "event: unknown event id: %#x\n", eventcause); diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index fb2136089a22..3f025976f79a 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -942,20 +942,26 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ -int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key, - int key_len, u8 key_index, - const u8 *mac_addr, int disable) +int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, + const u8 *key, int key_len, u8 key_index, + const u8 *mac_addr, int disable) { struct mwifiex_ds_encrypt_key encrypt_key; memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key)); encrypt_key.key_len = key_len; + + if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + encrypt_key.is_igtk_key = true; + if (!disable) { encrypt_key.key_index = key_index; if (key_len) memcpy(encrypt_key.key_material, key, key_len); if (mac_addr) memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); + if (kp && kp->seq && kp->seq_len) + memcpy(encrypt_key.pn, kp->seq, kp->seq_len); } else { encrypt_key.key_disable = true; if (mac_addr) diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index 02ce3b77d3e7..d91d5c08c73a 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -54,8 +54,8 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter, local_rx_pd = (struct rxpd *) (skb->data); - rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd + - local_rx_pd->rx_pkt_offset); + rx_pkt_hdr = (void *)local_rx_pd + + le16_to_cpu(local_rx_pd->rx_pkt_offset); if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) { @@ -125,7 +125,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); struct rx_packet_hdr *rx_pkt_hdr; u8 ta[ETH_ALEN]; - u16 rx_pkt_type; + u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num; struct mwifiex_private *priv = mwifiex_get_priv_by_id(adapter, rx_info->bss_num, rx_info->bss_type); @@ -134,16 +134,17 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, return -1; local_rx_pd = (struct rxpd *) (skb->data); - rx_pkt_type = local_rx_pd->rx_pkt_type; + rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type); + rx_pkt_offset = 
le16_to_cpu(local_rx_pd->rx_pkt_offset); + rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length); + seq_num = le16_to_cpu(local_rx_pd->seq_num); - rx_pkt_hdr = (struct rx_packet_hdr *) ((u8 *) local_rx_pd + - local_rx_pd->rx_pkt_offset); + rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset; - if ((local_rx_pd->rx_pkt_offset + local_rx_pd->rx_pkt_length) > - (u16) skb->len) { - dev_err(adapter->dev, "wrong rx packet: len=%d," - " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, - local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length); + if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) { + dev_err(adapter->dev, + "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", + skb->len, rx_pkt_offset, rx_pkt_length); priv->stats.rx_dropped++; if (adapter->if_ops.data_complete) @@ -154,14 +155,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, return ret; } - if (local_rx_pd->rx_pkt_type == PKT_TYPE_AMSDU) { + if (rx_pkt_type == PKT_TYPE_AMSDU) { struct sk_buff_head list; struct sk_buff *rx_skb; __skb_queue_head_init(&list); - skb_pull(skb, local_rx_pd->rx_pkt_offset); - skb_trim(skb, local_rx_pd->rx_pkt_length); + skb_pull(skb, rx_pkt_offset); + skb_trim(skb, rx_pkt_length); ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, priv->wdev->iftype, 0, false); @@ -189,17 +190,14 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN); } else { if (rx_pkt_type != PKT_TYPE_BAR) - priv->rx_seq[local_rx_pd->priority] = - local_rx_pd->seq_num; + priv->rx_seq[local_rx_pd->priority] = seq_num; memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address, ETH_ALEN); } /* Reorder and send to OS */ - ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num, - local_rx_pd->priority, ta, - (u8) local_rx_pd->rx_pkt_type, - skb); + ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority, + ta, (u8) rx_pkt_type, skb); if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { if (adapter->if_ops.data_complete) diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index cecb27283196..985073d0df1a 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -51,6 +51,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) + return mwifiex_process_uap_rx_packet(adapter, skb); + return mwifiex_process_sta_rx_packet(adapter, skb); } EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); @@ -157,6 +160,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, priv->stats.tx_errors++; } + if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) + atomic_dec_return(&adapter->pending_bridged_pkts); if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING) goto done; diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index f40e93fe894a..c10aac04be6a 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -167,6 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv, if (ht_ie) { memcpy(&bss_cfg->ht_cap, ht_ie + 2, sizeof(struct ieee80211_ht_cap)); + priv->ap_11n_enabled = 1; } else { memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap)); bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP); diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c new file mode 100644 index 
000000000000..a33fa394e349 --- /dev/null +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -0,0 +1,290 @@ +/* + * Marvell Wireless LAN device driver: AP event handling + * + * Copyright (C) 2012, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#include "decl.h" +#include "main.h" +#include "11n.h" + +/* + * This function will return the pointer to station entry in station list + * table which matches specified mac address. + * This function should be called after acquiring RA list spinlock. + * NULL is returned if station entry is not found in associated STA list. + */ +struct mwifiex_sta_node * +mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac) +{ + struct mwifiex_sta_node *node; + + if (!mac) + return NULL; + + list_for_each_entry(node, &priv->sta_list, list) { + if (!memcmp(node->mac_addr, mac, ETH_ALEN)) + return node; + } + + return NULL; +} + +/* + * This function will add a sta_node entry to associated station list + * table with the given mac address. + * If entry exist already, existing entry is returned. + * If received mac address is NULL, NULL is returned. + */ +static struct mwifiex_sta_node * +mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac) +{ + struct mwifiex_sta_node *node; + unsigned long flags; + + if (!mac) + return NULL; + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + node = mwifiex_get_sta_entry(priv, mac); + if (node) + goto done; + + node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC); + if (!node) + goto done; + + memcpy(node->mac_addr, mac, ETH_ALEN); + list_add_tail(&node->list, &priv->sta_list); + +done: + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + return node; +} + +/* + * This function will search for HT IE in association request IEs + * and set station HT parameters accordingly. + */ +static void +mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies, + int ies_len, struct mwifiex_sta_node *node) +{ + const struct ieee80211_ht_cap *ht_cap; + + if (!ies) + return; + + ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len); + if (ht_cap) { + node->is_11n_enabled = 1; + node->max_amsdu = le16_to_cpu(ht_cap->cap_info) & + IEEE80211_HT_CAP_MAX_AMSDU ? 
+ MWIFIEX_TX_DATA_BUF_SIZE_8K : + MWIFIEX_TX_DATA_BUF_SIZE_4K; + } else { + node->is_11n_enabled = 0; + } + + return; +} + +/* + * This function will delete a station entry from station list + */ +static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac) +{ + struct mwifiex_sta_node *node, *tmp; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + + node = mwifiex_get_sta_entry(priv, mac); + if (node) { + list_for_each_entry_safe(node, tmp, &priv->sta_list, + list) { + list_del(&node->list); + kfree(node); + } + } + + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + return; +} + +/* + * This function will delete all stations from associated station list. + */ +static void mwifiex_del_all_sta_list(struct mwifiex_private *priv) +{ + struct mwifiex_sta_node *node, *tmp; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + + list_for_each_entry_safe(node, tmp, &priv->sta_list, list) { + list_del(&node->list); + kfree(node); + } + + INIT_LIST_HEAD(&priv->sta_list); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); + return; +} + +/* + * This function handles AP interface specific events generated by firmware. + * + * Event specific routines are called by this function based + * upon the generated event cause. + * + * + * Events supported for AP - + * - EVENT_UAP_STA_ASSOC + * - EVENT_UAP_STA_DEAUTH + * - EVENT_UAP_BSS_ACTIVE + * - EVENT_UAP_BSS_START + * - EVENT_UAP_BSS_IDLE + * - EVENT_UAP_MIC_COUNTERMEASURES: + */ +int mwifiex_process_uap_event(struct mwifiex_private *priv) +{ + struct mwifiex_adapter *adapter = priv->adapter; + int len, i; + u32 eventcause = adapter->event_cause; + struct station_info sinfo; + struct mwifiex_assoc_event *event; + struct mwifiex_sta_node *node; + u8 *deauth_mac; + struct host_cmd_ds_11n_batimeout *ba_timeout; + u16 ctrl; + + switch (eventcause) { + case EVENT_UAP_STA_ASSOC: + memset(&sinfo, 0, sizeof(sinfo)); + event = (struct mwifiex_assoc_event *) + (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER); + if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { + len = -1; + + if (ieee80211_is_assoc_req(event->frame_control)) + len = 0; + else if (ieee80211_is_reassoc_req(event->frame_control)) + /* There will be ETH_ALEN bytes of + * current_ap_addr before the re-assoc ies. 
+ */ + len = ETH_ALEN; + + if (len != -1) { + sinfo.filled = STATION_INFO_ASSOC_REQ_IES; + sinfo.assoc_req_ies = &event->data[len]; + len = (u8 *)sinfo.assoc_req_ies - + (u8 *)&event->frame_control; + sinfo.assoc_req_ies_len = + le16_to_cpu(event->len) - (u16)len; + } + } + cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo, + GFP_KERNEL); + + node = mwifiex_add_sta_entry(priv, event->sta_addr); + if (!node) { + dev_warn(adapter->dev, + "could not create station entry!\n"); + return -1; + } + + if (!priv->ap_11n_enabled) + break; + + mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies, + sinfo.assoc_req_ies_len, node); + + for (i = 0; i < MAX_NUM_TID; i++) { + if (node->is_11n_enabled) + node->ampdu_sta[i] = + priv->aggr_prio_tbl[i].ampdu_user; + else + node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED; + } + memset(node->rx_seq, 0xff, sizeof(node->rx_seq)); + break; + case EVENT_UAP_STA_DEAUTH: + deauth_mac = adapter->event_body + + MWIFIEX_UAP_EVENT_EXTRA_HEADER; + cfg80211_del_sta(priv->netdev, deauth_mac, GFP_KERNEL); + + if (priv->ap_11n_enabled) { + mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, deauth_mac); + mwifiex_del_tx_ba_stream_tbl_by_ra(priv, deauth_mac); + } + mwifiex_del_sta_entry(priv, deauth_mac); + break; + case EVENT_UAP_BSS_IDLE: + priv->media_connected = false; + mwifiex_clean_txrx(priv); + mwifiex_del_all_sta_list(priv); + break; + case EVENT_UAP_BSS_ACTIVE: + priv->media_connected = true; + break; + case EVENT_UAP_BSS_START: + dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); + memcpy(priv->netdev->dev_addr, adapter->event_body + 2, + ETH_ALEN); + break; + case EVENT_UAP_MIC_COUNTERMEASURES: + /* For future development */ + dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); + break; + case EVENT_AMSDU_AGGR_CTRL: + ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); + dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); + + if (priv->media_connected) { + adapter->tx_buf_size = + min_t(u16, adapter->curr_tx_buf_size, ctrl); + dev_dbg(adapter->dev, "event: tx_buf_size %d\n", + adapter->tx_buf_size); + } + break; + case EVENT_ADDBA: + dev_dbg(adapter->dev, "event: ADDBA Request\n"); + if (priv->media_connected) + mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP, + HostCmd_ACT_GEN_SET, 0, + adapter->event_body); + break; + case EVENT_DELBA: + dev_dbg(adapter->dev, "event: DELBA Request\n"); + if (priv->media_connected) + mwifiex_11n_delete_ba_stream(priv, adapter->event_body); + break; + case EVENT_BA_STREAM_TIEMOUT: + dev_dbg(adapter->dev, "event: BA Stream timeout\n"); + if (priv->media_connected) { + ba_timeout = (void *)adapter->event_body; + mwifiex_11n_ba_stream_timeout(priv, ba_timeout); + } + break; + default: + dev_dbg(adapter->dev, "event: unknown event id: %#x\n", + eventcause); + break; + } + + return 0; +} diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c new file mode 100644 index 000000000000..6d814f0f07f2 --- /dev/null +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -0,0 +1,255 @@ +/* + * Marvell Wireless LAN device driver: AP TX and RX data handling + * + * Copyright (C) 2012, Marvell International Ltd. + * + * This software file (the "File") is distributed by Marvell International + * Ltd. under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). 
You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#include "decl.h" +#include "ioctl.h" +#include "main.h" +#include "wmm.h" +#include "11n_aggr.h" +#include "11n_rxreorder.h" + +static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + struct mwifiex_adapter *adapter = priv->adapter; + struct uap_rxpd *uap_rx_pd; + struct rx_packet_hdr *rx_pkt_hdr; + struct sk_buff *new_skb; + struct mwifiex_txinfo *tx_info; + int hdr_chop; + struct timeval tv; + u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; + + uap_rx_pd = (struct uap_rxpd *)(skb->data); + rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); + + if ((atomic_read(&adapter->pending_bridged_pkts) >= + MWIFIEX_BRIDGED_PKTS_THRESHOLD)) { + dev_err(priv->adapter->dev, + "Tx: Bridge packet limit reached. Drop packet!\n"); + kfree_skb(skb); + return; + } + + if (!memcmp(&rx_pkt_hdr->rfc1042_hdr, + rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) + /* Chop off the rxpd + the excess memory from + * 802.2/llc/snap header that was removed. + */ + hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd; + else + /* Chop off the rxpd */ + hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd; + + /* Chop off the leading header bytes so the it points + * to the start of either the reconstructed EthII frame + * or the 802.2/llc/snap frame. + */ + skb_pull(skb, hdr_chop); + + if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { + dev_dbg(priv->adapter->dev, + "data: Tx: insufficient skb headroom %d\n", + skb_headroom(skb)); + /* Insufficient skb headroom - allocate a new skb */ + new_skb = + skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); + if (unlikely(!new_skb)) { + dev_err(priv->adapter->dev, + "Tx: cannot allocate new_skb\n"); + kfree_skb(skb); + priv->stats.tx_dropped++; + return; + } + + kfree_skb(skb); + skb = new_skb; + dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n", + skb_headroom(skb)); + } + + tx_info = MWIFIEX_SKB_TXCB(skb); + tx_info->bss_num = priv->bss_num; + tx_info->bss_type = priv->bss_type; + tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; + + do_gettimeofday(&tv); + skb->tstamp = timeval_to_ktime(tv); + mwifiex_wmm_add_buf_txqueue(priv, skb); + atomic_inc(&adapter->tx_pending); + atomic_inc(&adapter->pending_bridged_pkts); + + if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) { + mwifiex_set_trans_start(priv->netdev); + mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter); + } + return; +} + +/* + * This function contains logic for AP packet forwarding. + * + * If a packet is multicast/broadcast, it is sent to kernel/upper layer + * as well as queued back to AP TX queue so that it can be sent to other + * associated stations. + * If a packet is unicast and RA is present in associated station list, + * it is again requeued into AP TX queue. + * If a packet is unicast and RA is not in associated station list, + * packet is forwarded to kernel to handle routing logic. 
+ */ +int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, + struct sk_buff *skb) +{ + struct mwifiex_adapter *adapter = priv->adapter; + struct uap_rxpd *uap_rx_pd; + struct rx_packet_hdr *rx_pkt_hdr; + u8 ra[ETH_ALEN]; + struct sk_buff *skb_uap; + + uap_rx_pd = (struct uap_rxpd *)(skb->data); + rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); + + /* don't do packet forwarding in disconnected state */ + if (!priv->media_connected) { + dev_err(adapter->dev, "drop packet in disconnected state.\n"); + dev_kfree_skb_any(skb); + return 0; + } + + memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN); + + if (is_multicast_ether_addr(ra)) { + skb_uap = skb_copy(skb, GFP_ATOMIC); + mwifiex_uap_queue_bridged_pkt(priv, skb_uap); + } else { + if (mwifiex_get_sta_entry(priv, ra)) { + /* Requeue Intra-BSS packet */ + mwifiex_uap_queue_bridged_pkt(priv, skb); + return 0; + } + } + + /* Forward unicat/Inter-BSS packets to kernel. */ + return mwifiex_process_rx_packet(adapter, skb); +} + +/* + * This function processes the packet received on AP interface. + * + * The function looks into the RxPD and performs sanity tests on the + * received buffer to ensure its a valid packet before processing it + * further. If the packet is determined to be aggregated, it is + * de-aggregated accordingly. Then skb is passed to AP packet forwarding logic. + * + * The completion callback is called after processing is complete. + */ +int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter, + struct sk_buff *skb) +{ + int ret; + struct uap_rxpd *uap_rx_pd; + struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb); + struct rx_packet_hdr *rx_pkt_hdr; + u16 rx_pkt_type; + u8 ta[ETH_ALEN], pkt_type; + struct mwifiex_sta_node *node; + + struct mwifiex_private *priv = + mwifiex_get_priv_by_id(adapter, rx_info->bss_num, + rx_info->bss_type); + + if (!priv) + return -1; + + uap_rx_pd = (struct uap_rxpd *)(skb->data); + rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type); + rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); + + if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) + + le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) { + dev_err(adapter->dev, + "wrong rx packet: len=%d, offset=%d, length=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset), + le16_to_cpu(uap_rx_pd->rx_pkt_length)); + priv->stats.rx_dropped++; + + if (adapter->if_ops.data_complete) + adapter->if_ops.data_complete(adapter, skb); + else + dev_kfree_skb_any(skb); + + return 0; + } + + if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) { + struct sk_buff_head list; + struct sk_buff *rx_skb; + + __skb_queue_head_init(&list); + skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset)); + skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length)); + + ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr, + priv->wdev->iftype, 0, false); + + while (!skb_queue_empty(&list)) { + rx_skb = __skb_dequeue(&list); + ret = mwifiex_recv_packet(adapter, rx_skb); + if (ret) + dev_err(adapter->dev, + "AP:Rx A-MSDU failed"); + } + + return 0; + } + + memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN); + + if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) { + node = mwifiex_get_sta_entry(priv, ta); + if (node) + node->rx_seq[uap_rx_pd->priority] = + le16_to_cpu(uap_rx_pd->seq_num); + } + + if (!priv->ap_11n_enabled || + (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) && + (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) { + ret = mwifiex_handle_uap_rx_forward(priv, 
skb); + return ret; + } + + /* Reorder and send to kernel */ + pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type); + ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num), + uap_rx_pd->priority, ta, pkt_type, + skb); + + if (ret || (rx_pkt_type == PKT_TYPE_BAR)) { + if (adapter->if_ops.data_complete) + adapter->if_ops.data_complete(adapter, skb); + else + dev_kfree_skb_any(skb); + } + + if (ret) + priv->stats.rx_dropped++; + + return ret; +} diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 3fa4d4176993..8ccd6999fa9f 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -127,6 +127,29 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra) return ra_list; } +/* This function returns random no between 16 and 32 to be used as threshold + * for no of packets after which BA setup is initiated. + */ +static u8 mwifiex_get_random_ba_threshold(void) +{ + u32 sec, usec; + struct timeval ba_tstamp; + u8 ba_threshold; + + /* setup ba_packet_threshold here random number between + * [BA_SETUP_PACKET_OFFSET, + * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1] + */ + + do_gettimeofday(&ba_tstamp); + sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16); + usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16); + ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD) + + BA_SETUP_PACKET_OFFSET; + + return ba_threshold; +} + /* * This function allocates and adds a RA list for all TIDs * with the given RA. @@ -137,6 +160,12 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra) int i; struct mwifiex_ra_list_tbl *ra_list; struct mwifiex_adapter *adapter = priv->adapter; + struct mwifiex_sta_node *node; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_list_spinlock, flags); + node = mwifiex_get_sta_entry(priv, ra); + spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); for (i = 0; i < MAX_NUM_TID; ++i) { ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); @@ -145,14 +174,24 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra) if (!ra_list) break; - if (!mwifiex_queuing_ra_based(priv)) + ra_list->is_11n_enabled = 0; + if (!mwifiex_queuing_ra_based(priv)) { ra_list->is_11n_enabled = IS_11N_ENABLED(priv); - else - ra_list->is_11n_enabled = false; + } else { + ra_list->is_11n_enabled = + mwifiex_is_sta_11n_enabled(priv, node); + if (ra_list->is_11n_enabled) + ra_list->max_amsdu = node->max_amsdu; + } dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", ra_list, ra_list->is_11n_enabled); + if (ra_list->is_11n_enabled) { + ra_list->pkt_count = 0; + ra_list->ba_packet_thr = + mwifiex_get_random_ba_threshold(); + } list_add_tail(&ra_list->list, &priv->wmm.tid_tbl_ptr[i].ra_list); @@ -647,6 +686,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, skb_queue_tail(&ra_list->skb_head, skb); ra_list->total_pkts_size += skb->len; + ra_list->pkt_count++; atomic_inc(&priv->wmm.tx_pkts_queued); @@ -986,10 +1026,17 @@ mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv, { int count = 0, total_size = 0; struct sk_buff *skb, *tmp; + int max_amsdu_size; + + if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled && + ptr->is_11n_enabled) + max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size); + else + max_amsdu_size = max_buf_size; skb_queue_walk_safe(&ptr->skb_head, skb, tmp) { total_size += skb->len; - if (total_size >= max_buf_size) + if (total_size >= max_amsdu_size) break; if (++count >= 
MIN_NUM_AMSDU) return true; @@ -1050,6 +1097,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, skb_queue_tail(&ptr->skb_head, skb); ptr->total_pkts_size += skb->len; + ptr->pkt_count++; tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); @@ -1231,7 +1279,8 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) /* ra_list_spinlock has been freed in mwifiex_send_single_packet() */ } else { - if (mwifiex_is_ampdu_allowed(priv, tid)) { + if (mwifiex_is_ampdu_allowed(priv, tid) && + ptr->pkt_count > ptr->ba_packet_thr) { if (mwifiex_space_avail_for_new_ba_stream(adapter)) { mwifiex_create_ba_tbl(priv, ptr->ra, tid, BA_SETUP_INPROGRESS); diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 224e03ade145..5099e5375cb3 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1830,12 +1830,14 @@ static inline void mwl8k_tx_count_packet(struct ieee80211_sta *sta, u8 tid) } static void -mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) +mwl8k_txq_xmit(struct ieee80211_hw *hw, + int index, + struct ieee80211_sta *sta, + struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; struct ieee80211_tx_info *tx_info; struct mwl8k_vif *mwl8k_vif; - struct ieee80211_sta *sta; struct ieee80211_hdr *wh; struct mwl8k_tx_queue *txq; struct mwl8k_tx_desc *tx; @@ -1867,7 +1869,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) wh = &((struct mwl8k_dma_data *)skb->data)->wh; tx_info = IEEE80211_SKB_CB(skb); - sta = tx_info->control.sta; mwl8k_vif = MWL8K_VIF(tx_info->control.vif); if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { @@ -2019,8 +2020,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) tx->pkt_phys_addr = cpu_to_le32(dma); tx->pkt_len = cpu_to_le16(skb->len); tx->rate_info = 0; - if (!priv->ap_fw && tx_info->control.sta != NULL) - tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id; + if (!priv->ap_fw && sta != NULL) + tx->peer_id = MWL8K_STA(sta)->peer_id; else tx->peer_id = 0; @@ -4364,7 +4365,9 @@ static void mwl8k_rx_poll(unsigned long data) /* * Core driver operations. 
*/ -static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void mwl8k_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; int index = skb_get_queue_mapping(skb); @@ -4376,7 +4379,7 @@ static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) return; } - mwl8k_txq_xmit(hw, index, skb); + mwl8k_txq_xmit(hw, index, control->sta, skb); } static int mwl8k_start(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index 14037092ba89..1ef1bfe6a9d7 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c @@ -76,6 +76,7 @@ struct p54_channel_entry { u16 freq; u16 data; int index; + int max_power; enum ieee80211_band band; }; @@ -173,6 +174,7 @@ static int p54_generate_band(struct ieee80211_hw *dev, for (i = 0, j = 0; (j < list->band_channel_num[band]) && (i < list->entries); i++) { struct p54_channel_entry *chan = &list->channels[i]; + struct ieee80211_channel *dest = &tmp->channels[j]; if (chan->band != band) continue; @@ -190,14 +192,15 @@ static int p54_generate_band(struct ieee80211_hw *dev, continue; } - tmp->channels[j].band = chan->band; - tmp->channels[j].center_freq = chan->freq; + dest->band = chan->band; + dest->center_freq = chan->freq; + dest->max_power = chan->max_power; priv->survey[*chan_num].channel = &tmp->channels[j]; priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_CHANNEL_TIME | SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_TX; - tmp->channels[j].hw_value = (*chan_num); + dest->hw_value = (*chan_num); j++; (*chan_num)++; } @@ -229,10 +232,11 @@ err_out: return ret; } -static void p54_update_channel_param(struct p54_channel_list *list, - u16 freq, u16 data) +static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list, + u16 freq, u16 data) { - int band, i; + int i; + struct p54_channel_entry *entry = NULL; /* * usually all lists in the eeprom are mostly sorted. @@ -241,30 +245,78 @@ static void p54_update_channel_param(struct p54_channel_list *list, */ for (i = list->entries; i >= 0; i--) { if (freq == list->channels[i].freq) { - list->channels[i].data |= data; + entry = &list->channels[i]; break; } } if ((i < 0) && (list->entries < list->max_entries)) { /* entry does not exist yet. Initialize a new one. */ - band = p54_get_band_from_freq(freq); + int band = p54_get_band_from_freq(freq); /* * filter out frequencies which don't belong into * any supported band. 
*/ - if (band < 0) - return ; + if (band >= 0) { + i = list->entries++; + list->band_channel_num[band]++; + + entry = &list->channels[i]; + entry->freq = freq; + entry->band = band; + entry->index = ieee80211_frequency_to_channel(freq); + entry->max_power = 0; + entry->data = 0; + } + } - i = list->entries++; - list->band_channel_num[band]++; + if (entry) + entry->data |= data; - list->channels[i].freq = freq; - list->channels[i].data = data; - list->channels[i].band = band; - list->channels[i].index = ieee80211_frequency_to_channel(freq); - /* TODO: parse output_limit and fill max_power */ + return entry; +} + +static int p54_get_maxpower(struct p54_common *priv, void *data) +{ + switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) { + case PDR_SYNTH_FRONTEND_LONGBOW: { + struct pda_channel_output_limit_longbow *pda = data; + int j; + u16 rawpower = 0; + pda = data; + for (j = 0; j < ARRAY_SIZE(pda->point); j++) { + struct pda_channel_output_limit_point_longbow *point = + &pda->point[j]; + rawpower = max_t(u16, + rawpower, le16_to_cpu(point->val_qpsk)); + rawpower = max_t(u16, + rawpower, le16_to_cpu(point->val_bpsk)); + rawpower = max_t(u16, + rawpower, le16_to_cpu(point->val_16qam)); + rawpower = max_t(u16, + rawpower, le16_to_cpu(point->val_64qam)); + } + /* longbow seems to use 1/16 dBm units */ + return rawpower / 16; + } + + case PDR_SYNTH_FRONTEND_DUETTE3: + case PDR_SYNTH_FRONTEND_DUETTE2: + case PDR_SYNTH_FRONTEND_FRISBEE: + case PDR_SYNTH_FRONTEND_XBOW: { + struct pda_channel_output_limit *pda = data; + u8 rawpower = 0; + rawpower = max(rawpower, pda->val_qpsk); + rawpower = max(rawpower, pda->val_bpsk); + rawpower = max(rawpower, pda->val_16qam); + rawpower = max(rawpower, pda->val_64qam); + /* raw values are in 1/4 dBm units */ + return rawpower / 4; + } + + default: + return 20; } } @@ -315,12 +367,19 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev) } if (i < priv->output_limit->entries) { - freq = le16_to_cpup((__le16 *) (i * - priv->output_limit->entry_size + - priv->output_limit->offset + - priv->output_limit->data)); - - p54_update_channel_param(list, freq, CHAN_HAS_LIMIT); + struct p54_channel_entry *tmp; + + void *data = (void *) ((unsigned long) i * + priv->output_limit->entry_size + + priv->output_limit->offset + + priv->output_limit->data); + + freq = le16_to_cpup((__le16 *) data); + tmp = p54_update_channel_param(list, freq, + CHAN_HAS_LIMIT); + if (tmp) { + tmp->max_power = p54_get_maxpower(priv, data); + } } if (i < priv->curve_data->entries) { @@ -834,11 +893,12 @@ good_eeprom: goto err; } + priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; + err = p54_generate_channel_lists(dev); if (err) goto err; - priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) p54_init_xbow_synth(priv); if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h index afde72b84606..20ebe39a3f4e 100644 --- a/drivers/net/wireless/p54/eeprom.h +++ b/drivers/net/wireless/p54/eeprom.h @@ -57,6 +57,18 @@ struct pda_channel_output_limit { u8 rate_set_size; } __packed; +struct pda_channel_output_limit_point_longbow { + __le16 val_bpsk; + __le16 val_qpsk; + __le16 val_16qam; + __le16 val_64qam; +} __packed; + +struct pda_channel_output_limit_longbow { + __le16 freq; + struct pda_channel_output_limit_point_longbow point[3]; +} __packed; + struct pda_pa_curve_data_sample_rev0 { u8 rf_power; u8 pa_detector; diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h index 
3d8d622bec55..de1d46bf97df 100644 --- a/drivers/net/wireless/p54/lmac.h +++ b/drivers/net/wireless/p54/lmac.h @@ -526,7 +526,9 @@ int p54_init_leds(struct p54_common *priv); void p54_unregister_leds(struct p54_common *priv); /* xmit functions */ -void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); +void p54_tx_80211(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb); int p54_tx_cancel(struct p54_common *priv, __le32 req_id); void p54_tx(struct p54_common *priv, struct sk_buff *skb); diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index 7cffea795ad2..5e91ad06dd5d 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -158,7 +158,7 @@ static int p54_beacon_update(struct p54_common *priv, * to cancel the old beacon template by hand, instead the firmware * will release the previous one through the feedback mechanism. */ - p54_tx_80211(priv->hw, beacon); + p54_tx_80211(priv->hw, NULL, beacon); priv->tsf_high32 = 0; priv->tsf_low32 = 0; diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 89318adc8c7f..b4390797d78c 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -488,6 +488,58 @@ static int p54p_open(struct ieee80211_hw *dev) return 0; } +static void p54p_firmware_step2(const struct firmware *fw, + void *context) +{ + struct p54p_priv *priv = context; + struct ieee80211_hw *dev = priv->common.hw; + struct pci_dev *pdev = priv->pdev; + int err; + + if (!fw) { + dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); + err = -ENOENT; + goto out; + } + + priv->firmware = fw; + + err = p54p_open(dev); + if (err) + goto out; + err = p54_read_eeprom(dev); + p54p_stop(dev); + if (err) + goto out; + + err = p54_register_common(dev, &pdev->dev); + if (err) + goto out; + +out: + + complete(&priv->fw_loaded); + + if (err) { + struct device *parent = pdev->dev.parent; + + if (parent) + device_lock(parent); + + /* + * This will indirectly result in a call to p54p_remove. + * Hence, we don't need to bother with freeing any + * allocated ressources at all. 
+ */ + device_release_driver(&pdev->dev); + + if (parent) + device_unlock(parent); + } + + pci_dev_put(pdev); +} + static int __devinit p54p_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -496,6 +548,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev, unsigned long mem_addr, mem_len; int err; + pci_dev_get(pdev); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); @@ -537,6 +590,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev, priv = dev->priv; priv->pdev = pdev; + init_completion(&priv->fw_loaded); SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); @@ -561,32 +615,12 @@ static int __devinit p54p_probe(struct pci_dev *pdev, spin_lock_init(&priv->lock); tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev); - err = request_firmware(&priv->firmware, "isl3886pci", - &priv->pdev->dev); - if (err) { - dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); - err = request_firmware(&priv->firmware, "isl3886", - &priv->pdev->dev); - if (err) - goto err_free_common; - } - - err = p54p_open(dev); - if (err) - goto err_free_common; - err = p54_read_eeprom(dev); - p54p_stop(dev); - if (err) - goto err_free_common; - - err = p54_register_common(dev, &pdev->dev); - if (err) - goto err_free_common; - - return 0; + err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci", + &priv->pdev->dev, GFP_KERNEL, + priv, p54p_firmware_step2); + if (!err) + return 0; - err_free_common: - release_firmware(priv->firmware); pci_free_consistent(pdev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); @@ -601,6 +635,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev, pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); + pci_dev_put(pdev); return err; } @@ -612,8 +647,9 @@ static void __devexit p54p_remove(struct pci_dev *pdev) if (!dev) return; - p54_unregister_common(dev); priv = dev->priv; + wait_for_completion(&priv->fw_loaded); + p54_unregister_common(dev); release_firmware(priv->firmware); pci_free_consistent(pdev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h index 7aa509f7e387..68405c142f97 100644 --- a/drivers/net/wireless/p54/p54pci.h +++ b/drivers/net/wireless/p54/p54pci.h @@ -105,6 +105,7 @@ struct p54p_priv { struct sk_buff *tx_buf_data[32]; struct sk_buff *tx_buf_mgmt[4]; struct completion boot_comp; + struct completion fw_loaded; }; #endif /* P54USB_H */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index f38786e02623..5861e13a6fd8 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -676,8 +676,9 @@ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) EXPORT_SYMBOL_GPL(p54_rx); static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, - struct ieee80211_tx_info *info, u8 *queue, - u32 *extra_len, u16 *flags, u16 *aid, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + u8 *queue, u32 *extra_len, u16 *flags, u16 *aid, bool *burst_possible) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -746,8 +747,8 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, } } - if (info->control.sta) - *aid = info->control.sta->aid; + if (sta) + *aid = sta->aid; break; } } @@ -767,7 +768,9 @@ static u8 p54_convert_algo(u32 cipher) } } -void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) +void 
p54_tx_80211(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct p54_common *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -784,7 +787,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) u8 nrates = 0, nremaining = 8; bool burst_allowed = false; - p54_tx_80211_header(priv, skb, info, &queue, &extra_len, + p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len, &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 8afb546c2b2d..f991e8bedc70 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -1287,7 +1287,9 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp); /* * mac80211 handlers. */ -void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void rt2x00mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb); int rt2x00mac_start(struct ieee80211_hw *hw); void rt2x00mac_stop(struct ieee80211_hw *hw); int rt2x00mac_add_interface(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 3f07e36f462b..10cf67267775 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -194,7 +194,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, */ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); while (skb) { - rt2x00mac_tx(rt2x00dev->hw, skb); + rt2x00mac_tx(rt2x00dev->hw, NULL, skb); skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); } } diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 4ff26c2159bf..c3d0f2f87b69 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -99,7 +99,9 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, return retval; } -void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void rt2x00mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct rt2x00_dev *rt2x00dev = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index f7e74a0a7759..e488b944a034 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -315,6 +315,7 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc, + struct ieee80211_sta *sta, const struct rt2x00_rate *hwrate) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -322,11 +323,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rt2x00_sta *sta_priv = NULL; - if (tx_info->control.sta) { + if (sta) { txdesc->u.ht.mpdu_density = - tx_info->control.sta->ht_cap.ampdu_density; + sta->ht_cap.ampdu_density; - sta_priv = sta_to_rt2x00_sta(tx_info->control.sta); + sta_priv = sta_to_rt2x00_sta(sta); txdesc->u.ht.wcid = sta_priv->wcid; } @@ -341,8 +342,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, * MIMO PS should be set to 1 for STA's using dynamic SM PS * when using more then 
one tx stream (>MCS7). */ - if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && - ((tx_info->control.sta->ht_cap.cap & + if (sta && txdesc->u.ht.mcs > 7 && + ((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT) == WLAN_HT_CAP_SM_PS_DYNAMIC) @@ -409,7 +410,8 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, - struct txentry_desc *txdesc) + struct txentry_desc *txdesc, + struct ieee80211_sta *sta) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -503,7 +505,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags)) rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, - hwrate); + sta, hwrate); else rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, hwrate); @@ -595,7 +597,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, * after that we are free to use the skb->cb array * for our information. */ - rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc); + rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, NULL); /* * All information is retrieved from the skb->cb array, @@ -740,7 +742,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev, * after that we are free to use the skb->cb array * for our information. */ - rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc); + rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL); /* * Fill in skb descriptor diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index aceaf689f737..021d83e1b1d3 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c @@ -244,7 +244,9 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void rtl8180_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -710,7 +712,7 @@ static void rtl8180_beacon_work(struct work_struct *work) /* TODO: use actual beacon queue */ skb_set_queue_mapping(skb, 0); - rtl8180_tx(dev, skb); + rtl8180_tx(dev, NULL, skb); resched: /* diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index 533024095c43..7811b6315973 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -228,7 +228,9 @@ static void rtl8187_tx_cb(struct urb *urb) } } -static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void rtl8187_tx(struct ieee80211_hw *dev, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct rtl8187_priv *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1076,7 +1078,7 @@ static void rtl8187_beacon_work(struct work_struct *work) /* TODO: use actual beacon queue */ skb_set_queue_mapping(skb, 0); - rtl8187_tx(dev, skb); + rtl8187_tx(dev, NULL, skb); resched: /* diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 942e56b77b60..59381fe8ed06 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ 
b/drivers/net/wireless/rtlwifi/base.c @@ -1341,9 +1341,8 @@ int rtl_send_smps_action(struct ieee80211_hw *hw, rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); info->control.rates[0].idx = 0; - info->control.sta = sta; info->band = hw->conf.channel->band; - rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); + rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc); } err_free: return 0; diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index a18ad2a98938..a7c0e52869ba 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -124,7 +124,9 @@ static void rtl_op_stop(struct ieee80211_hw *hw) mutex_unlock(&rtlpriv->locks.conf_mutex); } -static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void rtl_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -138,8 +140,8 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) goto err_free; - if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) - rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); + if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb)) + rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc); return; diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 80f75d3ba84a..aad9d44c0a51 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -504,7 +504,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw) _rtl_update_earlymode_info(hw, skb, &tcb_desc, tid); - rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); } } } @@ -929,7 +929,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) info = IEEE80211_SKB_CB(pskb); pdesc = &ring->desc[0]; rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, - info, pskb, BEACON_QUEUE, &tcb_desc); + info, NULL, pskb, BEACON_QUEUE, &tcb_desc); __skb_queue_tail(&ring->queue, pskb); @@ -1305,11 +1305,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) } static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; struct rtl_sta_info *sta_entry = NULL; u8 tid = rtl_get_tid(skb); @@ -1337,13 +1336,14 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, return true; } -static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, - struct rtl_tcb_desc *ptcb_desc) +static int rtl_pci_tx(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_sta_info *sta_entry = NULL; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = info->control.sta; struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; u8 idx; @@ -1418,7 +1418,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, - info, skb, hw_queue, ptcb_desc); + info, sta, skb, hw_queue, ptcb_desc); __skb_queue_tail(&ring->queue, skb); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c 
b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index 52166640f167..390d6d4fcaa0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -596,7 +596,9 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - struct ieee80211_tx_info *info, struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *tcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -604,7 +606,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool defaultadapter = true; - struct ieee80211_sta *sta; u8 *pdesc = pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h index c4adb9777365..a7cdd514cb2e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h @@ -713,6 +713,7 @@ struct rx_desc_92c { void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index 2e6eb356a93e..27863d773790 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -496,7 +496,9 @@ static void _rtl_tx_desc_checksum(u8 *txdesc) void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - struct ieee80211_tx_info *info, struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 queue_index, struct rtl_tcb_desc *tcb_desc) { @@ -504,7 +506,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool defaultadapter = true; - struct ieee80211_sta *sta = info->control.sta = info->control.sta; u8 *qc = ieee80211_get_qos_ctl(hdr); u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; u16 seq_number; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h index 332b06e78b00..725c53accc58 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h @@ -420,7 +420,9 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *, struct sk_buff_head *); void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - struct ieee80211_tx_info *info, struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 queue_index, struct rtl_tcb_desc *tcb_desc); void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c index f80690d82c11..4686f340b9d6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c @@ -551,7 +551,9 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - struct ieee80211_tx_info 
*info, struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -559,7 +561,6 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct ieee80211_sta *sta = info->control.sta; u8 *pdesc = pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h index 057a52431b00..c1b5dfb79d53 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h @@ -730,6 +730,7 @@ struct rx_desc_92d { void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 36d1cb3aef8a..28c53fb12aeb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -591,14 +591,15 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, - struct ieee80211_tx_info *info, struct sk_buff *skb, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct ieee80211_sta *sta = info->control.sta; u8 *pdesc = pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h index 011e7b0695f2..64dd66f287c1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.h @@ -31,6 +31,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index aa970fc18a21..914046903cfd 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -848,8 +848,10 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, _rtl_submit_tx_urb(hw, _urb); } -static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, - u16 hw_queue) +static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u16 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -891,7 +893,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, seq_number += 1; seq_number <<= 4; } - rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb, + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb, hw_queue, &tcb_desc); if 
(!ieee80211_has_morefrags(hdr->frame_control)) { if (qc) @@ -901,7 +903,9 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); } -static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb, +static int rtl_usb_tx(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, struct rtl_tcb_desc *dummy) { struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); @@ -913,7 +917,7 @@ static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb, if (unlikely(is_hal_stop(rtlhal))) goto err_free; hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); - _rtl_usb_tx_preprocess(hw, skb, hw_queue); + _rtl_usb_tx_preprocess(hw, sta, skb, hw_queue); _rtl_usb_transmit(hw, skb, hw_queue); return NETDEV_TX_OK; @@ -923,6 +927,7 @@ err_free: } static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, struct sk_buff *skb) { return false; diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index cdaa21f29710..40153e7bf702 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -122,7 +122,7 @@ enum rt_eeprom_type { EEPROM_BOOT_EFUSE, }; -enum rtl_status { +enum ttl_status { RTL_STATUS_INTERFACE_START = 0, }; @@ -1418,6 +1418,7 @@ struct rtl_hal_ops { void (*fill_tx_desc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc_tx, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc, @@ -1475,11 +1476,15 @@ struct rtl_intf_ops { int (*adapter_start) (struct ieee80211_hw *hw); void (*adapter_stop) (struct ieee80211_hw *hw); - int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb, - struct rtl_tcb_desc *ptcb_desc); + int (*adapter_tx) (struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc); void (*flush)(struct ieee80211_hw *hw, bool drop); int (*reset_trx_ring) (struct ieee80211_hw *hw); - bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); + bool (*waitq_insert) (struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb); /*pci */ void (*disable_aspm) (struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 3118c425bcf1..441cbccbd381 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -354,7 +354,9 @@ out: return ret; } -static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void wl1251_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct wl1251 *wl = hw->priv; unsigned long flags; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 72548609f711..ff830cf50c70 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1181,7 +1181,9 @@ out: return ret; } -static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void wl1271_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) { struct wl1271 *wl = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -1197,7 +1199,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) mapping = skb_get_queue_mapping(skb); q = wl1271_tx_get_queue(mapping); 
- hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); + hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta); spin_lock_irqsave(&wl->wl_lock, flags); diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index f0081f746482..1a2f31c289c5 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -130,16 +130,13 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) } EXPORT_SYMBOL(wl12xx_is_dummy_packet); -u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb) +static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, + struct sk_buff *skb, struct ieee80211_sta *sta) { - struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); - - if (control->control.sta) { + if (sta) { struct wl1271_station *wl_sta; - wl_sta = (struct wl1271_station *) - control->control.sta->drv_priv; + wl_sta = (struct wl1271_station *)sta->drv_priv; return wl_sta->hlid; } else { struct ieee80211_hdr *hdr; @@ -156,7 +153,7 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif, } u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb) + struct sk_buff *skb, struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -164,7 +161,7 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif, return wl->system_hlid; if (wlvif->bss_type == BSS_TYPE_AP_BSS) - return wl12xx_tx_get_hlid_ap(wl, wlvif, skb); + return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta); if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) && @@ -344,13 +341,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif, /* caller must hold wl->mutex */ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb, u32 buf_offset) + struct sk_buff *skb, u32 buf_offset, u8 hlid) { struct ieee80211_tx_info *info; u32 extra = 0; int ret = 0; u32 total_len; - u8 hlid; bool is_dummy; bool is_gem = false; @@ -359,9 +355,13 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, return -EINVAL; } + if (hlid == WL12XX_INVALID_LINK_ID) { + wl1271_error("invalid hlid. dropping skb 0x%p", skb); + return -EINVAL; + } + info = IEEE80211_SKB_CB(skb); - /* TODO: handle dummy packets on multi-vifs */ is_dummy = wl12xx_is_dummy_packet(wl, skb); if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) && @@ -386,11 +386,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif, is_gem = (cipher == WL1271_CIPHER_SUITE_GEM); } - hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); - if (hlid == WL12XX_INVALID_LINK_ID) { - wl1271_error("invalid hlid. 
dropping skb 0x%p", skb); - return -EINVAL; - } ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid, is_gem); @@ -517,7 +512,8 @@ static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl, } static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, - struct wl12xx_vif *wlvif) + struct wl12xx_vif *wlvif, + u8 *hlid) { struct sk_buff *skb = NULL; int i, h, start_hlid; @@ -544,10 +540,11 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl, if (!skb) wlvif->last_tx_hlid = 0; + *hlid = wlvif->last_tx_hlid; return skb; } -static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) +static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid) { unsigned long flags; struct wl12xx_vif *wlvif = wl->last_wlvif; @@ -556,7 +553,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) /* continue from last wlvif (round robin) */ if (wlvif) { wl12xx_for_each_wlvif_continue(wl, wlvif) { - skb = wl12xx_vif_skb_dequeue(wl, wlvif); + skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid); if (skb) { wl->last_wlvif = wlvif; break; @@ -565,13 +562,15 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) } /* dequeue from the system HLID before the restarting wlvif list */ - if (!skb) + if (!skb) { skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]); + *hlid = wl->system_hlid; + } /* do a new pass over the wlvif list */ if (!skb) { wl12xx_for_each_wlvif(wl, wlvif) { - skb = wl12xx_vif_skb_dequeue(wl, wlvif); + skb = wl12xx_vif_skb_dequeue(wl, wlvif, hlid); if (skb) { wl->last_wlvif = wlvif; break; @@ -591,6 +590,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) int q; skb = wl->dummy_packet; + *hlid = wl->system_hlid; q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); spin_lock_irqsave(&wl->wl_lock, flags); WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); @@ -602,7 +602,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) } static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, - struct sk_buff *skb) + struct sk_buff *skb, u8 hlid) { unsigned long flags; int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); @@ -610,7 +610,6 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (wl12xx_is_dummy_packet(wl, skb)) { set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); } else { - u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb); skb_queue_head(&wl->links[hlid].tx_queue[q], skb); /* make sure we dequeue the same packet next time */ @@ -686,26 +685,30 @@ int wlcore_tx_work_locked(struct wl1271 *wl) unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; int ret = 0; int bus_ret = 0; + u8 hlid; if (unlikely(wl->state == WL1271_STATE_OFF)) return 0; - while ((skb = wl1271_skb_dequeue(wl))) { + while ((skb = wl1271_skb_dequeue(wl, &hlid))) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool has_data = false; wlvif = NULL; if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif) wlvif = wl12xx_vif_to_data(info->control.vif); + else + hlid = wl->system_hlid; has_data = wlvif && wl1271_tx_is_data_present(skb); - ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset); + ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset, + hlid); if (ret == -EAGAIN) { /* * Aggregation buffer is full. * Flush buffer and try again. 
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 1e939b016155..349520d8b724 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -243,10 +243,8 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 				enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-			 struct sk_buff *skb);
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-		      struct sk_buff *skb);
+		      struct sk_buff *skb, struct ieee80211_sta *sta);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index c9e2660e1263..459880104758 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -937,7 +937,9 @@ static int fill_ctrlset(struct zd_mac *mac,
  * control block of the skbuff will be initialized. If necessary the incoming
  * mac80211 queues will be stopped.
  */
-static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void zd_op_tx(struct ieee80211_hw *hw,
+		     struct ieee80211_tx_control *control,
+		     struct sk_buff *skb)
 {
 	struct zd_mac *mac = zd_hw_mac(hw);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1176,7 +1178,7 @@ static void zd_beacon_done(struct zd_mac *mac)
 		skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
 		if (!skb)
 			break;
-		zd_op_tx(mac->hw, skb);
+		zd_op_tx(mac->hw, NULL, skb);
 	}
 
 	/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 650f79a1f2bd..c934fe8583f5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1712,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateConnected:
-		netif_notify_peers(netdev);
+		netdev_notify_peers(netdev);
 		break;
 
 	case XenbusStateClosing:
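The closing xen-netfront hunk is purely a rename: the old netif_notify_peers() helper is gone, and netdev_notify_peers() now does the same job of announcing the device's (possibly new) location once the frontend is connected again. For illustration only (my_dev and my_dev_link_restored are placeholders, not taken from this patch), a virtual NIC would typically use it like this after a reconnect or migration:

#include <linux/netdevice.h>

/* Ask the stack to send gratuitous ARPs / unsolicited neighbour
 * advertisements so peers and switches relearn where we are.
 * netdev_notify_peers() takes the RTNL itself, so it must not be
 * called with the RTNL already held.
 */
static void my_dev_link_restored(struct net_device *my_dev)
{
	netif_carrier_on(my_dev);
	netdev_notify_peers(my_dev);
}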